diff --git a/CMakeLists.txt b/CMakeLists.txt index dc9b30e5481a32e8c89d1e4337ebfeb5fd40f627..b572d7bd16c4fb2725a13324c2fe0b697e9a02a2 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,6 +26,13 @@ SET(CMAKE_VERBOSE_MAKEFILE ON) # open the file named TDengine.sln # +SET(TD_GODLL FALSE) +IF (${DLLTYPE} MATCHES "go") + ADD_DEFINITIONS(-D_TD_GO_DLL_) + MESSAGE(STATUS "input dll type: " ${DLLTYPE}) + SET(TD_GODLL TRUE) +ENDIF () + IF (NOT DEFINED TD_CLUSTER) MESSAGE(STATUS "Build the Lite Version") SET(TD_CLUSTER FALSE) @@ -183,9 +190,11 @@ IF (NOT DEFINED TD_CLUSTER) ENDIF () ELSEIF (TD_WINDOWS_64) SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE) - SET(COMMON_FLAGS "/nologo /WX- /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-") - SET(DEBUG_FLAGS "/Zi /W3 /GL") - SET(RELEASE_FLAGS "/W0 /GL") + IF (NOT TD_GODLL) + SET(COMMON_FLAGS "/nologo /WX- /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-") + SET(DEBUG_FLAGS "/Zi /W3 /GL") + SET(RELEASE_FLAGS "/W0 /GL") + ENDIF () ADD_DEFINITIONS(-DWINDOWS) ADD_DEFINITIONS(-D__CLEANUP_C) ADD_DEFINITIONS(-DPTW32_STATIC_LIB) @@ -251,21 +260,31 @@ IF (NOT DEFINED TD_CLUSTER) INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})") ELSEIF (TD_WINDOWS_64) SET(CMAKE_INSTALL_PREFIX C:/TDengine) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) - INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) - #INSTALL(TARGETS taos RUNTIME DESTINATION driver) - #INSTALL(TARGETS shell RUNTIME DESTINATION .) - IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-1.0.2-dist.jar DESTINATION connector/jdbc) + IF (NOT TD_GODLL) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) + INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) + #INSTALL(TARGETS taos RUNTIME DESTINATION driver) + #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
+    IF (TD_MVN_INSTALLED)
+      INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-1.0.2-dist.jar DESTINATION connector/jdbc)
+    ENDIF ()
+  ELSE ()
+    INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver)
+    INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver)
     ENDIF ()
+  ELSEIF (TD_DARWIN_64)
+    SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
+    INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
+    INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
+    INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin)")
   ENDIF ()
 ENDIF ()
diff --git a/README.md b/README.md
index fddd7d31322911947815ba8589374666b88e5d69..6efa0ff8ef026c51a859123bf37c71d570d3758d 100644
--- a/README.md
+++ b/README.md
@@ -45,10 +45,16 @@ mkdir build && cd build
 cmake .. && cmake --build .
 ```
 
-if compiling on an arm64 processor, you need add one parameter:
+To compile on an ARM processor (aarch64 or aarch32), please add the CPUTYPE option as below:
+aarch64:
 ```cmd
-cmake .. -DARMVER=arm64 && cmake --build .
+cmake .. -DCPUTYPE=aarch64 && cmake --build .
+```
+
+aarch32:
+```cmd
+cmake .. -DCPUTYPE=aarch32 && cmake --build .
 ```
 
 # Quick Run
diff --git a/deps/iconv/iconv.c b/deps/iconv/iconv.c
index b84a09fd0ae9d0920152fcd2c08ac4fa7c2ad268..391e35d4e78efda0cb881b247cbccf22ab66c67b 100644
--- a/deps/iconv/iconv.c
+++ b/deps/iconv/iconv.c
@@ -175,7 +175,10 @@ static const struct alias sysdep_aliases[] = {
 #ifdef __GNUC__
 __inline
 #endif
-const struct alias *
+// gcc -O0 bug fix
+// see http://git.savannah.gnu.org/gitweb/?p=libiconv.git;a=blobdiff;f=lib/iconv.c;h=31853a7f1c47871221189dbf597473a16d8a8da7;hp=5a1a32597fa3efc5f69624d37a2eb96f308cd241;hb=b29089d8b43abc8fba073da7e6dccaeba56b2b70;hpb=0a04404c90d6a725b8b6bbcd65e10c5fcf5993e9
+
+static const struct alias *
 aliases2_lookup (register const char *str)
 {
   const struct alias * ptr;
diff --git a/documentation/webdocs/markdowndocs/Connector.md b/documentation/webdocs/markdowndocs/Connector.md
index 46a2b04daa323ab672b60b90614a9665205184c8..a0433d1f09d7c5f2ec1205f89d2efe638703dc7d 100644
--- a/documentation/webdocs/markdowndocs/Connector.md
+++ b/documentation/webdocs/markdowndocs/Connector.md
@@ -175,26 +175,34 @@ TDengine provides APIs for continuous query driven by time, which run queries pe
 
 ### C/C++ subscription API
 
-For the time being, TDengine supports subscription on one table. It is implemented through periodic pulling from a TDengine server.
+For the time being, TDengine supports subscription on one or more tables. It is implemented through periodic polling of a TDengine server.
 
-- `TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds)`
-  The API is used to start a subscription session by given a handle. The parameters required are _host_ (IP address of a TDenginer server), _user_ (username), _pass_ (password), _db_ (database to use), _table_ (table name to subscribe), _time_ (start time to subscribe, 0 for now), _mseconds_ (pulling period). If failed to open a subscription session, a _NULL_ pointer is returned.
+* `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)`
+  The API is used to start a subscription session. It returns the subscription object on success and `NULL` in case of failure. The parameters are:
+  * **taos**: the database connection, which must already be established.
+  * **restart**: `Zero` to continue an existing subscription, any other value to start over from the beginning.
+  * **topic**: the unique identifier of a subscription.
+  * **sql**: a SQL statement for data query; it can only be a `select` statement, can only query raw data, and can only query data in ascending order of the timestamp field.
+  * **fp**: a callback function to receive query results, only used in asynchronous mode and should be `NULL` in synchronous mode; see below for its prototype.
+  * **param**: a user-provided additional parameter for the callback function.
+  * **interval**: polling interval in milliseconds. In asynchronous mode, the API calls the callback function `fp` at this interval; system performance will suffer if the interval is too short. In synchronous mode, if the interval between two calls to `taos_consume` is less than this value, the second call blocks until the interval elapses.
 
-- `TAOS_ROW taos_consume(TAOS_SUB *tsub)`
-  The API used to get the new data from a TDengine server. It should be put in an infinite loop. The parameter _tsub_ is the handle returned by _taos_subscribe_. If new data are updated, the API will return a row of the result. Otherwise, the API is blocked until new data arrives. If _NULL_ pointer is returned, it means an error occurs.
+* `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
+  The prototype of the callback function. Its parameters are:
+  * tsub: the subscription object.
+  * res: the query result.
+  * param: the user-provided additional parameter passed to `taos_subscribe`.
+  * code: the error code in case of failures.
 
-- `void taos_unsubscribe(TAOS_SUB *tsub)`
-  Stop a subscription session by the handle returned by _taos_subscribe_.
-
-
-- `int taos_num_subfields(TAOS_SUB *tsub)`
-  The API used to get the number of fields in a row.
+* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
+  The API used to get new data from a TDengine server. It should be put in a loop. The parameter `tsub` is the handle returned by `taos_subscribe`. This API should only be called in synchronous mode. If the interval between two calls to `taos_consume` is less than the polling interval, the second call blocks until the interval elapses. The API returns the new rows if new data arrives, or an empty row set otherwise; if an error occurs, it returns `NULL`.
+
+* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
 
-- `TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub)`
-  The API used to get the description of each column.
+  Stop a subscription session by the handle returned by `taos_subscribe`. If `keepProgress` is **not** zero, the subscription progress information is kept and can be reused in a later call to `taos_subscribe`; otherwise the information is removed.
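+
+A minimal synchronous-mode sketch follows (the connection parameters and the table name `meters` are illustrative only, and error handling is abbreviated):
+
+```c
+#include <stdio.h>
+#include "taos.h"
+
+int main() {
+  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
+  if (taos == NULL) return 1;
+
+  // restart = 0: resume the topic if it already exists
+  TAOS_SUB *tsub = taos_subscribe(taos, 0, "demo-topic", "select * from meters;", NULL, NULL, 1000);
+  if (tsub == NULL) return 1;
+
+  for (int i = 0; i < 100; i++) {
+    TAOS_RES *res = taos_consume(tsub);  // blocks until the polling interval elapses
+    if (res == NULL) break;              // error
+    TAOS_ROW row;
+    while ((row = taos_fetch_row(res)) != NULL) {
+      // process the newly arrived row here
+    }
+  }
+
+  taos_unsubscribe(tsub, 1);  // keep progress so the topic can be resumed later
+  taos_close(taos);
+  return 0;
+}
+```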
 
 ## Java Connector
@@ -208,7 +216,7 @@ Since the native language of TDengine is C, the necessary TDengine library shoul
 * taos.dll (Windows)
 
   After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path.
 
-> Note: Please make sure that TDengine Windows client has been installed if developing on Windows.
+> Note: Please make sure that the [TDengine Windows client][14] has been installed if developing on Windows. Although the TDengine client is installed by default together with the TDengine server, it can also be installed [separately][15].
 
 Since TDengine is time-series database, there are still some differences compared with traditional databases in using TDengine JDBC driver:
 * TDengine doesn't allow to delete/modify a single record, and thus JDBC driver also has no such method.
@@ -583,13 +591,35 @@ data = c1.fetchall()
 numOfRows = c1.rowcount
 numOfCols = len(c1.description)
 for irow in range(numOfRows):
-    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2])
+    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
 
 # use the cursor as an iterator to retrieve all returned results
 c1.execute('select * from tb')
 for data in c1:
-    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2])
+    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
 ```
+
+* create a subscription
+```python
+# Create a subscription with topic 'test' and consumption interval 1000ms.
+# Passing True as the first argument restarts the subscription.
+# If a subscription with topic 'test' already exists, passing False
+# continues the existing subscription instead of starting over.
+sub = conn.subscribe(True, "test", "select * from meters;", 1000)
+```
+
+* consume a subscription
+```python
+data = sub.consume()
+for d in data:
+    print(d)
+```
+
+* close the subscription
+```python
+sub.close()
+```
+
 * close the connection
 ```python
 c1.close()
 ```
@@ -882,4 +912,5 @@ An example of using the NodeJS connector to achieve the same things but without
 [11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
 [12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
 [13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
-
+[14]: https://www.taosdata.com/cn/documentation/connector/#Windows%E5%AE%A2%E6%88%B7%E7%AB%AF%E5%8F%8A%E7%A8%8B%E5%BA%8F%E6%8E%A5%E5%8F%A3
+[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
diff --git a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md
index 347ac4f21f00202a3848cf4a48694dcbd64cb274..1e9383c40c22bf645413405b88b72bf78f9e4d4b 100644
--- a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md
+++ b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md
@@ -1,6 +1,8 @@
 # TAOS SQL
 
-TDengine提供类似SQL语法,用户可以在TDengine Shell中使用SQL语句操纵数据库,也可以通过C/C++, Java(JDBC), Python, Go等各种程序来执行SQL语句。
+本文档说明TAOS SQL支持的语法规则、主要查询功能、支持的SQL查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的SQL语言基础。
+
+TAOS SQL是用户对TDengine进行数据写入和查询的主要工具。为了便于用户快速上手,TAOS SQL在一定程度上提供与标准SQL类似的风格和模式。严格意义上,TAOS SQL并不是也不试图提供SQL标准的语法。此外,由于TDengine针对的时序性结构化数据不提供修改和更新功能,因此TAOS SQL中不提供数据更新和数据删除的相关功能。
 
 本章节SQL语法遵循如下约定:
 
@@ -9,11 +11,41 @@ TDengine提供类似SQL语法,用户可以在TDengine Shell中使用SQL语句
 - | 表示多选一,选择其中一个即可,但不能输入|本身
 - … 表示前面的项可重复多个
 
+为更好地说明SQL语法的规则及其特点,本文假设存在一个数据集。该数据集针对两种类型的设备(温度/湿度传感器,气压/海拔传感器)建立了数据模型。
+针对温度传感器,具有超级表(super table) temp_stable,其数据模型如下:
+```
+taos> describe temp_stable;
+Field        |Type       | Length | Note |
+=======================================================================================================
+ts           |TIMESTAMP  |      8 |      |
+temperature  |FLOAT      |      4 |      |
+humidity     |TINYINT    |      1 |      |
+status       |TINYINT    |      1 |      |
+deviceid     |BIGINT     |      8 |tag   |
+location     |BINARY     |     20 |tag   |
+```
+数据集包含2个温度传感器的数据,按照TDengine的建模规则,对应2个子表,其名称分别是 temp_tb_1、temp_tb_2。
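+这一数据模型可以用如下语句建立(示意性写法,标签取值仅为举例):
+```mysql
+-- 建立温度传感器超级表
+CREATE TABLE temp_stable (ts TIMESTAMP, temperature FLOAT, humidity TINYINT, status TINYINT) TAGS (deviceid BIGINT, location BINARY(20));
+-- 基于超级表建立两个子表,标签值标识具体的传感器
+CREATE TABLE temp_tb_1 USING temp_stable TAGS (91234, 'beijing');
+CREATE TABLE temp_tb_2 USING temp_stable TAGS (54197, 'beijing');
+```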
+针对气压(海拔)传感器,具有超级表(super table) pressure_stable,其数据模型如下:
+```text
+taos> describe pressure_stable;
+Field        |Type       | Length | Note |
+=======================================================================================================
+ts           |TIMESTAMP  |      8 |      |
+height       |FLOAT      |      4 |      |
+pressure     |FLOAT      |      4 |      |
+devstat      |TINYINT    |      1 |      |
+id           |BIGINT     |      8 |tag   |
+city         |NCHAR      |     20 |tag   |
+longitude    |FLOAT      |      4 |tag   |
+latitude     |FLOAT      |      4 |tag   |
+```
+数据集包含2个气压传感器的数据,对应2个子表,分别是 press_tb_1、press_tb_2。
 
 ## 支持的数据类型
 
 使用TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则:
 
-- 时间格式为YYYY-MM-DD HH:mm:ss.MS, 默认时间分辨率为毫秒。比如:2017-08-12 18:25:58.128
+- 时间格式为`YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128`
 - 内部函数now是服务器的当前时间
 - 插入记录时,如果时间戳为0,插入数据时使用服务器当前时间
 - Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数
@@ -27,13 +59,13 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
 
 | | 类型 | Bytes | 说明 |
 | ---- | :-------: | ------ | ------------------------------------------------------------ |
 | 1 | TIMESTAMP | 8 | 时间戳。最小精度毫秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 |
-| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31被用作Null值 |
-| 3 | BIGINT | 8 | 长整型,范围 [-2^59, 2^59] |
+| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31用于NULL |
+| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63用于NULL |
 | 4 | FLOAT | 4 | 浮点型,有效位数6-7,范围 [-3.4E38, 3.4E38] |
 | 5 | DOUBLE | 8 | 双精度浮点型,有效位数15-16,范围 [-1.7E308, 1.7E308] |
 | 6 | BINARY | 自定义 | 用于记录字符串,最长不能超过504 bytes。binary仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如binary(20)定义了最长为20个字符的字符串,每个字符占1byte的存储空间。如果用户字符串超出20字节,将被自动截断。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示, 即 **\’**。 |
-| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767] |
-| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127] |
+| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768用于NULL |
+| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128用于NULL |
 | 9 | BOOL | 1 | 布尔型,{true, false} |
 | 10 | NCHAR | 自定义 | 用于记录非ASCII字符串,如中文字符。每个nchar字符占用4bytes的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 **\’**。nchar使用时须指定字符串大小,类型为nchar(10)的列表示此列的字符串最多存储10个nchar字符,会固定占用40bytes的空间。如用户字符串长度超出声明长度,则将被自动截断。 |
 
@@ -165,19 +197,172 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
 
 ## 数据查询
 
-###查询语法是:
+### 查询语法:
 
 ```mysql
-SELECT {* | expr_list} FROM tb_name
-  [WHERE where_condition]
-  [ORDER BY _c0 { DESC | ASC }]
-  [LIMIT limit [, OFFSET offset]]
-  [>> export_file]
-
-SELECT function_list FROM tb_name
-  [WHERE where_condition]
-  [LIMIT limit [, OFFSET offset]]
-  [>> export_file]
+SELECT [DISTINCT] select_expr [, select_expr ...]
+FROM {tb_name_list}
+[WHERE where_condition]
+[INTERVAL [interval_offset,] interval_val]
+[FILL fill_val]
+[SLIDING sliding_val]
+[GROUP BY col_list]
+[ORDER BY col_list { DESC | ASC }]
+[HAVING expr_list]
+[SLIMIT limit_val [, SOFFSET offset_val]]
+[LIMIT limit_val [, OFFSET offset_val]]
+[>> export_file]
+```
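+一个综合使用上述若干子句的查询示例如下(示意写法,时间范围与填充策略请按需调整):
+```mysql
+-- 按5分钟窗口统计最近一小时的平均温度,缺失窗口用前值填充,只取前10行
+SELECT AVG(temperature) FROM temp_stable
+WHERE ts > now - 1h
+INTERVAL(5m) FILL(PREV)
+LIMIT 10;
+```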
+#### SELECT子句
+一个选择子句可以是联合查询(UNION)和另一个查询的子查询(SUBQUERY)。
+
+##### 通配符
+通配符 * 可以用于代指全部列。对于普通表,结果中只有普通列。
+```
+taos> select * from temp_tb_1;
+ts                   | temperature |humidity|status|
+============================================================
+19-04-28 14:22:07.000| 20.00000    | 34     | 1    |
+19-04-28 14:22:08.000| 21.50000    | 38     | 1    |
+19-04-28 14:22:09.000| 21.30000    | 38     | 1    |
+19-04-28 14:22:10.000| 21.20000    | 38     | 1    |
+19-04-28 14:22:11.000| 21.30000    | 35     | 0    |
+19-04-28 14:22:12.000| 22.00000    | 34     | 0    |
+```
+针对超级表,通配符还包含 _标签列_ 。
+```
+taos> select * from temp_stable;
+ts                   | temperature |humidity|status| deviceid | location |
+==============================================================================================
+19-04-28 14:22:07.000| 21.00000    | 37     | 1    |54197     |beijing   |
+19-04-28 14:22:07.000| 20.00000    | 34     | 1    |91234     |beijing   |
+19-04-28 14:22:08.000| 21.50000    | 38     | 1    |91234     |beijing   |
+19-04-28 14:22:09.000| 21.30000    | 38     | 1    |91234     |beijing   |
+19-04-28 14:22:10.000| 21.20000    | 38     | 1    |91234     |beijing   |
+19-04-28 14:22:11.000| 21.30000    | 35     | 0    |91234     |beijing   |
+19-04-28 14:22:12.000| 22.00000    | 34     | 0    |91234     |beijing   |
+```
+通配符支持表名前缀,以下两个SQL语句均返回全部的列:
+```
+select * from temp_tb_1;
+select temp_tb_1.* from temp_tb_1;
+```
+在Join查询中,带前缀的\*和不带前缀的\*返回的结果有差别:\*返回全部表的所有列数据(不包含标签),而带前缀的通配符只返回该表的列数据。
+```
+taos> select * from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts;
+ts                   | temperature |humidity|status| ts                   | temperature |humidity|status|
+========================================================================================================================
+19-04-28 14:22:07.000| 20.00000    | 34     | 1    | 19-04-28 14:22:07.000| 21.00000    | 37     | 1    |
+```
+
+```
+taos> select temp_tb_1.* from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts;
+ts                   | temperature |humidity|status|
+============================================================
+19-04-28 14:22:07.000| 20.00000    | 34     | 1    |
+```
+
+在使用SQL函数进行查询的过程中,部分SQL函数支持通配符操作。其中的区别在于:`count(*)`函数只返回一列,而`first`、`last`、`last_row`函数则返回全部列。
+
+```
+taos> select count(*) from temp_tb_1;
+count(*)             |
+======================
+1                    |
+```
+
+```
+taos> select first(*) from temp_tb_1;
+first(ts)            | first(temperature) |first(humidity)|first(status)|
+==========================================================================
+19-04-28 14:22:07.000| 20.00000           | 34            | 1           |
+```
+
+#### 结果集列名
+
+`SELECT`子句中,如果不指定返回结果集的列名,结果集列名称默认使用`SELECT`子句中的表达式名称作为列名称。此外,用户可使用`AS`来重命名返回结果集中列的名称。例如:
+```
+taos> select ts, ts as primary_key_ts from temp_tb_1;
+ts                   | primary_key_ts       |
+==============================================
+19-04-28 14:22:07.000| 19-04-28 14:22:07.000|
+```
+但是针对`first(*)`、`last(*)`、`last_row(*)`不支持针对单列的重命名。
+
+#### DISTINCT修饰符
+`DISTINCT`只能用于修饰标签列(TAGS)的结果,不能用于修饰普通列来获得去重后的结果;并且应用`DISTINCT`以后,只能进行单列的标签输出。
+`count(distinct column_name)`用以返回不重复结果的数量,该结果是近似值。
+
+#### 隐式结果列
+`select_exprs`可以是表所属列的列名,也可以是基于列的函数表达式或计算式,数量上限为256个。当用户使用了`interval`或`group by tags`的子句以后,在最后返回结果中会强制返回时间戳列(第一列)和group by子句中的标签列。后续的版本中可以支持关闭group by子句中隐式列的输出,列输出完全由select子句控制。
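+例如,下面的查询虽然只选择了 avg(temperature),但结果集中会隐式返回时间窗口的时间戳列和标签列(示意写法):
+```mysql
+-- 返回结果包含:时间戳列(第一列)、avg(temperature)、location
+SELECT AVG(temperature) FROM temp_stable INTERVAL(1m) GROUP BY location;
+```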
+#### 表(超级表)列表
+
+FROM关键字后面可以是若干个表(超级表)列表,也可以是子查询的结果。
+如果没有指定用户的当前数据库,可以在表名称之前使用数据库的名称来指定表所属的数据库,例如:用 `sample.temp_tb_1` 的方式来跨库使用表。
+```
+SELECT * FROM sample.temp_tb_1;
+------------------------------
+use sample;
+SELECT * FROM temp_tb_1;
+```
+From子句中的列表可以使用别名来让SQL整体更加简洁。
+```
+SELECT t.ts FROM temp_tb_1 t;
+```
+> 暂不支持FROM子句的表别名
+
+#### 特殊功能
+部分特殊的查询功能可以不使用FROM子句执行。例如,获取当前所在的数据库:database()。
+```
+taos> SELECT database();
+database()                       |
+=================================
+sample                           |
+```
+如果登录的时候没有指定默认数据库,且没有使用`use`命令切换数据库,则返回NULL。
+```
+taos> select database();
+database()                       |
+=================================
+NULL                             |
+```
+获取服务器和客户端版本号:
+```
+SELECT client_version()
+SELECT server_version()
+```
+服务器状态检测语句。如果服务器正常,返回一个数字(例如 1);如果服务器异常,返回error code。该SQL语法能兼容连接池对于TDengine状态的检查及第三方工具对于数据库服务器状态的检查,并可以避免出现因使用错误的心跳检测SQL语句导致连接池连接丢失的问题。
+```
+SELECT server_status()
+SELECT server_status() AS result
+```
+#### TAOS SQL中的特殊关键词
+
+ > TBNAME:在超级表查询中可视为一个特殊的标签,代表查询涉及的子表名
+ > \_c0:表示表(超级表)的第一列
+
+#### 小技巧
+获取一个超级表所有的子表名及相关的标签信息:
+```
+SELECT TBNAME, location FROM temp_stable
+```
+统计超级表下辖子表数量:
+```
+SELECT COUNT(TBNAME) FROM temp_stable
+```
+以上两个查询均只支持在Where条件子句中添加针对标签(TAGS)的过滤条件。例如:
+```
+taos> select count(tbname) from temp_stable;
+count(tbname)        |
+======================
+2                    |
+
+taos> select count(tbname) from temp_stable where deviceid > 60000;
+count(tbname)        |
+======================
+1                    |
+```
 - 可以使用* 返回所有列,或指定列名。可以对数字列进行四则运算,可以给输出的列取列名
@@ -238,7 +423,7 @@ SELECT function_list FROM tb_name
 
 ###聚合函数
 
-TDengine支持针对数据的聚合查询。提供支持的聚合和提取函数如下表:
+TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数如下:
 
 - **COUNT**
   ```mysql
   SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]
   ```
   功能说明:统计表/超级表中记录行数或某列的非空值个数。
   返回结果数据类型:长整型INT64。
   应用字段:应用全部字段。
   适用于:表、超级表。
 
-- **WAVG**
+- **TWA**
   ```mysql
-  SELECT WAVG(field_name) FROM tb_name WHERE clause
+  SELECT TWA(field_name) FROM tb_name WHERE clause
   ```
-  功能说明:统计表/超级表中某列在一段时间内的时间加权平均。
+  功能说明:时间加权平均函数。统计表/超级表中某列在一段时间内的时间加权平均。
   返回结果数据类型:双精度浮点数Double。
-  应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
+  应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
+  说明:时间加权平均(time weighted average, TWA)查询需要指定查询时间段的 _开始时间_ 和 _结束时间_ 。
   适用于:表、超级表。
 
@@ -371,7 +557,15 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和提取函数
   应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
   说明:*k*值取值范围0≤*k*≤100,为0的时候等同于MIN,为100的时候等同于MAX。
-
+- **APERCENTILE**
+  ```mysql
+  SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
+  ```
+  功能说明:统计表中某列的值的百分比分位数,与PERCENTILE函数相似,但是返回近似结果。
+  返回结果数据类型:双精度浮点数Double。
+  应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
+  说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用`APERCENTILE`函数,该函数性能远胜于`PERCENTILE`函数。
+
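+  例如,求 temperature 列的近似90分位数(示意写法):
+  ```mysql
+  SELECT APERCENTILE(temperature, 90) FROM temp_stable;
+  ```
+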
 - **LAST_ROW**
   ```mysql
   SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }
diff --git a/documentation/webdocs/markdowndocs/administrator-ch.md b/documentation/webdocs/markdowndocs/administrator-ch.md
index 2a250a916c13ff2957f1e0bdb21096ed947b4b01..35beb610f2789ea9d521eaa0314160a4ddff4025 100644
--- a/documentation/webdocs/markdowndocs/administrator-ch.md
+++ b/documentation/webdocs/markdowndocs/administrator-ch.md
@@ -337,6 +337,8 @@ TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。
 insert into tb1 file a.csv b.csv tb2 c.csv …
 import into tb1 file a.csv b.csv tb2 c.csv …
 ```
+> 注意:导入的CSV文件不能带表头,且表的列与CSV文件的列需要严格对应。
+> 此外,还可以使用[样例数据导入工具][1]对数据进行横向和纵向扩展导入。
 
 ## 数据导出
 
@@ -407,3 +409,6 @@ KILL STREAM
 TDengine启动后,会自动创建一个监测数据库`LOG`,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在`LOG`库里。系统管理员可以通过客户端程序查看记录库中的运行负载信息,(在企业版中)还可以通过浏览器查看数据的图标可视化结果。
 
 这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项`monitor`将其关闭或打开。
+
+
+[1]: https://github.com/taosdata/TDengine/tree/develop/importSampleData
\ No newline at end of file
diff --git a/documentation/webdocs/markdowndocs/advanced features-ch.md b/documentation/webdocs/markdowndocs/advanced features-ch.md
index 14a2801209c9b92ed16d38ed220cee5c3684cd4d..4d01eaf364cfe17b4cb4658dc8596fbb52a65ae2 100644
--- a/documentation/webdocs/markdowndocs/advanced features-ch.md
+++ b/documentation/webdocs/markdowndocs/advanced features-ch.md
@@ -63,28 +63,11 @@ CREATE TABLE QUERY_RES
 
 ## 数据订阅(Publisher/Subscriber)
 基于数据天然的时间序列特性,TDengine的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致,均可视为系统中插入一条带时间戳的新记录。同时,TDengine在内部严格按照数据时间序列单调递增的方式保存数据。本质上来说,TDengine中里每一张表均可视为一个标准的消息队列。
 
-TDengine内嵌支持轻量级的消息订阅与推送服务。使用系统提供的API,用户可订阅数据库中的某一张表(或超级表)。订阅的逻辑和操作状态的维护均是由客户端完成,客户端定时轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户。
+TDengine内嵌支持轻量级的消息订阅与推送服务。使用系统提供的API,用户可使用普通查询语句订阅数据库中的一张或多张表。订阅的逻辑和操作状态的维护均由客户端完成,客户端定时轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈给客户端。
 
 TDengine的订阅与推送服务的状态是客户端维持,TDengine服务器并不维持。因此如果应用重启,从哪个时间点开始获取最新数据,由应用决定。
 
-#### API说明
-
-使用订阅的功能,主要API如下:
-
-
-示例代码:请看安装包中的的示范程序
+订阅相关API请见 [连接器](https://www.taosdata.com/cn/documentation/connector/)。
 
 ## 缓存 (Cache)
 TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Use,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。
diff --git a/documentation/webdocs/markdowndocs/connector-ch.md b/documentation/webdocs/markdowndocs/connector-ch.md
index 23bc6a9f6c8f26fbea2c883e6625e7544f76a2bc..b5d8fb5afb12ede82f2cdcd9ea29e20e8a82d6b8 100644
--- a/documentation/webdocs/markdowndocs/connector-ch.md
+++ b/documentation/webdocs/markdowndocs/connector-ch.md
@@ -164,27 +164,36 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
 
 ### C/C++ 数据订阅接口
 
-订阅API目前支持订阅一张表,并通过定期轮询的方式不断获取写入表中的最新数据。
+订阅API目前支持订阅一张或多张表,并通过定期轮询的方式不断获取写入表中的最新数据。
 
-- `TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds)`
+* `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)`
 
-  该API用来启动订阅,需要提供的参数包含:TDengine管理主节点的IP地址、用户名、密码、数据库、数据库表的名字;time是开始订阅消息的时间,是从1970年1月1日起计算的毫秒数,为长整型, 如果设为0,表示从当前时间开始订阅;mseconds为查询数据库更新的时间间隔,单位为毫秒,建议设为1000毫秒。返回值为一指向TDengine_SUB结构的指针,如果返回为空,表示失败。
+  该函数负责启动订阅服务,成功时返回订阅对象,失败时返回 `NULL`,其参数为:
+  * taos:已经建立好的数据库连接
+  * restart:如果订阅已经存在,是重新开始,还是继续之前的订阅
+  * topic:订阅的主题(即名称),此参数是订阅的唯一标识
+  * sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
+  * fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL`
+  * param:调用回调函数时的附加参数,系统API将其原样传递到回调函数,不进行任何处理
+  * interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性地调用回调函数,为避免对系统性能造成影响,不建议将此参数设置得过小;同步调用时,如两次调用`taos_consume`的间隔小于此周期,API将会阻塞,直到时间间隔超过此周期。
 
-- `TAOS_ROW taos_consume(TAOS_SUB *tsub)`
+* `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
 
-  该API用来获取最新消息,应用程序一般会将其置于一个无限循环语句中。其中参数tsub是taos_subscribe的返回值。如果数据库有新的记录,该API将返回,返回参数是一行记录。如果没有新的记录,该API将阻塞。如果返回值为空,说明系统出错,需要检查系统是否还在正常运行。
+  异步模式下回调函数的原型,其参数为:
+  * tsub:订阅对象
+  * res:查询结果集,注意结果集中可能没有记录
+  * param:调用 `taos_subscribe` 时客户程序提供的附加参数
+  * code:错误码
 
-- `void taos_unsubscribe(TAOS_SUB *tsub)`
-  该API用于取消订阅,参数tsub是taos_subscribe的返回值。应用程序退出时,需要调用该API,否则有资源泄露。
+* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
 
-- `int taos_num_subfields(TAOS_SUB *tsub)`
+  同步模式下,该函数用来获取订阅的结果。用户应用程序将其置于一个循环之中。如两次调用`taos_consume`的间隔小于订阅的轮询周期,API将会阻塞,直到时间间隔超过此周期。如果数据库有新记录到达,该API将返回最新的记录,否则返回一个没有记录的空结果集。如果返回值为 `NULL`,说明系统出错。异步模式下,用户程序不应调用此API。
 
-  该API用来获取返回的一排数据中数据的列数
+* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
 
-- `TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub)`
+  取消订阅。如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
 
-  该API用来获取每列数据的属性(数据类型、名字、字节数),与taos_num_subfields配合使用,可用来解析返回的一排数据。
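+
+下面是一个异步模式的使用示意(仅为示例草图,连接参数与表名均为假设值,完整的错误处理从略):
+
+```c
+#include <stdio.h>
+#include "taos.h"
+
+// 异步模式下,每个轮询周期到达时由API调用本回调函数
+void subscribe_callback(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code) {
+  if (code != 0 || res == NULL) return;  // 出错时直接返回
+  TAOS_ROW row;
+  while ((row = taos_fetch_row(res)) != NULL) {
+    // 在此处理新到达的记录
+  }
+}
+
+int main() {
+  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
+  if (taos == NULL) return 1;
+
+  // restart 传 1 表示从头开始读取;轮询周期为 1000 毫秒
+  TAOS_SUB *tsub = taos_subscribe(taos, 1, "demo-topic", "select * from meters;", subscribe_callback, NULL, 1000);
+  if (tsub == NULL) return 1;
+
+  getchar();  // 等待用户输入后退出,期间回调函数被周期性调用
+
+  taos_unsubscribe(tsub, 0);  // keepProgress 为 0,删除进度信息
+  taos_close(taos);
+  return 0;
+}
+```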
 
 ## Java Connector
@@ -198,38 +207,38 @@ TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API
 * taos.dll
 
   在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
 
-> 注意:在 windows 环境开发时需要安装 TDengine 对应的 windows 版本客户端,由于目前没有提供 Linux 环境单独的客户端,需要安装 TDengine 才能使用。
+> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14];Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
 
 TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点:
 
 * TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。
 * 由于不支持删除和修改,所以也不支持事务操作。
 * 目前不支持表间的 union 操作。
-* 目前不支持嵌套查询(nested query),`对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet`。
+* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。
 
 
 ## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
 
-| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
-| --- | --- | --- |
+| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
+| --- | --- | --- |
 | 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
-| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
-| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
+| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
+| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
 
 ## TDengine DataType 和 Java DataType
 
 TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
 
-| TDengine DataType | Java DataType |
-| --- | --- |
-| TIMESTAMP | java.sql.Timestamp |
-| INT | java.lang.Integer |
-| BIGINT | java.lang.Long |
-| FLOAT | java.lang.Float |
-| DOUBLE | java.lang.Double |
+| TDengine DataType | Java DataType |
+| --- | --- |
+| TIMESTAMP | java.sql.Timestamp |
+| INT | java.lang.Integer |
+| BIGINT | java.lang.Long |
+| FLOAT | java.lang.Float |
+| DOUBLE | java.lang.Double |
 | SMALLINT, TINYINT |java.lang.Short |
-| BOOL | java.lang.Boolean |
-| BINARY, NCHAR | java.lang.String |
+| BOOL | java.lang.Boolean |
+| BINARY, NCHAR | java.lang.String |
 
 ## 如何获取 TAOS-JDBCDriver
@@ -579,13 +588,34 @@ data = c1.fetchall()
 numOfRows = c1.rowcount
 numOfCols = len(c1.description)
 for irow in range(numOfRows):
-    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2])
+    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
 
 # 直接使用cursor 循环拉取查询结果
 c1.execute('select * from tb')
 for data in c1:
-    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2])
+    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
 ```
+
+* 创建订阅
+```python
+# 创建一个主题为 'test'、消费周期为1000毫秒的订阅
+# 第一个参数为 True 表示重新开始订阅;如为 False 且之前创建过主题为 'test' 的订阅,
+# 则表示继续消费此订阅的数据,而不是重新开始消费所有数据
+sub = conn.subscribe(True, "test", "select * from meters;", 1000)
+```
+
+* 消费订阅的数据
+```python
+data = sub.consume()
+for d in data:
+    print(d)
+```
+
+* 取消订阅
+```python
+sub.close()
+```
+
 * 关闭连接
 ```python
 c1.close()
 ```
@@ -807,6 +837,8 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间
 
 ## Go Connector
 
+### Linux环境
+
 #### 安装TDengine
 
 Go的连接器使用到了 libtaos.so 和taos.h,因此,在使用Go连接器之前,需要在程序运行的机器上安装TDengine以获得相关的驱动文件。
@@ -867,7 +899,14 @@ taosSql驱动包内采用cgo模式,调用了TDengine的C/C++同步接口,与
 
 3. 创建表、写入和查询数据
 
-在创建好了数据库后,就可以开始创建表和写入查询数据了。这些操作的基本思路都是首先组装SQL语句,然后调用db.Exec执行,并检查错误信息和执行相应的处理。可以参考上面的样例代码
+在创建好了数据库后,就可以开始创建表和写入查询数据了。这些操作的基本思路都是首先组装SQL语句,然后调用db.Exec执行,并检查错误信息和执行相应的处理。可以参考上面的样例代码。
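+
+一个极简的Go使用示意如下(仅为示例草图,DSN中的地址、库名以及驱动包的导入路径均为假设值,实际写法请以驱动说明为准):
+
+```go
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+
+	_ "taosSql" // 按实际的taosSql驱动包导入路径填写
+)
+
+func main() {
+	// 打开连接:driverName 为 taosSql,DSN 的具体格式以驱动文档为准
+	db, err := sql.Open("taosSql", "root:taosdata@/tcp(127.0.0.1:0)/demodb")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// 组装SQL并用 db.Exec 执行:建表、写入
+	if _, err := db.Exec("create table if not exists t (ts timestamp, v int)"); err != nil {
+		log.Fatal(err)
+	}
+	if _, err := db.Exec("insert into t values (now, 1)"); err != nil {
+		log.Fatal(err)
+	}
+
+	// 查询并逐行读取结果
+	rows, err := db.Query("select ts, v from t")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer rows.Close()
+	for rows.Next() {
+		var ts string
+		var v int
+		if err := rows.Scan(&ts, &v); err != nil {
+			log.Fatal(err)
+		}
+		fmt.Println(ts, v)
+	}
+}
+```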
+
+### Windows环境
+
+在Windows上使用Go,请参考
+[TDengine GO windows驱动的编译和使用](https://www.taosdata.com/blog/2020/01/06/tdengine-go-windows%E9%A9%B1%E5%8A%A8%E7%9A%84%E7%BC%96%E8%AF%91/)
+
+
 
 ## Node.js Connector
@@ -1054,6 +1093,8 @@ https://gitee.com/maikebing/Maikebing.EntityFrameworkCore.Taos
 ├───├── jdbc
 ├───└── python
 ├── driver
+├───├── libtaos.dll
+├───├── libtaos.dll.a
 ├───├── taos.dll
 ├───├── taos.exp
 ├───└── taos.lib
@@ -1078,8 +1119,8 @@ https://gitee.com/maikebing/Maikebing.EntityFrameworkCore.Taos
 + Client可执行文件: C:/TDengine/taos.exe
 + 配置文件: C:/TDengine/cfg/taos.cfg
-+ C驱动程序目录: C:/TDengine/driver
-+ C驱动程序头文件: C:/TDengine/include
++ 驱动程序目录: C:/TDengine/driver
++ 驱动程序头文件: C:/TDengine/include
 + JDBC驱动程序目录: C:/TDengine/connector/jdbc
 + GO驱动程序目录:C:/TDengine/connector/go
 + Python驱动程序目录:C:/TDengine/connector/python
@@ -1106,6 +1147,14 @@ taos -h
 
 TDengine在Window系统上提供的API与Linux系统是相同的, 应用程序使用时,需要包含TDengine头文件taos.h,连接时需要链接TDengine库taos.lib,运行时将taos.dll放到可执行文件目录下。
 
+#### Go接口注意事项
+
+TDengine在Windows系统上提供的API与Linux系统是相同的,应用程序使用时,除了需要Go的驱动包(C:\TDengine\connector\go)外,还需要包含TDengine头文件taos.h,连接时需要链接TDengine库libtaos.dll、libtaos.dll.a(C:\TDengine\driver),运行时将libtaos.dll、libtaos.dll.a放到可执行文件目录下。
+
+使用参考请见:
+
+[TDengine GO windows驱动的编译和使用](https://www.taosdata.com/blog/2020/01/06/tdengine-go-windows%E9%A9%B1%E5%8A%A8%E7%9A%84%E7%BC%96%E8%AF%91/)
+
 #### JDBC接口注意事项
 
 在Windows系统上,应用程序可以使用JDBC接口来操纵数据库,使用JDBC接口的注意事项如下:
@@ -1121,6 +1170,49 @@ TDengine在Window系统上提供的API与Linux系统是相同的, 应用程序
 
 + 将Windows开发包(taos.dll)放置到system32目录下。
 
+## Mac客户端及程序接口
+
+### 客户端安装
+
+在Mac操作系统下,TDengine提供64位的Mac客户端([2月10日起提供下载](https://www.taosdata.com/cn/all-downloads/#tdengine_mac-list)),客户端安装程序为.tar.gz文件,解压并运行其中的install_client.sh后即可完成安装,安装路径为/usr/local/taos。客户端目录结构如下:
+
+```
+├── cfg
+├───└── taos.cfg
+├── connector
+├───├── go
+├───├── grafana
+├───├── jdbc
+├───└── python
+├── driver
+├───├── libtaos.1.6.5.1.dylib
+├── examples
+├───├── bash
+├───├── c
+├───├── C#
+├───├── go
+├───├── JDBC
+├───├── lua
+├───├── matlab
+├───├── nodejs
+├───├── python
+├───├── R
+├───└── rust
+├── include
+├───└── taos.h
+└── bin
+├───└── taos
+```
+
+其中,最常用的文件列出如下:
+
++ Client可执行文件: /usr/local/taos/bin/taos 软链接到 /usr/local/bin/taos
++ 配置文件: /usr/local/taos/cfg/taos.cfg 软链接到 /etc/taos/taos.cfg
++ 驱动程序目录: /usr/local/taos/driver/libtaos.1.6.5.1.dylib 软链接到 /usr/local/lib/libtaos.dylib
++ 驱动程序头文件: /usr/local/taos/include/taos.h 软链接到 /usr/local/include/taos.h
++ 日志目录(第一次运行程序时生成):~/TDengineLog
+
+
 [1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
 [2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
@@ -1135,3 +1227,5 @@ TDengine在Window系统上提供的API与Linux系统是相同的, 应用程序
 [11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
 [12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
 [13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
+[14]: https://www.taosdata.com/cn/documentation/connector/#Windows%E5%AE%A2%E6%88%B7%E7%AB%AF%E5%8F%8A%E7%A8%8B%E5%BA%8F%E6%8E%A5%E5%8F%A3
+[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
diff --git a/importSampleData/README.md b/importSampleData/README.md
index 0678676d4e85d568068dfa138904baf7a8ef03e5..ee3a6e073c18b618af49a9c0b6d2d6d07718f00f 100644
--- a/importSampleData/README.md
+++ b/importSampleData/README.md
@@ -97,6 +97,10 @@ go build -o bin/taosimport app/main.go
 
   是否保存统计信息到 tdengine 的 statistic 表中,1 是,0 否, 默认 0。
 
+* -savetb string
+
+  当 save 为 1 时保存统计信息的表名, 默认 statistic。
+
 * -auto int
 
   是否自动生成样例数据中的主键时间戳,1 是,0 否, 默认 0。
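+例如,导入时把统计信息保存到自定义的表中(示意,参数取值仅供参考):
+
+```sh
+bin/taosimport -save 1 -savetb my_statistic
+```
+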
diff --git a/importSampleData/app/main.go b/importSampleData/app/main.go
index 087b7bb7614e8a03da0ce9fae0c5693340314236..aef413320710012fec79e56677e16864a881ca8f 100644
--- a/importSampleData/app/main.go
+++ b/importSampleData/app/main.go
@@ -28,6 +28,7 @@ const (
 	DEFAULT_STARTTIME int64 = -1
 	DEFAULT_INTERVAL int64 = 1*1000
 	DEFAULT_DELAY int64 = -1
+	DEFAULT_STATISTIC_TABLE = "statistic"
 
 	JSON_FORMAT = "json"
 	CSV_FORMAT = "csv"
@@ -37,7 +38,6 @@ const (
 	DRIVER_NAME = "taosSql"
 	STARTTIME_LAYOUT = "2006-01-02 15:04:05.000"
 	INSERT_PREFIX = "insert into "
-	STATISTIC_TABLE = "statistic"
 )
 
 var (
@@ -75,6 +75,7 @@ var (
 	delay int64 // default 10 milliseconds
 	tick int64
 	save int
+	saveTable string
 )
 
 type superTableConfig struct {
@@ -278,9 +279,9 @@ func staticSpeed(){
 
 	if save == 1 {
 		connection.Exec("use " + db)
-		_, err := connection.Exec("create table if not exists " + STATISTIC_TABLE +"(ts timestamp, speed int)")
+		_, err := connection.Exec("create table if not exists " + saveTable +"(ts timestamp, speed int)")
 		if err != nil {
-			log.Fatalf("create %s Table error: %s\n", STATISTIC_TABLE, err)
+			log.Fatalf("create %s Table error: %s\n", saveTable, err)
 		}
 	}
@@ -297,7 +298,7 @@ func staticSpeed(){
 		log.Printf("insert %d rows, used %d ms, speed %d rows/s", currentSuccessRows, usedTime/1e6, speed)
 
 		if save == 1 {
-			insertSql := fmt.Sprintf("insert into %s values(%d, %d)", STATISTIC_TABLE, currentTime.UnixNano()/1e6, speed)
+			insertSql := fmt.Sprintf("insert into %s values(%d, %d)", saveTable, currentTime.UnixNano()/1e6, speed)
 			connection.Exec(insertSql)
 		}
@@ -353,7 +354,7 @@ func createStatisticTable(){
 	connection := getConnection()
 	defer connection.Close()
 
-	_, err := connection.Exec("create table if not exist " + db + "."+ STATISTIC_TABLE +"(ts timestamp, speed int)")
+	_, err := connection.Exec("create table if not exists " + db + "."+ saveTable +"(ts timestamp, speed int)")
 	if err != nil {
 		log.Fatalf("createStatisticTable error: %s\n", err)
 	}
@@ -1037,6 +1038,7 @@ func parseArg() {
 	flag.Int64Var(&delay, "delay", DEFAULT_DELAY, "the delay time interval(millisecond) to continue generating data when vnum set 0.")
 	flag.Int64Var(&tick, "tick", 2000, "the tick time interval(millisecond) to print statistic info.")
 	flag.IntVar(&save, "save", 0, "whether to save the statistical info into 'statistic' table. 0 is disabled and 1 is enabled.")
+	flag.StringVar(&saveTable, "savetb", DEFAULT_STATISTIC_TABLE, "the table to save 'statistic' info when save set 1.")
 	flag.IntVar(&thread, "thread", 10, "number of threads to import data.")
 	flag.IntVar(&batch, "batch", 100, "rows of records in one import batch.")
 	flag.IntVar(&auto, "auto", 0, "whether to use the starttime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.")
@@ -1062,6 +1064,7 @@ func printArg() {
 	fmt.Println("-delay:", delay)
 	fmt.Println("-tick:", tick)
 	fmt.Println("-save:", save)
+	fmt.Println("-savetb:", saveTable)
 	fmt.Println("-thread:", thread)
 	fmt.Println("-batch:", batch)
 	fmt.Println("-auto:", auto)
diff --git a/importSampleData/bin/taosimport b/importSampleData/bin/taosimport
index 1cb3c12926ec6657190471ea590e79f4a6b191b6..b042549341bced364e0fd77909b115d1b5b6dc04 100755
Binary files a/importSampleData/bin/taosimport and b/importSampleData/bin/taosimport differ
diff --git a/importSampleData/dashboard/sensor_info.json b/importSampleData/dashboard/sensor_info.json
index 414fa7e8839a5957898a6ed500a863b554255082..6dcf5505f2a1a2db3a10cb9c7bed47ac5dc3687c 100644
--- a/importSampleData/dashboard/sensor_info.json
+++ b/importSampleData/dashboard/sensor_info.json
@@ -15,7 +15,7 @@
   "editable": true,
   "gnetId": null,
   "graphTooltip": 0,
-  "id": 4,
+  "id": 7,
   "links": [],
   "panels": [
     {
@@ -84,7 +84,7 @@
       {
         "alias": "lastest_temperature",
         "refId": "A",
-        "sql": "select last(temperature) from test.s_sensor_info where location = 'beijing' and ts > now - 1h and ts < now interval(1m)",
+        "sql": "select ts, temp from test.stream_temp_last where ts >= $from and ts < $to",
         "target": "select metric",
         "type": "timeserie"
       }
@@ -116,9 +116,10 @@
       "options": {
         "fieldOptions": {
           "calcs": [
-            "mean"
+            "last"
           ],
           "defaults": {
+            "decimals": 2,
             "mappings": [],
             "max": 100,
             "min": 0,
@@ -146,7 +147,7 @@
       {
         "alias": "maxHumidity",
         "refId": "A",
-        "sql": "select max(humidity) from test.s_sensor_info where location = 'beijing' and ts > now -1h and ts < now",
+        "sql": "select ts, humidity from test.stream_humidity_max where ts >= $from and ts < $to",
         "target": "select metric",
         "type": "timeserie"
       }
@@ -198,7 +199,7 @@
       {
         "alias": "avgTemperature",
         "refId": "A",
-        "sql": "select avg(temperature) from test.s_sensor_info where location = 'beijing' and ts > now -1h and ts < now interval(1m)",
+        "sql": "select ts, temp from test.stream_temp_avg where ts >= $from and ts < $to",
         "target": "select metric",
         "type": "timeserie"
       }
@@ -284,23 +285,23 @@
       "steppedLine": false,
       "targets": [
         {
-          "alias": "avg",
+          "alias": "max",
           "refId": "A",
-          "sql": "select max(temperature) from test.t_0_sensor_info where ts > now -1h and ts < now interval(1m)",
+          "sql": "select ts, max_temp from test.stream_sensor where ts >= $from and ts < $to",
           "target": "select metric",
           "type": "timeserie"
         },
         {
-          "alias": "max",
+          "alias": "avg",
           "refId": "B",
-          "sql": "select avg(temperature) from test.t_0_sensor_info where ts > now -1h and ts < now interval(1m)",
+          "sql": "select ts, avg_temp from test.stream_sensor where ts >= $from and ts < $to",
           "target": "select metric",
           "type": "timeserie"
         },
         {
           "alias": "min",
           "refId": "C",
-          "sql": "select min(temperature) from test.t_0_sensor_info where ts > now -1h and ts < now interval(1m)",
+          "sql": "select ts, min_temp from test.stream_sensor where ts >= $from and ts < $to",
           "target": "select metric",
           "type": "timeserie"
         }
@@ -355,7 +356,7 @@
     "list": []
   },
   "time": {
-    "from": "now-1h",
+    "from": "now-5m",
     "to": "now"
   },
   "timepicker": {
diff --git a/minidevops/README.MD b/minidevops/README.MD
new file mode 100644
index 0000000000000000000000000000000000000000..f9ec4a8f190071d98d4b17de5b0bf3671bd38d6a
--- /dev/null
+++ b/minidevops/README.MD
@@ -0,0 +1,219 @@
+# 一分钟快速搭建一个DevOps监控系统
+为了让更多DevOps领域的开发者快速体验TDengine的优秀特性,本文介绍一种快速搭建DevOps性能监控系统的demo,方便大家了解TDengine,并基于此文拓展DevOps领域的应用。
+为了快速上手,本文用到的软件全部采用Docker容器方式部署,大家只需要安装Docker软件,就可以直接通过脚本运行所有软件,无需逐个安装。这个Demo用到了以下Docker容器,都可以从Dockerhub上拉取相关镜像:
+- tdengine/tdengine:1.6.4.5 TDengine开源版1.6.4.5的镜像
+- tdengine/blm_telegraf:latest 用于telegraf写入TDengine的API,可以以schemaless方式将telegraf的数据写入TDengine
+- tdengine/blm_prometheus:latest 用于Prometheus写入TDengine的API,可以以schemaless方式将Prometheus的数据写入TDengine
+- grafana/grafana Grafana的镜像,一个广泛应用的开源可视化监控软件
+- telegraf:latest 一个广泛应用的开源数据采集程序
+- prom/prometheus:latest 一个广泛应用的k8s领域的开源数据采集程序
+## 说明
+本文中的图片链接在GitHub上显示不出来,建议将MD文件下载后用vscode或其他md文件浏览工具进行查看
+## 前提条件
+1. 一台linux服务器或运行linux操作系统的虚拟机,或者运行MacOS的计算机
+2. 安装了Docker软件。Docker软件的安装方法请参考linux下安装Docker
+3. sudo权限
+4. 下载本文用到的配置文件和脚本压缩包:[下载地址](http://www.taosdata.com/download/minidevops.tar.gz)
+
+压缩包下载下来后解压,生成一个minidevops的文件夹,其结构如下:
+```sh
+minidevops$ tree
+.
+├── demodashboard.json
+├── grafana
+│   └── tdengine
+│   ├── README.md
+│   ├── css
+│   │   └── query-editor.css
+│   ├── datasource.js
+│   ├── img
+│   │   └── taosdata_logo.png
+│   ├── module.js
+│   ├── partials
+│   │   ├── config.html
+│   │   └── query.editor.html
+│   ├── plugin.json
+│   └── query_ctrl.js
+├── prometheus
+│   └── prometheus.yml
+├── run.sh
+└── telegraf
+    └── telegraf.conf
+```
+`grafana`子文件夹里是TDengine的插件,用于在grafana中导入TDengine的数据源。
+`prometheus`子文件夹里是prometheus需要的配置文件。
+`run.sh`是运行脚本。
+`telegraf`子文件夹里是telegraf的配置文件。
+## 启动Docker镜像
+启动前,请确保系统里没有运行TDengine和Grafana,以及Telegraf和Prometheus,因为这些程序会占用docker所需的端口,造成脚本运行失败,建议先关闭这些程序。
+然后,只需在minidevops路径下执行:
+```sh
+sudo ./run.sh
+```
+我们来看看`run.sh`里干了些什么:
+```sh
+#!/bin/bash
+
+LP=`pwd`
+
+#为了让脚本能够顺利执行,避免重复执行时出现错误,首先将系统里所有docker容器停止。请注意,如果该linux上已经运行了其他docker容器,也会被停止掉。
+docker rm -f `docker ps -a -q`
+
+#专门创建一个叫minidevops的虚拟网络,并指定了172.15.1.1~255这个地址段。
+docker network create --ip-range 172.15.1.255/24 --subnet 172.15.1.1/16 minidevops
+
+#启动grafana程序,并将tdengine插件文件所在路径绑定到容器中
+docker run -d --net minidevops --ip 172.15.1.11 -v $LP/grafana:/var/lib/grafana/plugins -p 3000:3000 grafana/grafana
+
+#启动tdengine的docker容器,并指定IP地址为172.15.1.6,绑定需要的端口
+docker run -d --net minidevops --ip 172.15.1.6 -p 6030:6030 -p 6020:6020 -p 6031:6031 -p 6032:6032 -p 6033:6033 -p 6034:6034 -p 6035:6035 -p 6036:6036 -p 6037:6037 -p 6038:6038 -p 6039:6039 tdengine/tdengine:1.6.4.5
+
+#启动prometheus的写入代理程序,这个程序可以将prometheus发来的数据直接写入TDengine中,无需提前建立相关超级表和表,实现schemaless写入功能
+docker run -d --net minidevops --ip 172.15.1.7 -p 10203:10203 tdengine/blm_prometheus 172.15.1.6
+
+#启动telegraf的写入代理程序,这个程序可以将telegraf发来的数据直接写入TDengine中,无需提前建立相关超级表和表,实现schemaless写入功能
+docker run -d --net minidevops --ip 172.15.1.8 -p 10202:10202 tdengine/blm_telegraf 172.15.1.6
+
+#启动prometheus程序,并将配置文件所在路径绑定到容器中
+docker run -d --net minidevops --ip 172.15.1.9 -v $LP/prometheus:/etc/prometheus -p 9090:9090 prom/prometheus
+
+#启动telegraf程序,并将配置文件所在路径绑定到容器中
+docker run -d --net minidevops --ip 172.15.1.10 -v $LP/telegraf:/etc/telegraf -p 8092:8092 -p 8094:8094 -p 8125:8125 telegraf
+
+#通过Grafana的API,将TDengine配置成Grafana的datasources
+curl -X POST http://localhost:3000/api/datasources --header "Content-Type:application/json" -u admin:admin -d '{"Name": "TDengine","Type": "tdengine","TypeLogoUrl": "public/plugins/tdengine/img/taosdata_logo.png","Access": "proxy","Url": "http://172.15.1.6:6020","BasicAuth": false,"isDefault": true,"jsonData": {},"readOnly": false}'
+
+#通过Grafana的API,配置一个示范的监控面板
+curl -X POST http://localhost:3000/api/dashboards/db --header "Content-Type:application/json" -u admin:admin -d '{"dashboard":{"annotations":{"list":[{"builtIn":1,"datasource":"-- Grafana
--","enable":true,"hide":true,"iconColor":"rgba(0, 211, 255, 1)","name":"Annotations & Alerts","type":"dashboard"}]},"editable":true,"gnetId":null,"graphTooltip":0,"id":1,"links":[],"panels":[{"datasource":null,"gridPos":{"h":8,"w":6,"x":0,"y":0},"id":6,"options":{"fieldOptions":{"calcs":["mean"],"defaults":{"color":{"mode":"thresholds"},"links":[{"title":"","url":""}],"mappings":[],"max":100,"min":0,"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]},"unit":"percent"},"overrides":[],"values":false},"orientation":"auto","showThresholdLabels":false,"showThresholdMarkers":true},"pluginVersion":"6.6.0","targets":[{"refId":"A","sql":"select last_row(value) from telegraf.mem where field=\"used_percent\""}],"timeFrom":null,"timeShift":null,"title":"Memory used percent","type":"gauge"},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":6,"y":0},"hiddenSeries":false,"id":8,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"dataLinks":[]},"percentage":false,"pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"alias":"MEMUSED-PERCENT","refId":"A","sql":"select avg(value) from telegraf.mem where field=\"used_percent\" interval(1m)"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Panel Title","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"datasource":null,"gridPos":{"h":9,"w":6,"x":0,"y":8},"id":10,"options":{"fieldOptions":{"calcs":["mean"],"defaults":{"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null}]},"unit":"percent"},"overrides":[],"values":false},"orientation":"auto","showThresholdLabels":false,"showThresholdMarkers":true},"pluginVersion":"6.6.0","targets":[{"alias":"CPU-SYS","refId":"A","sql":"select last_row(value) from telegraf.cpu where field=\"usage_system\""},{"alias":"CPU-IDLE","refId":"B","sql":"select last_row(value) from telegraf.cpu where field=\"usage_idle\""},{"alias":"CPU-USER","refId":"C","sql":"select last_row(value) from telegraf.cpu where field=\"usage_user\""}],"timeFrom":null,"timeShift":null,"title":"Panel Title","type":"gauge"},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"TDengine","description":"General CPU monitor","fill":1,"fillGradient":0,"gridPos":{"h":9,"w":12,"x":6,"y":8},"hiddenSeries":false,"id":2,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"dataLinks":[]},"percentage":false,"pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"alias":"CPU-USER","refId":"A","sql":"select avg(value) from telegraf.cpu where field=\"usage_user\" and cpu=\"cpu-total\" interval(1m)"},{"alias":"CPU-SYS","refId":"B","sql":"select avg(value) from telegraf.cpu where field=\"usage_system\" and cpu=\"cpu-total\" 
interval(1m)"},{"alias":"CPU-IDLE","refId":"C","sql":"select avg(value) from telegraf.cpu where field=\"usage_idle\" and cpu=\"cpu-total\" interval(1m)"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"CPU","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"refresh":"10s","schemaVersion":22,"style":"dark","tags":["demo"],"templating":{"list":[]},"time":{"from":"now-3h","to":"now"},"timepicker":{"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"]},"timezone":"","title":"TDengineDashboardDemo","id":null,"uid":null,"version":0}}'
+```
+执行以上脚本后,可以通过docker container ls命令来确认容器运行的状态:
+```sh
+$docker container ls
+CONTAINER ID        IMAGE                       COMMAND                  CREATED             STATUS              PORTS                                                                                                  NAMES
+f875bd7d90d1        telegraf                    "/entrypoint.sh tele…"   6 hours ago         Up 6 hours          0.0.0.0:8092->8092/tcp, 8092/udp, 0.0.0.0:8094->8094/tcp, 8125/udp, 0.0.0.0:8125->8125/tcp            wonderful_antonelli
+38ee2d5c3cb3        prom/prometheus             "/bin/prometheus --c…"   6 hours ago         Up 6 hours          0.0.0.0:9090->9090/tcp                                                                                 infallible_mestorf
+1a1939386c07        tdengine/blm_telegraf       "/root/blm_telegraf …"   6 hours ago         Up 6 hours          0.0.0.0:10202->10202/tcp                                                                               stupefied_hypatia
+7063eb05caa4        tdengine/blm_prometheus     "/root/blm_prometheu…"   6 hours ago         Up 6 hours          0.0.0.0:10203->10203/tcp                                                                               jovial_feynman
+4a7b27931d21        tdengine/tdengine:1.6.4.5   "taosd"                  6 hours ago         Up 6 hours          0.0.0.0:6020->6020/tcp, 0.0.0.0:6030-6039->6030-6039/tcp, 6040-6050/tcp                               eager_kowalevski
+ad2895760bc0        grafana/grafana             "/run.sh"                6 hours ago         Up 6 hours          0.0.0.0:3000->3000/tcp                                                                                 romantic_mccarthy
+```
+当以上几个容器都已正常运行后,我们的demo小系统就开始工作了。
+## Grafana中进行配置
+打开浏览器,在地址栏输入服务器所在的IP地址:
+`http://localhost:3000`
+就可以访问到grafana的页面,如果不在本机打开浏览器,则将localhost改成server的ip地址即可。
+进入登录页面,用户名和密码都是缺省的admin,输入用户名/密码后,会进入修改密码页面,选择Skip跳过这一步,即可进入Grafana的控制台。进入Grafana后,可以在页面的左下角看到TDengineDashboardDemo已经创建好了。![](https://www.taosdata.com/blog/wp-content/uploads/2020/02/image2020-2-1_22-50-58-1024x465.png)有些浏览器打开时,home页面中可能没有TDengineDashboardDemo的选项,此时可以在Dashboard->Manage中选择![](https://www.taosdata.com/blog/wp-content/uploads/2020/02/2-1024x553.png)TDengineDashboardDemo。点击TDengineDashboardDemo进入示例监控面板。刚点进去页面时,监控曲线是空白的,因为监控数据还不够多,需要等待一段时间,让数据采集程序采集更多的数据。![](https://www.taosdata.com/blog/wp-content/uploads/2020/02/image-5-1024x853.png)
+
+如上两个监控面板分别监控了CPU和内存占用率。点击面板上的标题可以选择Edit进入编辑界面,新增监控数据。关于Grafana的监控面板设置,可以详细参考Grafana官网文档[Getting Started](https://grafana.com/docs/grafana/latest/guides/getting_started/)。
+
+## 原理介绍
+按上面的操作,我们已经将监控系统搭建起来了,目前可以监控系统的CPU占有率了。下面介绍一下这个Demo系统的工作原理。
+如下图所示,这个系统由数据采集功能(prometheus、telegraf)、时序数据库功能(TDengine和适配程序)、可视化功能(Grafana)组成。下面虚线框里的TDengine、blm_prometheus、blm_telegraf三个容器组成了一个支持schemaless写入的时序数据库;对于采用telegraf和prometheus作为采集程序的监控对象,可以直接将数据写入TDengine,并通过grafana进行可视化呈现。
+![architecture](https://www.taosdata.com/blog/wp-content/uploads/2020/02/image2020-1-29_21-22-6.png)
+### 数据采集
+数据采集由Telegraf和Prometheus完成。Telegraf根据配置,从操作系统层面采集系统的相关统计值,并按配置上报给指定的URL,上报的数据json格式为:
+```json
+{
+    "fields":{
+        "usage_guest":0,
+        "usage_guest_nice":0,
+        "usage_idle":87.73726273726274,
+        "usage_iowait":0,
+        "usage_irq":0,
+        "usage_nice":0,
+        "usage_softirq":0,
+        "usage_steal":0,
+        "usage_system":2.6973026973026974,
+        "usage_user":9.565434565434565
+    },
+    "name":"cpu",
+    "tags":{
"cpu":"cpu-total", + "host":"liutaodeMacBook-Pro.local" + }, + "timestamp":1571665100 +} +``` +其中name将被作为超级表的表名,tags作为普通表的tags,fields的名称也会作为一个tag用来描述普通表的标签。举个例子,一个普通表的结构如下,这是一个存储usage_softirq数据的普通表。 +![表结构](https://www.taosdata.com/blog/wp-content/uploads/2020/02/image2020-1-29_21-38-24.png) + +### Telegraf的配置 +对于使用telegraf作为数据采集程序的监控对象,可以在telegraf的配置文件telegraf.conf中将outputs.http部分的配置按以下配置修改,就可以直接将数据写入TDengine中了 +```toml +[[outputs.http]] +# ## URL is the address to send metrics to +url = "http://172.15.1.8:10202/telegraf" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# + +data_format = "json" +json_timestamp_units = "1ms" +``` +可以打开HTTP basic Auth验证机制,本Demo为了简化没有打开验证功能。 +对于多个被监控对象,只需要在telegraf.conf文件中都写上以上的配置内容,就可以将数据写入TDengine中了。 + +### Telegraf数据在TDengine中的存储结构 +Telegraf的数据在TDengine中的存储,是以数据name为超级表名,以tags值加上监控对象的ip地址,以及field的属性名作为tag值,存入TDengine中的。 +以name为cpu的数据为例,telegraf产生的数据为: +```json +{ + "fields":{ + "usage_guest":0, + "usage_guest_nice":0, + "usage_idle":87.73726273726274, + "usage_iowait":0, + "usage_irq":0, + "usage_nice":0, + "usage_softirq":0, + "usage_steal":0, + "usage_system":2.6973026973026974, + "usage_user":9.565434565434565 + }, + "name":"cpu", + "tags":{ + "cpu":"cpu-total", + "host":"liutaodeMacBook-Pro.local" + }, + "timestamp":1571665100 +} +``` +则写入TDengine时会自动存入一个名为cpu的超级表中,这个表的结构如下 +![telegraf表结构](https://www.taosdata.com/blog/wp-content/uploads/2020/02/image2020-2-2_0-37-49.png) +这个超级表的tag字段有cpu,host,srcip,field;其中cpu,host是原始数据携带的tag,而srcip是监控对象的IP地址,field是监控对象cpu类型数据中的fields属性,取值空间为[usage_guest,usage_guest_nice,usage_idle,usage_iowait,usage_irq,usage_nice,usage_softirq,usage_steal,usage_system,usage_user],每个field值对应着一个具体含义的数据。 + +因此,在查询的时候,可以用这些tag来过滤数据,也可以用超级表来聚合数据。 +### Prometheus的配置 +对于使用Prometheus作为数据采集程序的监控对象,可以在Prometheus的配置文件prometheus.yaml文件中,将remote write部分的配置按以下配置修改,就可以直接将数据写入TDengine中了。 +```yaml +remote_write: + - url: "http://172.15.1.7:10203/receive" +``` +对于多个被监控对象,只需要在每个被监控对象的prometheus配置中增加以上配置内容,就可以将数据写入TDengine中了。 +### Prometheus数据在TDengine中的存储结构 +Prometheus的数据在TDengine中的存储,与telegraf类似,也是以数据的name字段为超级表名,以数据的label作为tag值,存入TDengine中 +以prometheus_engine_queries这个数据为例[prom表结构](https://www.taosdata.com/blog/wp-content/uploads/2020/02/image2020-2-2_0-51-4.png) +在TDengine中会自动创建一个prometheus_engine_queries的超级表,tag字段为t_instance,t_job,t_monitor。 +查询时,可以用这些tag来过滤数据,也可以用超级表来聚合数据。 + +## 数据查询 +我们可以登陆到TDengine的客户端命令,通过命令行看看TDengine里面都存储了些什么数据,顺便也能体验一下TDengine的高性能查询。如何才能登陆到TDengine的客户端,我们可以通过以下几步来完成。 +首先通过下面的命令查询一下tdengine的Docker ID +```sh +docker container ls +``` +然后再执行 +```sh +docker exec -it tdengine的containerID bash +``` +就可以进入TDengine容器的命令行,执行taos,就进入以下界面![](https://www.taosdata.com/blog/wp-content/uploads/2020/02/image2020-1-29_21-55-53.png) +Telegraf的数据写入时,自动创建了一个名为telegraf的database,可以通过 +``` +use telegraf; +``` +使用telegraf这个数据库。然后执行show tables,describe table等命令详细查询下telegraf这个库里保存了些什么数据。 +具体TDengine的查询语句可以参考[TDengine官方文档](https://www.taosdata.com/cn/documentation/taos-sql/) +## 接入多个监控对象 +就像前面原理介绍的,这个miniDevops的小系统,已经提供了一个时序数据库和可视化系统,对于多台机器的监控,只需要将每台机器的telegraf或prometheus配置按上面所述修改,就可以完成监控数据采集和可视化呈现了。 diff --git a/minidevops/demodashboard.json b/minidevops/demodashboard.json new file mode 100644 index 0000000000000000000000000000000000000000..0b69ad9fe4a379f05f5a35e4a40f3e4ca2dd3178 --- /dev/null +++ b/minidevops/demodashboard.json @@ -0,0 +1,356 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", 
+ "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 1, + "links": [], + "panels": [ + { + "datasource": null, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 6, + "options": { + "fieldOptions": { + "calcs": [ + "mean" + ], + "defaults": { + "color": { + "mode": "thresholds" + }, + "links": [ + { + "title": "", + "url": "" + } + ], + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [], + "values": false + }, + "orientation": "auto", + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "6.6.0", + "targets": [ + { + "refId": "A", + "sql": "select last_row(value) from telegraf.mem where field=\"used_percent\"" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory used percent", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 6, + "y": 0 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "MEMUSED-PERCENT", + "refId": "A", + "sql": "select avg(value) from telegraf.mem where field=\"used_percent\" interval(1m)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "MEM", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": null, + "gridPos": { + "h": 3, + "w": 18, + "x": 0, + "y": 8 + }, + "id": 10, + "options": { + "displayMode": "lcd", + "fieldOptions": { + "calcs": [ + "mean" + ], + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percent" + }, + "overrides": [], + "values": false + }, + "orientation": "auto", + "showUnfilled": true + }, + "pluginVersion": "6.6.0", + "targets": [ + { + "alias": "CPU-SYS", + "refId": "A", + "sql": "select last_row(value) from telegraf.cpu where field=\"usage_system\"" + }, + { + "alias": "CPU-IDLE", + "refId": "B", + "sql": "select last_row(value) from telegraf.cpu where field=\"usage_idle\"" + }, + { + "alias": "CPU-USER", + "refId": "C", + "sql": "select last_row(value) from telegraf.cpu where field=\"usage_user\"" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU-USED", + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "TDengine", + "description": "General CPU monitor", + 
"fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 18, + "x": 0, + "y": 11 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "CPU-USER", + "refId": "A", + "sql": "select avg(value) from telegraf.cpu where field=\"usage_user\" and cpu=\"cpu-total\" interval(1m)" + }, + { + "alias": "CPU-SYS", + "refId": "B", + "sql": "select avg(value) from telegraf.cpu where field=\"usage_system\" and cpu=\"cpu-total\" interval(1m)" + }, + { + "alias": "CPU-IDLE", + "refId": "C", + "sql": "select avg(value) from telegraf.cpu where field=\"usage_idle\" and cpu=\"cpu-total\" interval(1m)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "10s", + "schemaVersion": 22, + "style": "dark", + "tags": [ + "demo" + ], + "templating": { + "list": [] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "TDengineDashboardDemo", + "uid": "2lF1wNUWz", + "version": 4 +} \ No newline at end of file diff --git a/minidevops/grafana/tdengine/README.md b/minidevops/grafana/tdengine/README.md new file mode 100644 index 0000000000000000000000000000000000000000..91dc73daf15c6d4979c41f61f96d87100ec948fc --- /dev/null +++ b/minidevops/grafana/tdengine/README.md @@ -0,0 +1,72 @@ +TDengine Datasource - build by Taosdata Inc. www.taosdata.com + +TDengine backend server implement 2 urls: + + * `/heartbeat` return 200 ok. Used for "Test connection" on the datasource config page. + * `/query` return data based on input sqls. + +## Installation + +To install this plugin: +Copy the data source you want to /var/lib/grafana/plugins/. Then restart grafana-server. The new data source should now be available in the data source type dropdown in the Add Data Source View. 
+ +``` +cp -r /connector/grafana/tdengine /var/lib/grafana/plugins/ +sudo service grafana-server restart +``` + +### Query API + +Example request +``` javascript +[{ + "refId": "A", + "alias": "taosd-memory", + "sql": "select avg(mem_taosd) from sys.dn where ts > now-5m and ts < now interval(500a)" +}, +{ + "refId": "B", + "alias": "system-memory", + "sql": "select avg(mem_system) from sys.dn where ts > now-5m and ts < now interval(500a)" +}] +``` + +Example response +``` javascript +[{ + "datapoints": [ + [206.488281, 1538137825000], + [206.488281, 1538137855000], + [206.488281, 1538137885500], + [210.609375, 1538137915500], + [210.867188, 1538137945500] + ], + "refId": "A", + "target": "taosd-memory" +}, +{ + "datapoints": [ + [2910.218750, 1538137825000], + [2912.265625, 1538137855000], + [2912.437500, 1538137885500], + [2916.644531, 1538137915500], + [2917.066406, 1538137945500] + ], + "refId": "B", + "target": "system-memory" +}] +``` + +### Heartbeat API + +Example request +``` javascript + get request +``` + +Example response +``` javascript +{ + "message": "Grafana server receive a quest from you!" +} +``` diff --git a/minidevops/grafana/tdengine/css/query-editor.css b/minidevops/grafana/tdengine/css/query-editor.css new file mode 100644 index 0000000000000000000000000000000000000000..3b678b9f3689d2131d2224826872b8a75cc1c9fe --- /dev/null +++ b/minidevops/grafana/tdengine/css/query-editor.css @@ -0,0 +1,3 @@ +.generic-datasource-query-row .query-keyword { + width: 75px; +} \ No newline at end of file diff --git a/minidevops/grafana/tdengine/datasource.js b/minidevops/grafana/tdengine/datasource.js new file mode 100644 index 0000000000000000000000000000000000000000..14eb8a9b3604f02a91ebf2d8a2c5c4f5cbacedb3 --- /dev/null +++ b/minidevops/grafana/tdengine/datasource.js @@ -0,0 +1,170 @@ +'use strict'; + +System.register(['lodash'], function (_export, _context) { + "use strict"; + var _, _createClass, GenericDatasource; + + function strTrim(str) { + return str.replace(/^\s+|\s+$/gm,''); + } + + function _classCallCheck(instance, Constructor) { + if (!(instance instanceof Constructor)) { + throw new TypeError("Cannot call a class as a function"); + } + } + + return { + setters: [function (_lodash) { + _ = _lodash.default; + }], + execute: function () { + _createClass = function () { + function defineProperties(target, props) { + for (var i = 0; i < props.length; i++) { + var descriptor = props[i]; + descriptor.enumerable = descriptor.enumerable || false; + descriptor.configurable = true; + if ("value" in descriptor) descriptor.writable = true; + Object.defineProperty(target, descriptor.key, descriptor); + } + } + + return function (Constructor, protoProps, staticProps) { + if (protoProps) defineProperties(Constructor.prototype, protoProps); + if (staticProps) defineProperties(Constructor, staticProps); + return Constructor; + }; + }(); + + _export('GenericDatasource', GenericDatasource = function () { + function GenericDatasource(instanceSettings, $q, backendSrv, templateSrv) { + _classCallCheck(this, GenericDatasource); + + this.type = instanceSettings.type; + this.url = instanceSettings.url; + this.name = instanceSettings.name; + this.q = $q; + this.backendSrv = backendSrv; + this.templateSrv = templateSrv; + //this.withCredentials = instanceSettings.withCredentials; + this.headers = { 'Content-Type': 'application/json' }; + var taosuser = instanceSettings.jsonData.user; + var taospwd = instanceSettings.jsonData.password; + if (taosuser == null || taosuser == undefined || taosuser == 
"") { + taosuser = "root"; + } + if (taospwd == null || taospwd == undefined || taospwd == "") { + taospwd = "taosdata"; + } + + this.headers.Authorization = "Basic " + this.encode(taosuser + ":" + taospwd); + } + + _createClass(GenericDatasource, [{ + key: 'encode', + value: function encode(input) { + var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; + var output = ""; + var chr1, chr2, chr3, enc1, enc2, enc3, enc4; + var i = 0; + while (i < input.length) { + chr1 = input.charCodeAt(i++); + chr2 = input.charCodeAt(i++); + chr3 = input.charCodeAt(i++); + enc1 = chr1 >> 2; + enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); + enc3 = ((chr2 & 15) << 2) | (chr3 >> 6); + enc4 = chr3 & 63; + if (isNaN(chr2)) { + enc3 = enc4 = 64; + } else if (isNaN(chr3)) { + enc4 = 64; + } + output = output + _keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4); + } + + return output; + } + }, { + key: 'generateSql', + value: function generateSql(sql, queryStart, queryEnd, intervalMs) { + if (queryStart == undefined || queryStart == null) { + queryStart = "now-1h"; + } + if (queryEnd == undefined || queryEnd == null) { + queryEnd = "now"; + } + if (intervalMs == undefined || intervalMs == null) { + intervalMs = "20000"; + } + + intervalMs += "a"; + sql = sql.replace(/^\s+|\s+$/gm, ''); + sql = sql.replace("$from", "'" + queryStart + "'"); + sql = sql.replace("$begin", "'" + queryStart + "'"); + sql = sql.replace("$to", "'" + queryEnd + "'"); + sql = sql.replace("$end", "'" + queryEnd + "'"); + sql = sql.replace("$interval", intervalMs); + + return sql; + } + }, { + key: 'query', + value: function query(options) { + var querys = new Array; + for (var i = 0; i < options.targets.length; ++i) { + var query = new Object; + + query.refId = options.targets[i].refId; + query.alias = options.targets[i].alias; + if (query.alias == null || query.alias == undefined) { + query.alias = ""; + } + + //query.sql = this.generateSql(options.targets[i].sql, options.range.raw.from, options.range.raw.to, options.intervalMs); + query.sql = this.generateSql(options.targets[i].sql, options.range.from.toISOString(), options.range.to.toISOString(), options.intervalMs); + console.log(query.sql); + + querys.push(query); + } + + if (querys.length <= 0) { + return this.q.when({ data: [] }); + } + + return this.doRequest({ + url: this.url + '/grafana/query', + data: querys, + method: 'POST' + }); + } + }, { + key: 'testDatasource', + value: function testDatasource() { + return this.doRequest({ + url: this.url + '/grafana/heartbeat', + method: 'GET' + }).then(function (response) { + if (response.status === 200) { + return { status: "success", message: "TDengine Data source is working", title: "Success" }; + } + }); + } + }, { + key: 'doRequest', + value: function doRequest(options) { + options.headers = this.headers; + //console.log(options); + return this.backendSrv.datasourceRequest(options); + } + }]); + + return GenericDatasource; + }()); + + _export('GenericDatasource', GenericDatasource); + } + }; +}); +//# sourceMappingURL=datasource.js.map diff --git a/minidevops/grafana/tdengine/img/taosdata_logo.png b/minidevops/grafana/tdengine/img/taosdata_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..88d3bacd09593f2735f279714f4a534624042838 Binary files /dev/null and b/minidevops/grafana/tdengine/img/taosdata_logo.png differ diff --git a/minidevops/grafana/tdengine/module.js b/minidevops/grafana/tdengine/module.js new file mode 100644 index 
0000000000000000000000000000000000000000..8592cf2375564deba7c37fa11ab21f57dc85e843 --- /dev/null +++ b/minidevops/grafana/tdengine/module.js @@ -0,0 +1,51 @@ +'use strict'; + +System.register(['./datasource', './query_ctrl'], function (_export, _context) { + "use strict"; + + var GenericDatasource, GenericDatasourceQueryCtrl, GenericConfigCtrl, GenericQueryOptionsCtrl, GenericAnnotationsQueryCtrl; + + function _classCallCheck(instance, Constructor) { + if (!(instance instanceof Constructor)) { + throw new TypeError("Cannot call a class as a function"); + } + } + + return { + setters: [function (_datasource) { + GenericDatasource = _datasource.GenericDatasource; + }, function (_query_ctrl) { + GenericDatasourceQueryCtrl = _query_ctrl.GenericDatasourceQueryCtrl; + }], + execute: function () { + _export('ConfigCtrl', GenericConfigCtrl = function GenericConfigCtrl() { + _classCallCheck(this, GenericConfigCtrl); + }); + + GenericConfigCtrl.templateUrl = 'partials/config.html'; + + _export('QueryOptionsCtrl', GenericQueryOptionsCtrl = function GenericQueryOptionsCtrl() { + _classCallCheck(this, GenericQueryOptionsCtrl); + }); + + GenericQueryOptionsCtrl.templateUrl = 'partials/query.options.html'; + + _export('AnnotationsQueryCtrl', GenericAnnotationsQueryCtrl = function GenericAnnotationsQueryCtrl() { + _classCallCheck(this, GenericAnnotationsQueryCtrl); + }); + + GenericAnnotationsQueryCtrl.templateUrl = 'partials/annotations.editor.html'; + + _export('Datasource', GenericDatasource); + + _export('QueryCtrl', GenericDatasourceQueryCtrl); + + _export('ConfigCtrl', GenericConfigCtrl); + + _export('QueryOptionsCtrl', GenericQueryOptionsCtrl); + + _export('AnnotationsQueryCtrl', GenericAnnotationsQueryCtrl); + } + }; +}); +//# sourceMappingURL=module.js.map diff --git a/minidevops/grafana/tdengine/partials/config.html b/minidevops/grafana/tdengine/partials/config.html new file mode 100644 index 0000000000000000000000000000000000000000..801a75327188e95fa437ee1aabe06e6d04101f0a --- /dev/null +++ b/minidevops/grafana/tdengine/partials/config.html @@ -0,0 +1,19 @@ +

+<h3 class="page-heading">TDengine Connection</h3>
+
+<div class="gf-form-group">
+  <div class="gf-form max-width-30">
+    <span class="gf-form-label width-7">Host</span>
+    <input type="text" class="gf-form-input" ng-model="ctrl.current.url" placeholder="http://localhost:6020" required>
+  </div>
+
+  <div class="gf-form max-width-30">
+    <span class="gf-form-label width-7">User</span>
+    <input type="text" class="gf-form-input" ng-model="ctrl.current.jsonData.user" placeholder="root">
+  </div>
+
+  <div class="gf-form max-width-30">
+    <span class="gf-form-label width-7">Password</span>
+    <input type="password" class="gf-form-input" ng-model="ctrl.current.jsonData.password" placeholder="taosdata">
+  </div>
+</div>
\ No newline at end of file diff --git a/minidevops/grafana/tdengine/partials/query.editor.html b/minidevops/grafana/tdengine/partials/query.editor.html new file mode 100644 index 0000000000000000000000000000000000000000..4f16dc2aa93d6709c2bb7820ec9f4adec6a8fc7d --- /dev/null +++ b/minidevops/grafana/tdengine/partials/query.editor.html @@ -0,0 +1,58 @@ + + +
+<query-editor-row query-ctrl="ctrl" can-collapse="true">
+
+<div class="gf-form-inline">
+  <div class="gf-form gf-form--grow">
+    <label class="gf-form-label query-keyword width-7">INPUT SQL</label>
+    <input type="text" class="gf-form-input" ng-model="ctrl.target.sql" spellcheck="false"
+           placeholder="select avg(value) from telegraf.cpu where ts >= $from and ts < $to interval($interval)"
+           ng-model-onblur ng-change="ctrl.panelCtrl.refresh()">
+  </div>
+</div>
+
+<div class="gf-form-inline">
+  <div class="gf-form">
+    <label class="gf-form-label query-keyword">ALIAS BY</label>
+    <input type="text" class="gf-form-input width-12" ng-model="ctrl.target.alias" spellcheck="false"
+           placeholder="alias" ng-model-onblur ng-change="ctrl.panelCtrl.refresh()">
+  </div>
+  <div class="gf-form">
+    <label class="gf-form-label query-keyword" ng-click="ctrl.generateSQL()">GENERATE SQL</label>
+  </div>
+  <div class="gf-form">
+    <label class="gf-form-label query-keyword" ng-click="ctrl.showHelp = !ctrl.showHelp">SHOW HELP</label>
+  </div>
+</div>
+
+<div class="gf-form" ng-show="ctrl.showGenerateSQL">
+  <pre class="gf-form-pre">{{ctrl.lastGenerateSQL}}</pre>
+</div>
+
+<div class="gf-form" ng-show="ctrl.showHelp">
+  <pre class="gf-form-pre alert alert-info">Use any SQL that returns a result set, such as:
+- [[timestamp1, value1], [timestamp2, value2], ... ]
+
+Macros:
+- $from -> start timestamp of panel
+- $to -> end timestamp of panel
+- $interval -> interval of panel
+
+Example of SQL:
+  SELECT count(*)
+  FROM db.table
+  WHERE ts > $from and ts < $to
+  INTERVAL ($interval)
+  </pre>
+</div>
+
+<div class="gf-form" ng-show="ctrl.lastQueryError">
+  <pre class="gf-form-pre alert alert-error">{{ctrl.lastQueryError}}</pre>
+</div>
+
+</query-editor-row>
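The macros documented in the help text above are expanded client-side by `generateSql` in `datasource.js` before the query is posted to the backend. A minimal standalone sketch of that substitution, using hypothetical panel timestamps, looks like this:

``` javascript
// Sketch of the $from/$to/$interval expansion performed by
// GenericDatasource.generateSql in datasource.js (which also accepts
// the $begin/$end aliases, omitted here for brevity).
function expandMacros(sql, queryStart, queryEnd, intervalMs) {
  sql = sql.replace(/^\s+|\s+$/gm, '');               // trim each line
  sql = sql.replace("$from", "'" + queryStart + "'");
  sql = sql.replace("$to", "'" + queryEnd + "'");
  sql = sql.replace("$interval", intervalMs + "a");   // "a" = milliseconds in TDengine
  return sql;
}

// Prints:
// SELECT count(*) FROM db.table WHERE ts > '2020-01-01T00:00:00.000Z'
// and ts < '2020-01-01T03:00:00.000Z' INTERVAL (20000a)
console.log(expandMacros(
  "SELECT count(*) FROM db.table WHERE ts > $from and ts < $to INTERVAL ($interval)",
  "2020-01-01T00:00:00.000Z",  // hypothetical panel start
  "2020-01-01T03:00:00.000Z",  // hypothetical panel end
  20000                        // panel intervalMs
));
```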
diff --git a/minidevops/grafana/tdengine/plugin.json b/minidevops/grafana/tdengine/plugin.json new file mode 100644 index 0000000000000000000000000000000000000000..6093703b700ed185dc8841c2c3e806ab9568e36f --- /dev/null +++ b/minidevops/grafana/tdengine/plugin.json @@ -0,0 +1,32 @@ +{ + "name": "TDengine", + "id": "tdengine", + "type": "datasource", + + "partials": { + "config": "partials/config.html" + }, + + "metrics": true, + "annotations": false, + "alerting": true, + + "info": { + "description": "TDengine datasource", + "author": { + "name": "Taosdata Inc.", + "url": "https://www.taosdata.com" + }, + "logos": { + "small": "img/taosdata_logo.png", + "large": "img/taosdata_logo.png" + }, + "version": "1.6.0", + "updated": "2019-07-01" + }, + + "dependencies": { + "grafanaVersion": "5.2.4", + "plugins": [ ] + } +} diff --git a/minidevops/grafana/tdengine/query_ctrl.js b/minidevops/grafana/tdengine/query_ctrl.js new file mode 100644 index 0000000000000000000000000000000000000000..fc9737238f1b637c0605d733e7bc04f770f5beef --- /dev/null +++ b/minidevops/grafana/tdengine/query_ctrl.js @@ -0,0 +1,91 @@ +'use strict'; + +System.register(['app/plugins/sdk'], function (_export, _context) { + "use strict"; + + var QueryCtrl, _createClass, GenericDatasourceQueryCtrl; + + function _classCallCheck(instance, Constructor) { + if (!(instance instanceof Constructor)) { + throw new TypeError("Cannot call a class as a function"); + } + } + + function _possibleConstructorReturn(self, call) { + if (!self) { + throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); + } + + return call && (typeof call === "object" || typeof call === "function") ? call : self; + } + + function _inherits(subClass, superClass) { + if (typeof superClass !== "function" && superClass !== null) { + throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); + } + + subClass.prototype = Object.create(superClass && superClass.prototype, { + constructor: { + value: subClass, + enumerable: false, + writable: true, + configurable: true + } + }); + if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; + } + + return { + setters: [function (_appPluginsSdk) { + QueryCtrl = _appPluginsSdk.QueryCtrl; + }, function (_cssQueryEditorCss) {}], + execute: function () { + _createClass = function () { + function defineProperties(target, props) { + for (var i = 0; i < props.length; i++) { + var descriptor = props[i]; + descriptor.enumerable = descriptor.enumerable || false; + descriptor.configurable = true; + if ("value" in descriptor) descriptor.writable = true; + Object.defineProperty(target, descriptor.key, descriptor); + } + } + + return function (Constructor, protoProps, staticProps) { + if (protoProps) defineProperties(Constructor.prototype, protoProps); + if (staticProps) defineProperties(Constructor, staticProps); + return Constructor; + }; + }(); + + _export('GenericDatasourceQueryCtrl', GenericDatasourceQueryCtrl = function (_QueryCtrl) { + _inherits(GenericDatasourceQueryCtrl, _QueryCtrl); + + function GenericDatasourceQueryCtrl($scope, $injector) { + _classCallCheck(this, GenericDatasourceQueryCtrl); + + var _this = _possibleConstructorReturn(this, (GenericDatasourceQueryCtrl.__proto__ || Object.getPrototypeOf(GenericDatasourceQueryCtrl)).call(this, $scope, $injector)); + + _this.scope = $scope; + return _this; + } + + _createClass(GenericDatasourceQueryCtrl, [{ + key: 'generateSQL', + value: function generateSQL(query) { + //this.lastGenerateSQL = this.datasource.generateSql(this.target.sql, this.panelCtrl.range.raw.from, this.panelCtrl.range.raw.to, this.panelCtrl.intervalMs); + this.lastGenerateSQL = this.datasource.generateSql(this.target.sql, this.panelCtrl.range.from.toISOString(), this.panelCtrl.range.to.toISOString(), this.panelCtrl.intervalMs); + this.showGenerateSQL = !this.showGenerateSQL; + } + }]); + + return GenericDatasourceQueryCtrl; + }(QueryCtrl)); + + _export('GenericDatasourceQueryCtrl', GenericDatasourceQueryCtrl); + + GenericDatasourceQueryCtrl.templateUrl = 'partials/query.editor.html'; + } + }; +}); +//# sourceMappingURL=query_ctrl.js.map diff --git a/minidevops/prometheus/prometheus.yml b/minidevops/prometheus/prometheus.yml new file mode 100644 index 0000000000000000000000000000000000000000..0b057c8ee810b3542e058805ef32ada521474f9b --- /dev/null +++ b/minidevops/prometheus/prometheus.yml @@ -0,0 +1,36 @@ +global: + scrape_interval: 15s # By default, scrape targets every 15 seconds. + + # Attach these labels to any time series or alerts when communicating with + # external systems (federation, remote storage, Alertmanager). + external_labels: + monitor: 'codelab-monitor' + +remote_write: + - url: "http://172.15.1.7:10203/receive" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'prometheus' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + static_configs: + - targets: ['localhost:9090'] + + # - job_name: 'example-random' + + # # Override the global default and scrape targets from this job every 5 seconds. 
+ # scrape_interval: 5s + + # static_configs: + # - targets: ['172.17.0.6:8080', '172.17.0.6:8081'] + # labels: + # group: 'production' + + # - targets: ['172.17.0.6:8082'] + # labels: + # group: 'canary' diff --git a/minidevops/run.sh b/minidevops/run.sh new file mode 100755 index 0000000000000000000000000000000000000000..15e74d0e5a0c23a60e434cf818fe92101b58fbb5 --- /dev/null +++ b/minidevops/run.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +#set -x +LP=`pwd` +#echo $LP + +docker rm -f `docker ps -a -q` +docker network rm minidevops +docker network create --ip-range 172.15.1.255/24 --subnet 172.15.1.1/16 minidevops + +#docker run -d --net="host" --pid="host" -v "/:/host:ro" quay.io/prometheus/node-exporter --path.rootfs=/host + +docker run -d --net minidevops --ip 172.15.1.11 -v $LP/grafana:/var/lib/grafana/plugins -p 3000:3000 grafana/grafana +#docker run -d --net minidevops --ip 172.15.1.11 -v /Users/tom/Documents/minidevops/grafana:/var/lib/grafana/plugins -p 3000:3000 grafana/grafana + +TDENGINE=`docker run -d --net minidevops --ip 172.15.1.6 -p 6030:6030 -p 6020:6020 -p 6031:6031 -p 6032:6032 -p 6033:6033 -p 6034:6034 -p 6035:6035 -p 6036:6036 -p 6037:6037 -p 6038:6038 -p 6039:6039 tdengine/tdengine:1.6.4.5` +docker cp /etc/localtime $TDENGINE:/etc/localtime + +BLMPROMETHEUS=`docker run -d --net minidevops --ip 172.15.1.7 -p 10203:10203 tdengine/blm_prometheus 172.15.1.6` + + +BLMPTELEGRAF=`docker run -d --net minidevops --ip 172.15.1.8 -p 10202:10202 tdengine/blm_telegraf 172.15.1.6` + +docker run -d --net minidevops --ip 172.15.1.9 -v $LP/prometheus:/etc/prometheus -p 9090:9090 prom/prometheus +#docker run -d --net minidevops --ip 172.15.1.9 -v /Users/tom/Documents/minidevops/prometheus:/etc/prometheus -p 9090:9090 prom/prometheus + +docker run -d --net minidevops --ip 172.15.1.10 -v $LP/telegraf:/etc/telegraf -p 8092:8092 -p 8094:8094 -p 8125:8125 telegraf +#docker run -d --net minidevops --ip 172.15.1.10 -v /Users/tom/Documents/minidevops/telegraf:/etc/telegraf -p 8092:8092 -p 8094:8094 -p 8125:8125 telegraf + + +sleep 10 +curl -X POST http://localhost:3000/api/datasources --header "Content-Type:application/json" -u admin:admin -d '{"Name": "TDengine","Type": "tdengine","TypeLogoUrl": "public/plugins/tdengine/img/taosdata_logo.png","Access": "proxy","Url": "http://172.15.1.6:6020","BasicAuth": false,"isDefault": true,"jsonData": {},"readOnly": false}' + +curl -X POST http://localhost:3000/api/dashboards/db --header "Content-Type:application/json" -u admin:admin -d '{"dashboard":{"annotations":{"list":[{"builtIn":1,"datasource":"-- Grafana --","enable":true,"hide":true,"iconColor":"rgba(0, 211, 255, 1)","name":"Annotations & Alerts","type":"dashboard"}]},"editable":true,"gnetId":null,"graphTooltip":0,"id":1,"links":[],"panels":[{"datasource":null,"gridPos":{"h":8,"w":6,"x":0,"y":0},"id":6,"options":{"fieldOptions":{"calcs":["mean"],"defaults":{"color":{"mode":"thresholds"},"links":[{"title":"","url":""}],"mappings":[],"max":100,"min":0,"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]},"unit":"percent"},"overrides":[],"values":false},"orientation":"auto","showThresholdLabels":false,"showThresholdMarkers":true},"pluginVersion":"6.6.0","targets":[{"refId":"A","sql":"select last_row(value) from telegraf.mem where field=\"used_percent\""}],"timeFrom":null,"timeShift":null,"title":"Memory used 
percent","type":"gauge"},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":6,"y":0},"hiddenSeries":false,"id":8,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"dataLinks":[]},"percentage":false,"pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"alias":"MEMUSED-PERCENT","refId":"A","sql":"select avg(value) from telegraf.mem where field=\"used_percent\" interval(1m)"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"MEM","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"datasource":null,"gridPos":{"h":3,"w":18,"x":0,"y":8},"id":10,"options":{"displayMode":"lcd","fieldOptions":{"calcs":["mean"],"defaults":{"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null}]},"unit":"percent"},"overrides":[],"values":false},"orientation":"auto","showUnfilled":true},"pluginVersion":"6.6.0","targets":[{"alias":"CPU-SYS","refId":"A","sql":"select last_row(value) from telegraf.cpu where field=\"usage_system\""},{"alias":"CPU-IDLE","refId":"B","sql":"select last_row(value) from telegraf.cpu where field=\"usage_idle\""},{"alias":"CPU-USER","refId":"C","sql":"select last_row(value) from telegraf.cpu where field=\"usage_user\""}],"timeFrom":null,"timeShift":null,"title":"CPU-USED","type":"bargauge"},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"TDengine","description":"General CPU monitor","fill":1,"fillGradient":0,"gridPos":{"h":9,"w":18,"x":0,"y":11},"hiddenSeries":false,"id":2,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"dataLinks":[]},"percentage":false,"pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"alias":"CPU-USER","refId":"A","sql":"select avg(value) from telegraf.cpu where field=\"usage_user\" and cpu=\"cpu-total\" interval(1m)"},{"alias":"CPU-SYS","refId":"B","sql":"select avg(value) from telegraf.cpu where field=\"usage_system\" and cpu=\"cpu-total\" interval(1m)"},{"alias":"CPU-IDLE","refId":"C","sql":"select avg(value) from telegraf.cpu where field=\"usage_idle\" and cpu=\"cpu-total\" interval(1m)"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"CPU","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"refresh":"10s","schemaVersion":22,"style":"dark","tags":["demo"],"templating":{"list":[]},"time":{"from":"now-3h","to":"now"},"timepicker":{"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"]},"timezone":"","title":"TDengineDashboardDemo","id":null,"uid":null,"version":0}}' 
+ +#curl -X POST http://localhost:3000/api/dashboards/db --header "Content-Type:application/json" -u admin:admin -d '{"dashboard":{"annotations":{"list":[{"builtIn":1,"datasource":"-- Grafana --","enable":true,"hide":true,"iconColor":"rgba(0, 211, 255, 1)","name":"Annotations & Alerts","type":"dashboard"}]},"editable":true,"gnetId":null,"graphTooltip":0,"id":3,"links":[],"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"TDengine","description":"memory used percent","fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":0,"y":0},"hiddenSeries":false,"id":4,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"dataLinks":[]},"percentage":false,"pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"alias":"memused-percent","refId":"A","sql":"select avg(value) from telegraf.mem where field=\"used_percent\" interval(1m)"},{"refId":"B","sql":""}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"MEM","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"TDengine","description":"General CPU monitor","fill":1,"fillGradient":0,"gridPos":{"h":9,"w":12,"x":0,"y":8},"hiddenSeries":false,"id":2,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"dataLinks":[]},"percentage":false,"pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"alias":"CPU-USER","refId":"A","sql":"select avg(value) from telegraf.cpu where field=\"usage_user\" and cpu=\"cpu-total\" interval(1m)"},{"alias":"CPU-SYS","refId":"B","sql":"select avg(value) from telegraf.cpu where field=\"usage_system\" and cpu=\"cpu-total\" interval(1m)"},{"alias":"CPU-IDLE","refId":"C","sql":"select avg(value) from telegraf.cpu where field=\"usage_idle\" and cpu=\"cpu-total\" interval(1m)"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"CPU","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"refresh":"10s","schemaVersion":21,"style":"dark","tags":["demo"],"templating":{"list":[]},"time":{"from":"now-6h","to":"now"},"timepicker":{"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"]},"timezone":"","title":"TDengineDashboardDemo","id":null,"uid":null,"version":0}}' diff --git a/minidevops/telegraf/telegraf.conf b/minidevops/telegraf/telegraf.conf new file mode 100644 index 0000000000000000000000000000000000000000..05a77bdbc0a17f59a062d4d3ee04057b483bdeb6 --- /dev/null +++ b/minidevops/telegraf/telegraf.conf @@ -0,0 +1,5861 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin 
driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) + + +# Global tags can be specified here in key="value" format. +[global_tags] + # dc = "us-east-1" # will tag all metrics with dc=us-east-1 + # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" + + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 500 + + ## Maximum number of unwritten metrics per output. + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log file name, the empty string means to log to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. 
+ omit_hostname = false + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + + +# Configuration for sending metrics to InfluxDB +[[outputs.influxdb]] + ## The full HTTP or UDP URL for your InfluxDB instance. + ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + # urls = ["unix:///var/run/influxdb.sock"] + # urls = ["udp://127.0.0.1:8089"] + # urls = ["http://127.0.0.1:8086"] + + ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. + # database = "telegraf" + + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the database tag will not be added to the metric. + # exclude_database_tag = false + + ## If true, no CREATE DATABASE queries will be sent. Set to true when using + ## Telegraf with a user without permissions to create databases or when the + ## database already exists. + # skip_database_creation = false + + ## Name of existing retention policy to write to. Empty string writes to + ## the default retention policy. Only takes effect when using HTTP. + # retention_policy = "" + + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". + ## Only takes effect when using HTTP. + # write_consistency = "any" + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## HTTP Basic Auth + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## UDP payload size is the maximum packet size to send. + # udp_payload = "512B" + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. + # influx_uint_support = false + + +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. +# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Publishes metrics to an AMQP broker +# [[outputs.amqp]] +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to publish to. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. 
+# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Maximum messages to send over a connection. Once this is reached, the +# ## connection is closed and a new connection is made. This can be helpful for +# ## load balancing when not using a dedicated load balancer. +# # max_messages = 0 +# +# ## Exchange to declare and publish to. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_propery" = "timestamp"} +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Metric tag to use as a routing key. +# ## ie, if this tag exists, its value will be used as the routing key +# # routing_tag = "host" +# +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. +# # routing_key = "" +# # routing_key = "telegraf" +# +# ## Delivery Mode controls if a published message is persistent. +# ## One of "transient" or "persistent". +# # delivery_mode = "transient" +# +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option +# # database = "telegraf" +# +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# +# ## Static headers added to each published message. +# # headers = { } +# # headers = {"database" = "telegraf", "retention_policy" = "default"} +# +# ## Connection timeout. If not provided, will default to 5s. 0s means no +# ## timeout (not recommended). +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## If true use batch serialization format instead of line based delimiting. +# ## Only applies to data formats which are not line based such as JSON. +# ## Recommended to set to true. +# # use_batch_format = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# ## +# ## Please note that when use_batch_format = false each amqp message contains only +# ## a single metric, it is recommended to use compression with batch format +# ## for best results. +# # content_encoding = "identity" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Send metrics to Azure Application Insights +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. +# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" +# +# ## Timeout for closing (default: 5s). +# # timeout = "5s" +# +# ## Enable additional diagnostic logging. 
+# # enable_diagnostic_logging = false +# +# ## Context Tag Sources add Application Insights context tags to a tag value. +# ## +# ## For list of allowed context tag keys see: +# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# # [outputs.application_insights.context_tag_sources] +# # "ai.cloud.role" = "kubernetes_container_name" +# # "ai.cloud.roleInstance" = "kubernetes_pod_name" + + +# # Send aggregate metrics to Azure Monitor +# [[outputs.azure_monitor]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Set the namespace prefix, defaults to "Telegraf/". +# # namespace_prefix = "Telegraf/" +# +# ## Azure Monitor doesn't have a string value type, so convert string +# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows +# ## a maximum of 10 dimensions so Telegraf will only send the first 10 +# ## alphanumeric dimensions. +# # strings_as_dimensions = false +# +# ## Both region and resource_id must be set or be available via the +# ## Instance Metadata service on Azure Virtual Machines. +# # +# ## Azure Region to publish metrics against. +# ## ex: region = "southcentralus" +# # region = "" +# # +# ## The Azure Resource ID against which metric will be logged, e.g. +# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" +# # resource_id = "" +# +# ## Optionally, if in Azure US Government, China or other sovereign +# ## cloud environment, set appropriate REST endpoint for receiving +# ## metrics. (Note: region may be unused in this context) +# # endpoint_url = "https://monitoring.core.usgovcloudapi.net" + + +# # Publish Telegraf metrics to a Google Cloud PubSub topic +# [[outputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub topic. +# project = "my-project" +# +# ## Required. Name of PubSub topic to publish metrics to. +# topic = "my-topic" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. If true, will send all metrics per write in one PubSub message. +# # send_batched = true +# +# ## The following publish_* parameters specifically configures batching +# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read +# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1. +# # publish_count_threshold = 1000 +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1 +# # publish_byte_threshold = 1000000 +# +# ## Optional. Specifically configures requests made to the PubSub API. +# # publish_num_go_routines = 2 +# +# ## Optional. Specifies a timeout for requests to the PubSub API. +# # publish_timeout = "30s" +# +# ## Optional. 
If true, published PubSub message data will be base64-encoded. +# # base64_data = false +# +# ## Optional. PubSub attributes to add to metrics. +# # [[inputs.pubsub.attributes]] +# # my_attr = "tag_value" + + +# # Configuration for AWS CloudWatch output. +# [[outputs.cloudwatch]] +# ## Amazon REGION +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Namespace for the CloudWatch MetricDatums +# namespace = "InfluxData/Telegraf" +# +# ## If you have a large amount of metrics, you should consider to send statistic +# ## values instead of raw metrics which could not only improve performance but +# ## also save AWS API cost. If enable this flag, this plugin would parse the required +# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. +# ## You could use basicstats aggregator to calculate those fields. If not all statistic +# ## fields are available, all fields would still be sent as raw metrics. +# # write_statistics = false + + +# # Configuration for CrateDB to send metrics to. +# [[outputs.cratedb]] +# # A github.com/jackc/pgx connection string. +# # See https://godoc.org/github.com/jackc/pgx#ParseDSN +# url = "postgres://user:password@localhost/schema?sslmode=disable" +# # Timeout for all CrateDB queries. +# timeout = "5s" +# # Name of the table to store metrics in. +# table = "metrics" +# # If true, and the metrics table does not exist, create it automatically. +# table_create = true + + +# # Configuration for DataDog API to send metrics to. +# [[outputs.datadog]] +# ## Datadog API key +# apikey = "my-secret-key" # required. +# +# # The base endpoint URL can optionally be specified but it defaults to: +# #url = "https://app.datadoghq.com/api/v1/series" +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Send metrics to nowhere at all +# [[outputs.discard]] +# # no configuration + + +# # Configuration for Elasticsearch to send metrics to. +# [[outputs.elasticsearch]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# ## Elasticsearch client timeout, defaults to "5s" if not set. +# timeout = "5s" +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option. +# enable_sniffer = false +# ## Set the interval to check if the Elasticsearch nodes are available +# ## Setting to "0s" will disable the health check (not recommended in production) +# health_check_interval = "10s" +# ## HTTP basic authentication details +# # username = "telegraf" +# # password = "mypassword" +# +# ## Index Config +# ## The target index for metrics (Elasticsearch will create if it not exists). 
+# ## You can use the date specifiers below to create indexes per time frame. +# ## The metric timestamp will be used to decide the destination index name +# # %Y - year (2016) +# # %y - last two digits of year (00..99) +# # %m - month (01..12) +# # %d - day of month (e.g., 01) +# # %H - hour (00..23) +# # %V - week of the year (ISO week) (01..53) +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the index name. If the tag does not exist, +# ## the default tag value will be used. +# # index_name = "telegraf-{{host}}-%Y.%m.%d" +# # default_tag_value = "none" +# index_name = "telegraf-%Y.%m.%d" # required. +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Template Config +# ## Set to true if you want telegraf to manage its index template. +# ## If enabled it will create a recommended index template for telegraf indexes +# manage_template = true +# ## The template name used for telegraf indexes +# template_name = "telegraf" +# ## Set to true if you want telegraf to overwrite an existing template +# overwrite_template = false + + +# # Send metrics to command as input over stdin +# [[outputs.exec]] +# ## Command to injest metrics via stdin. +# command = ["tee", "-a", "/dev/null"] +# +# ## Timeout for command to complete. +# # timeout = "5s" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Send telegraf metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0d" +# +# ## The logfile will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. +# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. +# ## If multiple endpoints are configured, output will be load balanced. +# ## Only one of the endpoints will be written to with each iteration. 
+# servers = ["localhost:2003"] +# ## Prefix metrics name +# prefix = "" +# ## Graphite output template +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# template = "host.tags.measurement.field" +# +# ## Enable Graphite tags support +# # graphite_tag_support = false +# +# ## timeout in seconds for the write connection to graphite +# timeout = 2 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to graylog(s) +# [[outputs.graylog]] +# ## UDP endpoint for your graylog instance. +# servers = ["127.0.0.1:12201", "192.168.1.1:12201"] + + +# # Configurable HTTP health check resource based on metrics +# [[outputs.health]] +# ## Address and port to listen on. +# ## ex: service_address = "http://localhost:8080" +# ## service_address = "unix:///var/run/telegraf-health.sock" +# # service_address = "http://:8080" +# +# ## The maximum duration for reading the entire request. +# # read_timeout = "5s" +# ## The maximum duration for writing the entire response. +# # write_timeout = "5s" +# +# ## Username and password to accept for HTTP basic authentication. +# # basic_username = "user1" +# # basic_password = "secret" +# +# ## Allowed CA certificates for client certificates. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## TLS server certificate and private key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## One or more check sub-tables should be defined, it is also recommended to +# ## use metric filtering to limit the metrics that flow into this output. +# ## +# ## When using the default buffer sizes, this example will fail when the +# ## metric buffer is half full. +# ## +# ## namepass = ["internal_write"] +# ## tagpass = { output = ["influxdb"] } +# ## +# ## [[outputs.health.compares]] +# ## field = "buffer_size" +# ## lt = 5000.0 +# ## +# ## [[outputs.health.contains]] +# ## field = "buffer_size" + + +# # A plugin that can transmit metrics over HTTP + [[outputs.http]] +# ## URL is the address to send metrics to +url = "http://172.15.1.8:10202/telegraf" +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP method, one of: "POST" or "PUT" +# # method = "POST" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +data_format = "json" +json_timestamp_units = "1ms" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. 
+# # content_encoding = "identity" +# +# ## Additional HTTP headers +# # [outputs.http.headers] +# # # Should be set manually to "application/json" for json data_format +# # Content-Type = "text/plain; charset=utf-8" + + +# # Configuration for sending metrics to InfluxDB +# [[outputs.influxdb_v2]] +# ## The URLs of the InfluxDB cluster nodes. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# urls = ["http://127.0.0.1:9999"] +# +# ## Token for authentication. +# token = "" +# +# ## Organization is the name of the organization you wish to write to; must exist. +# organization = "" +# +# ## Destination bucket to write into. +# bucket = "" +# +# ## The value of this tag will be used to determine the bucket. If this +# ## tag is not set the 'bucket' option is used as the default. +# # bucket_tag = "" +# +# ## If true, the bucket tag will not be added to the metric. +# # exclude_bucket_tag = false +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Proxy override, if unset values the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## Enable or disable uint support for writing uints influxdb 2.0. +# # influx_uint_support = false +# +# ## Optional TLS Config for use on HTTP connections. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Configuration for sending metrics to an Instrumental project +# [[outputs.instrumental]] +# ## Project API Token (required) +# api_token = "API Token" # required +# ## Prefix the metrics with a given name +# prefix = "" +# ## Stats output template (Graphite formatting) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# template = "host.tags.measurement.field" +# ## Timeout in seconds to connect +# timeout = "2s" +# ## Display Communcation to Instrumental +# debug = false + + +# # Configuration for the Kafka server to send metrics to +# [[outputs.kafka]] +# ## URLs of kafka brokers +# brokers = ["localhost:9092"] +# ## Kafka topic for producer messages +# topic = "telegraf" +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## requires at least version 0.10.0.0. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional topic suffix configuration. +# ## If the section is omitted, no suffix is used. +# ## Following topic suffix methods are supported: +# ## measurement - suffix equals to separator + measurement's name +# ## tags - suffix equals to separator + specified tags' values +# ## interleaved with separator +# +# ## Suffix equals to "_" + measurement name +# # [outputs.kafka.topic_suffix] +# # method = "measurement" +# # separator = "_" +# +# ## Suffix equals to "__" + measurement's "foo" tag value. 
+# ## If there's no such a tag, suffix equals to an empty string +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo"] +# # separator = "__" +# +# ## Suffix equals to "_" + measurement's "foo" and "bar" +# ## tag values, separated by "_". If there is no such tags, +# ## their values treated as empty strings. +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo", "bar"] +# # separator = "_" +# +# ## Telegraf tag to use as a routing key +# ## ie, if this tag exists, its value will be used as the routing key +# routing_tag = "host" +# +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. If set to "random", +# ## a random value will be generated for each message. +# ## ex: routing_key = "random" +# ## routing_key = "telegraf" +# # routing_key = "" +# +# ## CompressionCodec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : No compression +# ## 1 : Gzip compression +# ## 2 : Snappy compression +# ## 3 : LZ4 compression +# # compression_codec = 0 +# +# ## RequiredAcks is used in Produce Requests to tell the broker how many +# ## replica acknowledgements it must see before responding +# ## 0 : the producer never waits for an acknowledgement from the broker. +# ## This option provides the lowest latency but the weakest durability +# ## guarantees (some data will be lost when a server fails). +# ## 1 : the producer gets an acknowledgement after the leader replica has +# ## received the data. This option provides better durability as the +# ## client waits until the server acknowledges the request as successful +# ## (only messages that were written to the now-dead leader but not yet +# ## replicated will be lost). +# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability, we +# ## guarantee that no messages will be lost as long as at least one in +# ## sync replica remains. +# # required_acks = -1 +# +# ## The maximum number of times to retry sending a metric before failing +# ## until the next flush. +# # max_retry = 3 +# +# ## The maximum permitted size of a message. Should be set equal to or +# ## smaller than the broker's 'message.max.bytes'. +# # max_message_bytes = 1000000 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional SASL Config +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Configuration for the AWS Kinesis output. +# [[outputs.kinesis]] +# ## Amazon REGION of kinesis endpoint. 
+# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# ## DEPRECATED: PartitionKey as used for sharding data. +# partitionkey = "PartitionKey" +# ## DEPRECATED: If set the paritionKey will be a random UUID on every put. +# ## This allows for scaling across multiple shards in a stream. +# ## This will cause issues with ordering. +# use_random_partitionkey = false +# ## The partition key can be calculated using one of several methods: +# ## +# ## Use a static value for all writes: +# # [outputs.kinesis.partition] +# # method = "static" +# # key = "howdy" +# # +# ## Use a random partition key on each write: +# # [outputs.kinesis.partition] +# # method = "random" +# # +# ## Use the measurement name as the partition key: +# # [outputs.kinesis.partition] +# # method = "measurement" +# # +# ## Use the value of a tag for all writes, if the tag is not set the empty +# ## default option will be used. When no default, defaults to "telegraf" +# # [outputs.kinesis.partition] +# # method = "tag" +# # key = "host" +# # default = "mykey" +# +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## debug will show upstream aws messages. +# debug = false + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librator API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# ## Librato API token +# api_token = "my-secret-token" # required. +# ## Debug +# # debug = false +# ## Connection timeout. +# # timeout = "5s" +# ## Output source Template (same as graphite buckets) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# ## This template is used in librato's source (not metric's name) +# template = "host" +# + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# servers = ["localhost:1883"] # required. +# +# ## MQTT outputs send metrics to this topic format +# ## "///" +# ## ex: prefix/web01.example.com/mem +# topic_prefix = "telegraf" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# # qos = 2 +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## client ID, if not set a random ID is generated +# # client_id = "" +# +# ## Timeout for write operations. 
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
+# ## metrics are written one metric per MQTT message.
+# # batch = false
+#
+# ## When true, the metric will have the RETAIN flag set, making the broker
+# ## cache the entry until someone actually reads it
+# # retain = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send telegraf measurements to NATS
+# [[outputs.nats]]
+# ## URLs of NATS servers
+# servers = ["nats://localhost:4222"]
+# ## Optional credentials
+# # username = ""
+# # password = ""
+# ## NATS subject for producer messages
+# subject = "telegraf"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send telegraf measurements to NSQD
+# [[outputs.nsq]]
+# ## Location of nsqd instance listening on TCP
+# server = "localhost:4150"
+# ## NSQ topic for producer messages
+# topic = "telegraf"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Configuration for OpenTSDB server to send metrics to
+# [[outputs.opentsdb]]
+# ## prefix for metrics keys
+# prefix = "my.specific.prefix."
+#
+# ## DNS name of the OpenTSDB server
+# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
+# ## telnet API. "http://opentsdb.example.com" will use the HTTP API.
+# host = "opentsdb.example.com"
+#
+# ## Port of the OpenTSDB server
+# port = 4242
+#
+# ## Number of data points to send to OpenTSDB in HTTP requests.
+# ## Not used with telnet API.
+# http_batch_size = 50
+#
+# ## URI Path for HTTP requests to OpenTSDB.
+# ## Used in cases where OpenTSDB is located behind a reverse proxy.
+# http_path = "/api/put"
+#
+# ## Debug true - Prints OpenTSDB communication
+# debug = false
+#
+# ## Separator separates measurement name from field
+# separator = "_"
+
+
+# # Configuration for the Prometheus client to spawn
+# [[outputs.prometheus_client]]
+# ## Address to listen on
+# listen = ":9273"
+#
+# ## Use HTTP Basic Authentication.
+# # basic_username = "Foo"
+# # basic_password = "Bar"
+#
+# ## If set, the IP Ranges which are allowed to access metrics.
+# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
+# # ip_range = []
+#
+# ## Path to publish the metrics on.
+# # path = "/metrics"
+#
+# ## Expiration interval for each metric. 0 == no expiration
+# # expiration_interval = "60s"
+#
+# ## Collectors to enable, valid entries are "gocollector" and "process".
+# ## If unset, both are enabled.
+# # collectors_exclude = ["gocollector", "process"]
+#
+# ## Send string metrics as Prometheus labels.
+# ## Unless set to false, all string metrics will be sent as labels.
+# # string_as_label = true
+#
+# ## If set, enable TLS with the given certificate.
+# # tls_cert = "/etc/ssl/telegraf.crt"
+# # tls_key = "/etc/ssl/telegraf.key"
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Export metric collection time.
+# # export_timestamp = false
+
+
+# # Configuration for the Riemann server to send metrics to
+# [[outputs.riemann]]
+# ## The full TCP or UDP URL of the Riemann server
+# url = "tcp://localhost:5555"
+#
+# ## Riemann event TTL, floating-point time in seconds.
+# ## Defines how long an event is considered valid in Riemann
+# # ttl = 30.0
+#
+# ## Separator to use between measurement and field name in Riemann service name
+# ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
+# separator = "/"
+#
+# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
+# # measurement_as_attribute = false
+#
+# ## Send string metrics as Riemann event states.
+# ## Unless enabled, all string metrics will be ignored
+# # string_as_state = false
+#
+# ## A list of tag keys whose values get sent as Riemann tags.
+# ## If empty, all Telegraf tag values will be sent as tags
+# # tag_keys = ["telegraf","custom_tag"]
+#
+# ## Additional Riemann tags to send.
+# # tags = ["telegraf-output"]
+#
+# ## Description for Riemann event
+# # description_text = "metrics collected from telegraf"
+#
+# ## Riemann client write timeout, defaults to "5s" if not set.
+# # timeout = "5s"
+
+
+# # Configuration for the legacy Riemann plugin to send metrics to
+# [[outputs.riemann_legacy]]
+# ## URL of server
+# url = "localhost:5555"
+# ## transport protocol to use, either tcp or udp
+# transport = "tcp"
+# ## separator to use between input name and field name in Riemann service name
+# separator = " "
+
+
+# # Generic socket writer capable of handling multiple socket types.
+# [[outputs.socket_writer]]
+# ## URL to connect to
+# # address = "tcp://127.0.0.1:8094"
+# # address = "tcp://example.com:http"
+# # address = "tcp4://127.0.0.1:8094"
+# # address = "tcp6://127.0.0.1:8094"
+# # address = "tcp6://[2001:db8::1]:8094"
+# # address = "udp://127.0.0.1:8094"
+# # address = "udp4://127.0.0.1:8094"
+# # address = "udp6://127.0.0.1:8094"
+# # address = "unix:///tmp/telegraf.sock"
+# # address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## Data format to generate.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+
+
+# # Configuration for Google Cloud Stackdriver to send metrics to
+# [[outputs.stackdriver]]
+# ## GCP Project
+# project = "erudite-bloom-151019"
+#
+# ## The namespace for the metric descriptor
+# namespace = "telegraf"
+#
+# ## Custom resource type
+# # resource_type = "generic_node"
+#
+# ## Additional resource labels
+# # [outputs.stackdriver.resource_labels]
+# # node_id = "$HOSTNAME"
+# # namespace = "myapp"
+# # location = "eu-north0"
+
+
+# # Configuration for Syslog server to send metrics to
+# [[outputs.syslog]]
+# ## URL to connect to
+# ## ex: address = "tcp://127.0.0.1:8094"
+# ## ex: address = "tcp4://127.0.0.1:8094"
+# ## ex: address = "tcp6://127.0.0.1:8094"
+# ## ex: address = "tcp6://[2001:db8::1]:8094"
+# ## ex: address = "udp://127.0.0.1:8094"
+# ## ex: address = "udp4://127.0.0.1:8094"
+# ## ex: address = "udp6://127.0.0.1:8094"
+# address = "tcp://127.0.0.1:8094"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## The framing technique with which it is expected that messages are
+# ## transported (default = "octet-counting"). Whether the messages come
+# ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
+# ## be one of "octet-counting", "non-transparent".
+# # framing = "octet-counting"
+#
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
+# ## Must be one of "LF", or "NUL".
+# # trailer = "LF"
+#
+# ## SD-PARAMs settings
+# ## Syslog messages can contain key/value pairs within zero or more
+# ## structured data sections. For each unrecognised metric tag/field an
+# ## SD-PARAM is created.
+# ##
+# ## Example:
+# ## [[outputs.syslog]]
+# ## sdparam_separator = "_"
+# ## default_sdid = "default@32473"
+# ## sdids = ["foo@123", "bar@456"]
+# ##
+# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
+# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
+#
+# ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
+# # sdparam_separator = "_"
+#
+# ## Default sdid used for tags/fields that don't contain a prefix defined in
+# ## the explicit sdids setting below. If no default is specified, no SD-PARAMs
+# ## will be used for unrecognised fields.
+# # default_sdid = "default@32473"
+#
+# ## List of explicit prefixes to extract from tag/field keys and use as the
+# ## SDID, if they match (see above example for more details):
+# # sdids = ["foo@123", "bar@456"]
+#
+# ## Default severity value. Severity and Facility are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field
+# ## with key "severity_code" is defined. If unset, 5 (notice) is the default
+# # default_severity_code = 5
+#
+# ## Default facility value. Facility and Severity are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
+# ## key "facility_code" is defined. If unset, 1 (user-level) is the default
+# # default_facility_code = 1
+#
+# ## Default APP-NAME value (RFC5424#section-6.2.5)
+# ## Used when no metric tag with key "appname" is defined.
+# ## If unset, "Telegraf" is the default
+# # default_appname = "Telegraf"
+
+
+# # Configuration for Wavefront server to send metrics to
+# [[outputs.wavefront]]
+# ## URL for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
+# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878
+# url = "https://metrics.wavefront.com"
+#
+# ## Authentication Token for Wavefront. Only required if using Direct Ingestion
+# #token = "DUMMY_TOKEN"
+#
+# ## DNS name of the wavefront proxy server. Do not use if url is specified
+# #host = "wavefront.example.com"
+#
+# ## Port that the Wavefront proxy server listens on. Do not use if url is specified
+# #port = 2878
+#
+# ## prefix for metrics keys
+# #prefix = "my.specific.prefix."
+#
+# ## whether to use "value" for name of simple fields. default is false
+# #simple_fields = false
+#
+# ## character to use between metric and field name. default is . (dot)
+# #metric_separator = "."
+#
+# ## Convert metric name paths to use metricSeparator character
+# ## When true will convert all _ (underscore) characters in final metric name. default is true
+# #convert_paths = true
+#
+# ## Use Strict rules to sanitize metric and tag names from invalid characters
+# ## When enabled forward slash (/) and comma (,) will be accepted
+# #use_strict = false
+#
+# ## Use Regex to sanitize metric and tag names from invalid characters
+# ## Regex is more thorough, but significantly slower. default is false
+# #use_regex = false
+#
+# ## point tags to use as the source name for Wavefront (if none found, host will be used)
+# #source_override = ["hostname", "address", "agent_host", "node_host"]
+#
+# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
+# #convert_bool = true
+#
+# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
+# ## deprecated in 1.9; use the enum processor plugin
+# #[[outputs.wavefront.string_to_number.elasticsearch]]
+# # green = 1.0
+# # yellow = 0.5
+# # red = 0.0
+
+
+###############################################################################
+# PROCESSOR PLUGINS #
+###############################################################################
+
+
+# # Convert values to another metric value type
+# [[processors.converter]]
+# ## Tags to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## <target-type> = [<tag-key>...]
+# [processors.converter.tags]
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+#
+# ## Fields to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## <target-type> = [<field-key>...]
+# [processors.converter.fields]
+# tag = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
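+#
+# ## For instance, assuming a hypothetical metric with a numeric "port" tag
+# ## and string fields whose names start with "load", the following sketch
+# ## would convert the tag to an integer and those fields to floats:
+# # [processors.converter.tags]
+# # integer = ["port"]
+# # [processors.converter.fields]
+# # float = ["load*"]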
+
+
+# # Dates measurements, tags, and fields that pass through this filter.
+# [[processors.date]]
+# ## New tag to create
+# tag_key = "month"
+#
+# ## Date format string, must be a representation of the Go "reference time"
+# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
+# date_format = "Jan"
+
+
+# # Map enum values according to given table.
+# [[processors.enum]]
+# [[processors.enum.mapping]]
+# ## Name of the field to map
+# field = "status"
+#
+# ## Name of the tag to map
+# # tag = "status"
+#
+# ## Destination tag or field to be used for the mapped value. By default the
+# ## source tag or field is used, overwriting the original value.
+# dest = "status_code"
+#
+# ## Default value to be used for all values not contained in the mapping
+# ## table. When unset, the unmodified value for the field will be used if no
+# ## match is found.
+# # default = 0
+#
+# ## Table of mappings
+# [processors.enum.mapping.value_mappings]
+# green = 1
+# amber = 2
+# red = 3
+
+
+# # Apply metric modifications using override semantics.
+# [[processors.override]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.override.tags]
+# # additional_tag = "tag_value"
+
+
+# # Parse a value in a specified field/tag(s) and add the result in a new metric
+# [[processors.parser]]
+# ## The name of the fields whose value will be parsed.
+# parse_fields = []
+#
+# ## If true, incoming metrics are not emitted.
+# drop_original = false
+#
+# ## If set to override, emitted metrics will be merged by overriding the
+# ## original metric using the newly parsed metrics.
+# merge = "override"
+#
+# ## The data format to be read from the parsed fields.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Rotate a single valued metric into a multi field metric
+# [[processors.pivot]]
+# ## Tag to use for naming the new field.
+# tag_key = "name"
+# ## Field to use as the value of the new field.
+# value_key = "value"
+
+
+# # Print all metrics that pass through this filter.
+# [[processors.printer]]
+
+
+# # Transforms tag and field values with regex pattern
+# [[processors.regex]]
+# ## Tag and field conversions are defined in separate sub-tables
+# # [[processors.regex.tags]]
+# # ## Tag to change
+# # key = "resp_code"
+# # ## Regular expression to match on a tag value
+# # pattern = "^(\\d)\\d\\d$"
+# # ## Matches of the pattern will be replaced with this string. Use ${1}
+# # ## notation to use the text of the first submatch.
+# # replacement = "${1}xx"
+#
+# # [[processors.regex.fields]]
+# # ## Field to change
+# # key = "request"
+# # ## All the power of the Go regular expressions available here
+# # ## For example, named subgroups
+# # pattern = "^/api(?P<method>/[\\w/]+)\\S*"
+# # replacement = "${method}"
+# # ## If result_key is present, a new field will be created
+# # ## instead of changing the existing field
+# # result_key = "method"
+#
+# ## Multiple conversions may be applied for one field sequentially
+# ## Let's extract one more value
+# # [[processors.regex.fields]]
+# # key = "request"
+# # pattern = ".*category=(\\w+).*"
+# # replacement = "${1}"
+# # result_key = "search_category"
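+#
+# ## A worked example of the "resp_code" conversion above (the input value is
+# ## hypothetical): a metric tagged resp_code="404" matches "^(\\d)\\d\\d$"
+# ## with first submatch "4", so the tag is rewritten to resp_code="4xx".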
+
+
+# # Rename measurements, tags, and fields that pass through this filter.
+# [[processors.rename]]
+
+
+# # Perform string processing on tags, fields, and measurements
+# [[processors.strings]]
+# ## Convert a tag value to uppercase
+# # [[processors.strings.uppercase]]
+# # tag = "method"
+#
+# ## Convert a field value to lowercase and store in a new field
+# # [[processors.strings.lowercase]]
+# # field = "uri_stem"
+# # dest = "uri_stem_normalised"
+#
+# ## Trim leading and trailing whitespace using the default cutset
+# # [[processors.strings.trim]]
+# # field = "message"
+#
+# ## Trim leading characters in cutset
+# # [[processors.strings.trim_left]]
+# # field = "message"
+# # cutset = "\t"
+#
+# ## Trim trailing characters in cutset
+# # [[processors.strings.trim_right]]
+# # field = "message"
+# # cutset = "\r\n"
+#
+# ## Trim the given prefix from the field
+# # [[processors.strings.trim_prefix]]
+# # field = "my_value"
+# # prefix = "my_"
+#
+# ## Trim the given suffix from the field
+# # [[processors.strings.trim_suffix]]
+# # field = "read_count"
+# # suffix = "_count"
+#
+# ## Replace all non-overlapping instances of old with new
+# # [[processors.strings.replace]]
+# # measurement = "*"
+# # old = ":"
+# # new = "_"
+#
+# ## Trims strings based on width
+# # [[processors.strings.left]]
+# # field = "message"
+# # width = 10
+
+
+# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 10
+#
+# ## List of tags to preferentially preserve
+# keep = ["foo", "bar", "baz"]
+
+
+# # Keep the top k metrics, ranked by an aggregation over a period.
+# [[processors.topk]]
+# ## How many seconds between aggregations
+# # period = 10
+#
+# ## How many top metrics to return
+# # k = 10
+#
+# ## Over which tags should the aggregation be done. Globs can be specified, in
+# ## which case any tag matching the glob will be aggregated over. If set to an
+# ## empty list, no aggregation over tags is done
+# # group_by = ['*']
+#
+# ## Over which fields the top k are calculated
+# # fields = ["value"]
+#
+# ## What aggregation to use. Options: sum, mean, min, max
+# # aggregation = "mean"
+#
+# ## Instead of the top k largest metrics, return the bottom k lowest metrics
+# # bottomk = false
+#
+# ## The plugin assigns each metric a GroupBy tag generated from its name and
+# ## tags. If this setting is different from "", the plugin will add a
+# ## tag (whose name will be the value of this setting) to each metric with
+# ## the value of the calculated GroupBy tag. Useful for debugging
+# # add_groupby_tag = ""
+#
+# ## These settings provide a way to know the position of each metric in
+# ## the top k. The 'add_rank_fields' setting allows you to specify for which
+# ## fields the position is required. If the list is non-empty, then a field
+# ## will be added to each and every metric for each string present in this
+# ## setting. This field will contain the ranking of the group that
+# ## the metric belonged to when aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_rank'
+# # add_rank_fields = []
+#
+# ## These settings provide a way to know what values the plugin is generating
+# ## when aggregating metrics. The 'add_aggregate_fields' setting allows you to
+# ## specify for which fields the final aggregation value is required. If the
+# ## list is non-empty, then a field will be added to each and every metric for
+# ## each field present in this setting. This field will contain
+# ## the computed aggregation for the group that the metric belonged to when
+# ## aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_aggregate'
+# # add_aggregate_fields = []
+
+
+# # Rotate a multi field metric into several single field metrics
+# [[processors.unpivot]]
+# ## Tag to use for the name.
+# tag_key = "name"
+# ## Field to use for the name of the value.
+# value_key = "value"
+
+
+###############################################################################
+# AGGREGATOR PLUGINS #
+###############################################################################
+
+
+# # Keep the aggregate basicstats of each metric passing through.
+# [[aggregators.basicstats]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## Configures which basic stats to push as fields
+# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
+
+
+# # Report the final metric of a series
+# [[aggregators.final]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## How long a series can go without updates before it is considered final.
+# series_timeout = "5m"
+
+
+# # Create aggregate histograms.
+# [[aggregators.histogram]]
+# ## The period in which to flush the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## If true, the histogram will be reset on flush instead
+# ## of accumulating the results.
+# reset = false
+#
+# ## Example config that aggregates all fields of the metric.
+# # [[aggregators.histogram.config]]
+# # ## The set of buckets.
+# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
+# # ## The name of the metric.
+# # measurement_name = "cpu"
+#
+# ## Example config that aggregates only specific fields of the metric.
+# # [[aggregators.histogram.config]]
+# # ## The set of buckets.
+# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+# # ## The name of the metric.
+# # measurement_name = "diskio"
+# # ## The concrete fields of the metric
+# # fields = ["io_time", "read_time", "write_time"]
+
+
+# # Keep the aggregate min/max of each metric passing through.
+# [[aggregators.minmax]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+
+
+# # Count the occurrence of values in fields.
+# [[aggregators.valuecounter]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+# ## The fields for which the values will be counted
+# fields = []
+
+
+###############################################################################
+# INPUT PLUGINS #
+###############################################################################
+
+
+# Read metrics about cpu usage
+[[inputs.cpu]]
+ ## Whether to report per-cpu stats or not
+ percpu = true
+ ## Whether to report total system cpu stats or not
+ totalcpu = true
+ ## If true, collect raw CPU time metrics.
+ collect_cpu_time = false
+ ## If true, compute and report the sum of all non-idle CPU states.
+ report_active = false
+
+
+# Read metrics about disk usage by mount point
+[[inputs.disk]]
+ ## By default stats will be gathered for all mount points.
+ ## Setting mount_points will restrict the stats to only the specified mount points.
+ # mount_points = ["/"]
+
+ ## Ignore mount points by filesystem type.
+ ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
+
+
+# Read metrics about disk IO by device
+[[inputs.diskio]]
+ ## By default, telegraf will gather stats for all devices including
+ ## disk partitions.
+ ## Setting devices will restrict the stats to the specified devices.
+ # devices = ["sda", "sdb", "vd*"]
+ ## Uncomment the following line if you need disk serial numbers.
+ # skip_serial_number = false
+ #
+ ## On systems which support it, device metadata can be added in the form of
+ ## tags.
+ ## Currently only Linux is supported via udev properties. You can view
+ ## available properties for a device by running:
+ ## 'udevadm info -q property -n /dev/sda'
+ ## Note: Most, but not all, udev properties can be accessed this way. Properties
+ ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
+ # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
+ #
+ ## Using the same metadata source as device_tags, you can also customize the
+ ## name of the device via templates.
+ ## The 'name_templates' parameter is a list of templates to try and apply to
+ ## the device. The template may contain variables in the form of '$PROPERTY' or
+ ## '${PROPERTY}'. The first template which does not contain any variables not
+ ## present for the device is used as the device name tag.
+ ## The typical use case is for LVM volumes, to get the VG/LV name instead of
+ ## the near-meaningless DM-0 name.
+ # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
+
+
+# Get kernel statistics from /proc/stat
+[[inputs.kernel]]
+ # no configuration
+
+
+# Read metrics about memory usage
+[[inputs.mem]]
+ # no configuration
+
+
+# Get the number of processes and group them by status
+[[inputs.processes]]
+ # no configuration
+
+
+# Read metrics about swap memory usage
+[[inputs.swap]]
+ # no configuration
+
+
+# Read metrics about system load & uptime
+[[inputs.system]]
+ ## Uncomment to remove deprecated metrics.
+ # fielddrop = ["uptime_format"]
+
+
+# # Gather ActiveMQ metrics
+# [[inputs.activemq]]
+# ## ActiveMQ WebConsole URL
+# url = "http://127.0.0.1:8161"
+#
+# ## Required ActiveMQ Endpoint
+# ## deprecated in 1.11; use the url option
+# # server = "127.0.0.1"
+# # port = 8161
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Required ActiveMQ webadmin root path
+# # webadmin = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read stats from aerospike server(s)
+# [[inputs.aerospike]]
+# ## Aerospike servers to connect to (with port)
+# ## This plugin will query all namespaces the aerospike
+# ## server has configured and get stats for them.
+# servers = ["localhost:3000"]
+#
+# # username = "telegraf"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # enable_tls = false
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## If false, skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Read Apache status information (mod_status)
+# [[inputs.apache]]
+# ## An array of URLs to gather from, must be directed at the machine
+# ## readable version of the mod_status page including the auto query string.
+# ## Default is "http://localhost/server-status?auto".
+# urls = ["http://localhost/server-status?auto"]
+#
+# ## Credentials for basic HTTP authentication.
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Monitor APC UPSes connected to apcupsd
+# [[inputs.apcupsd]]
+# # A list of running apcupsd servers to connect to.
+# # If not provided, will default to tcp://127.0.0.1:3551
+# servers = ["tcp://127.0.0.1:3551"]
+#
+# ## Timeout for dialing server.
+# timeout = "5s"
+
+
+# # Gather metrics from Apache Aurora schedulers
+# [[inputs.aurora]]
+# ## Schedulers are the base addresses of your Aurora Schedulers
+# schedulers = ["http://127.0.0.1:8081"]
+#
+# ## Set of role types to collect metrics from.
+# ##
+# ## The scheduler roles are checked each interval by contacting the
+# ## scheduler nodes; zookeeper is not contacted.
+# # roles = ["leader", "follower"]
+#
+# ## Timeout is the max time for total network operations.
+# # timeout = "5s"
+#
+# ## Username and password are sent using HTTP Basic Auth.
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics of bcache from stats_total and dirty_data
+# [[inputs.bcache]]
+# ## Bcache sets path
+# ## If not specified, then default is:
+# bcachePath = "/sys/fs/bcache"
+#
+# ## By default, telegraf gathers stats for all bcache devices
+# ## Setting devices will restrict the stats to the specified
+# ## bcache devices.
+# bcacheDevs = ["bcache0"]
+
+
+# # Collects Beanstalkd server and tubes stats
+# [[inputs.beanstalkd]]
+# ## Server to collect data from
+# server = "localhost:11300"
+#
+# ## List of tubes to gather stats about.
+# ## If no tubes are specified, data is gathered for each tube on the server reported by the list-tubes command
+# tubes = ["notifications"]
+
+
+# # Read BIND nameserver XML statistics
+# [[inputs.bind]]
+# ## An array of BIND XML statistics URIs to gather stats.
+# ## Default is "http://localhost:8053/xml/v3".
+# # urls = ["http://localhost:8053/xml/v3"] +# # gather_memory_contexts = false +# # gather_views = false + + +# # Collect bond interface status, slaves statuses and failures count +# [[inputs.bond]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" +# +# ## By default, telegraf gather stats for all bond interfaces +# ## Setting interfaces will restrict the stats to the specified +# ## bond interfaces. +# # bond_interfaces = ["bond0"] + + +# # Collect Kafka topics and consumers status from Burrow HTTP API. +# [[inputs.burrow]] +# ## Burrow API endpoints in format "schema://host:port". +# ## Default is "http://localhost:8000". +# servers = ["http://localhost:8000"] +# +# ## Override Burrow API prefix. +# ## Useful when Burrow is behind reverse-proxy. +# # api_prefix = "/v3/kafka" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Limit per-server concurrent connections. +# ## Useful in case of large number of topics or consumer groups. +# # concurrent_connections = 20 +# +# ## Filter clusters, default is no filtering. +# ## Values can be specified as glob patterns. +# # clusters_include = [] +# # clusters_exclude = [] +# +# ## Filter consumer groups, default is no filtering. +# ## Values can be specified as glob patterns. +# # groups_include = [] +# # groups_exclude = [] +# +# ## Filter topics, default is no filtering. +# ## Values can be specified as glob patterns. +# # topics_include = [] +# # topics_exclude = [] +# +# ## Credentials for basic HTTP authentication. +# # username = "" +# # password = "" +# +# ## Optional SSL config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + + +# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. +# [[inputs.ceph]] +# ## This is the recommended interval to poll. Too frequent and you will lose +# ## data points due to timeouts during rebalancing and recovery +# interval = '1m' +# +# ## All configuration values are optional, defaults are shown below +# +# ## location of ceph binary +# ceph_binary = "/usr/bin/ceph" +# +# ## directory in which to look for socket files +# socket_dir = "/var/run/ceph" +# +# ## prefix of MON and OSD socket files, used to determine socket type +# mon_prefix = "ceph-mon" +# osd_prefix = "ceph-osd" +# +# ## suffix used to identify socket files +# socket_suffix = "asok" +# +# ## Ceph user to authenticate as +# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands +# gather_cluster_stats = false + + +# # Read specific statistics per cgroup +# [[inputs.cgroup]] +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/cgroup/memory", +# # "/cgroup/memory/child1", +# # "/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + +# # Get standard chrony metrics, requires chronyc executable. 
+# [[inputs.chrony]]
+# ## If true, chronyc tries to perform a DNS lookup for the time server.
+# # dns_lookup = false
+
+
+# # Pull Metric Statistics from Amazon CloudWatch
+# [[inputs.cloudwatch]]
+# ## Amazon Region
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
+# # access_key = ""
+# # secret_key = ""
+# # token = ""
+# # role_arn = ""
+# # profile = ""
+# # shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
+# # metrics are made available to the 1 minute period. Some are collected at
+# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
+# # Note that if a period is configured that is smaller than the minimum for a
+# # particular metric, that metric will not be returned by the Cloudwatch API
+# # and will not be collected by Telegraf.
+# #
+# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
+# period = "5m"
+#
+# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
+# delay = "5m"
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
+# ## gaps or overlap in pulled data
+# interval = "5m"
+#
+# ## Configure the TTL for the internal cache of metrics.
+# # cache_ttl = "1h"
+#
+# ## Metric Statistic Namespace (required)
+# namespace = "AWS/ELB"
+#
+# ## Maximum requests per second. Note that the global default AWS rate limit is
+# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
+# ## maximum of 50.
+# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
+# # ratelimit = 25
+#
+# ## Namespace-wide statistic filters. These allow fewer queries to be made to
+# ## cloudwatch.
+# # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+# # statistic_exclude = []
+#
+# ## Metrics to Pull
+# ## Defaults to all Metrics in Namespace if nothing is provided
+# ## Refreshes Namespace available metrics every 1h
+# #[[inputs.cloudwatch.metrics]]
+# # names = ["Latency", "RequestCount"]
+# #
+# # ## Statistic filters for Metric. These allow for retrieving specific
+# # ## statistics for an individual metric.
+# # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+# # # statistic_exclude = []
+# #
+# # ## Dimension filters for Metric. All dimensions defined for the metric names
+# # ## must be specified in order to retrieve the metric statistics.
+# # [[inputs.cloudwatch.metrics.dimensions]]
+# # name = "LoadBalancerName"
+# # value = "p-example"
+
+
+# # Gather health check statuses from services registered in Consul
+# [[inputs.consul]]
+# ## Consul server address
+# # address = "localhost"
+#
+# ## URI scheme for the Consul server, one of "http", "https"
+# # scheme = "http"
+#
+# ## ACL token used in every request
+# # token = ""
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Data center to query the health checks from
+# # datacenter = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+#
+# ## Consul checks' tag splitting
+# # When tags are formatted like "key:value" with ":" as a delimiter then
+# # they will be split and reported as a proper key:value pair in Telegraf
+# # tag_delimiter = ":"
+
+
+# # Read metrics from one or many couchbase clusters
+# [[inputs.couchbase]]
+# ## specify servers via a url matching:
+# ## [protocol://][:password]@address[:port]
+# ## e.g.
+# ## http://couchbase-0.example.com/
+# ## http://admin:secret@couchbase-0.example.com:8091/
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no protocol is specified, HTTP is used.
+# ## If no port is specified, 8091 is used.
+# servers = ["http://localhost:8091"]
+
+
+# # Read CouchDB Stats from one or more servers
+# [[inputs.couchdb]]
+# ## Works with CouchDB stats endpoints out of the box
+# ## Multiple Hosts from which to read CouchDB stats:
+# hosts = ["http://localhost:8086/_stats"]
+#
+# ## Use HTTP Basic Authentication.
+# # basic_username = "telegraf"
+# # basic_password = "p@ssw0rd"
+
+
+# # Input plugin for DC/OS metrics
+# [[inputs.dcos]]
+# ## The DC/OS cluster URL.
+# cluster_url = "https://dcos-ee-master-1"
+#
+# ## The ID of the service account.
+# service_account_id = "telegraf"
+# ## The private key file for the service account.
+# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
+#
+# ## Path containing login token. If set, will read on every gather.
+# # token_file = "/home/dcos/.dcos/token"
+#
+# ## In all filter options if both include and exclude are empty all items
+# ## will be collected. Arrays may contain glob patterns.
+# ##
+# ## Node IDs to collect metrics from. If a node is excluded, no metrics will
+# ## be collected for its containers or apps.
+# # node_include = []
+# # node_exclude = []
+# ## Container IDs to collect container metrics from.
+# # container_include = []
+# # container_exclude = []
+# ## Container IDs to collect app metrics from.
+# # app_include = []
+# # app_exclude = []
+#
+# ## Maximum concurrent connections to the cluster.
+# # max_connections = 10
+# ## Maximum time to receive a response from the cluster.
+# # response_timeout = "20s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## If false, skip chain & host verification
+# # insecure_skip_verify = true
+#
+# ## Recommended filtering to reduce series cardinality.
+# # [inputs.dcos.tagdrop]
+# # path = ["/var/lib/mesos/slave/slaves/*"]
+
+
+# # Read metrics from one or many disque servers
+# [[inputs.disque]]
+# ## An array of URIs to gather stats about. Specify an ip or hostname
+# ## with optional port and password.
+# ## i.e. disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["localhost"] + + +# # Provide a native collection for dmsetup based statistics for dm-cache +# [[inputs.dmcache]] +# ## Whether to report per-device stats or not +# per_device = true + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] +# +# ## Network is the network protocol name. +# # network = "udp" +# +# ## Domains or subdomains to query. +# # domains = ["."] +# +# ## Query record type. +# ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# # record_type = "A" +# +# ## Dns server port. +# # port = 53 +# +# ## Query timeout in seconds. +# # timeout = 2 + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# +# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# gather_services = false +# +# ## Only collect metrics for these containers, collect all if empty +# container_names = [] +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# container_name_include = [] +# container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## Timeout for docker list, info, and stats commands +# timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...) and +# ## network (eth0, eth1, ...) stats or not +# perdevice = true +# ## Whether to report for each container total blkio and network stats or not +# total = false +# ## Which environment variables should we use as a tag +# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# docker_label_include = [] +# docker_label_exclude = [] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read statistics from one or many dovecot servers +# [[inputs.dovecot]] +# ## specify dovecot servers via an address:port list +# ## e.g. +# ## localhost:24242 +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost:24242"] +# ## Type is one of "user", "domain", "ip", or "global" +# type = "global" +# ## Wildcard matches like "*.com". An empty string "" is same as "*" +# ## If type = "ip" filters should be +# filters = [""] + + +# # Read metrics about docker containers from Fargate/ECS v2 meta endpoints. +# [[inputs.ecs]] +# ## ECS metadata url +# # endpoint_url = "http://169.254.170.2" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. 
+# ## When empty only containers in the "RUNNING" state will be captured.
+# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
+# ## "RESOURCES_PROVISIONED", "STOPPED".
+# # container_status_include = []
+# # container_status_exclude = []
+#
+# ## ecs labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# ecs_label_include = [ "com.amazonaws.ecs.*" ]
+# ecs_label_exclude = []
+#
+# ## Timeout for queries.
+# # timeout = "5s"
+
+
+# # Read stats from one or more Elasticsearch servers or clusters
+# [[inputs.elasticsearch]]
+# ## specify a list of one or more Elasticsearch servers
+# # you can add username and password to your url to use basic authentication:
+# # servers = ["http://user:pass@localhost:9200"]
+# servers = ["http://localhost:9200"]
+#
+# ## Timeout for HTTP requests to the Elasticsearch server(s)
+# http_timeout = "5s"
+#
+# ## When local is true (the default), the node will read only its own stats.
+# ## Set local to false when you want to read the node stats from all nodes
+# ## of the cluster.
+# local = true
+#
+# ## Set cluster_health to true when you want to also obtain cluster health stats
+# cluster_health = false
+#
+# ## Adjust cluster_health_level when you want to also obtain detailed health stats
+# ## The options are
+# ## - indices (default)
+# ## - cluster
+# # cluster_health_level = "indices"
+#
+# ## Set cluster_stats to true when you want to also obtain cluster stats.
+# cluster_stats = false
+#
+# ## Only gather cluster_stats from the master node. For this to work, local must be set to true
+# cluster_stats_only_from_master = true
+#
+# ## Indices to collect; can be one or more index names or _all
+# indices_include = ["_all"]
+#
+# ## One of "shards", "cluster", "indices"
+# indices_level = "shards"
+#
+# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
+# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
+# ## "breaker". By default, all stats are gathered.
+# # node_stats = ["jvm", "http"]
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or more commands that can output to stdout
+# [[inputs.exec]]
+# ## Commands array
+# commands = [
+# "/tmp/test.sh",
+# "/usr/bin/mycollector --foo=bar",
+# "/tmp/collect_*.sh"
+# ]
+#
+# ## Timeout for each command to complete.
+# timeout = "5s"
+#
+# ## measurement name suffix (for separating different commands)
+# name_suffix = "_mycollector"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
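+#
+# ## A minimal sketch of a command usable with the exec plugin above: any
+# ## executable that writes metrics to stdout in the configured data format
+# ## works. With data_format = "influx", a hypothetical /tmp/test.sh could be:
+# ## #!/bin/sh
+# ## echo "mymeasurement,host=web01 value=42i"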
+
+
+# # Read metrics from fail2ban.
+# [[inputs.fail2ban]]
+# ## Use sudo to run fail2ban-client
+# use_sudo = false
+
+
+# # Read devices value(s) from a Fibaro controller
+# [[inputs.fibaro]]
+# ## Required Fibaro controller address/hostname.
+# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
+# url = "http://<controller-address>:80"
+#
+# ## Required credentials to access the API (http://<controller-address>)
+# username = "<username>"
+# password = "<password>"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+
+
+# # Reload and gather from file[s] on telegraf's interval.
+# [[inputs.file]]
+# ## Files to parse each interval.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/**.log -> recursively find all .log files in /var/log
+# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
+# ## /var/log/apache.log -> only read the apache log file
+# files = ["/var/log/apache/access.log"]
+#
+# ## The data format to be read from the files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Count files in a directory
+# [[inputs.filecount]]
+# ## Directory to gather stats about.
+# ## deprecated in 1.9; use the directories option
+# # directory = "/var/cache/apt/archives"
+#
+# ## Directories to gather stats about.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory
+# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory
+# ## /var/log -> count all files in /var/log and all of its subdirectories
+# directories = ["/var/cache/apt/archives"]
+#
+# ## Only count files that match the name pattern. Defaults to "*".
+# name = "*.deb"
+#
+# ## Count files in subdirectories. Defaults to true.
+# recursive = false
+#
+# ## Only count regular files. Defaults to true.
+# regular_only = true
+#
+# ## Only count files that are at least this size. If size is
+# ## a negative number, only count files that are smaller than the
+# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
+# ## Without quotes and units, interpreted as size in bytes.
+# size = "0B"
+#
+# ## Only count files that have not been touched for at least this
+# ## duration. If mtime is negative, only count files that have been
+# ## touched in this duration. Defaults to "0s".
+# mtime = "0s"
+
+
+# # Read stats about given file(s)
+# [[inputs.filestat]]
+# ## Files to gather stats about.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just tail the apache log file
+# ##
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/log/**.log"]
+# ## If true, read the entire file and calculate an md5 checksum.
+# md5 = false
+
+
+# # Read real time temps from fireboard.io servers
+# [[inputs.fireboard]]
+# ## Specify auth token for your account
+# auth_token = "invalidAuthToken"
+# ## You can override the fireboard server URL if necessary
+# # url = https://fireboard.io/api/v1/devices.json
+# ## You can set a different http_timeout if you need to
+# ## You should set a string using a number and time indicator
+# ## for example "12s" for 12 seconds.
+# # http_timeout = "4s"
+
+
+# # Read metrics exposed by fluentd in_monitor plugin
+# [[inputs.fluentd]]
+# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
+# ##
+# ## Endpoint:
+# ## - only one URI is allowed
+# ## - https is not supported
+# endpoint = "http://localhost:24220/api/plugins.json"
+#
+# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
+# exclude = [
+# "monitor_agent",
+# "dummy",
+# ]
+
+
+# # Gather repository information from GitHub hosted repositories.
+# [[inputs.github]]
+# ## List of repositories to monitor.
+# repositories = [
+# "influxdata/telegraf",
+# "influxdata/influxdb"
+# ]
+#
+# ## Github API access token. Unauthenticated requests are limited to 60 per hour.
+# # access_token = ""
+#
+# ## Github API enterprise url. Github Enterprise accounts must specify their base url.
+# # enterprise_base_url = ""
+#
+# ## Timeout for HTTP requests.
+# # http_timeout = "5s"
+
+
+# # Read flattened metrics from one or more GrayLog HTTP endpoints
+# [[inputs.graylog]]
+# ## API endpoint, currently supported API:
+# ##
+# ## - multiple (Ex http://<host>:12900/system/metrics/multiple)
+# ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
+# ##
+# ## For namespace endpoint, the metrics array will be ignored for that call.
+# ## Endpoint can contain namespace and multiple type calls.
+# ##
+# ## Please check http://[graylog-server-ip]:12900/api-browser for the full list
+# ## of endpoints
+# servers = [
+# "http://[graylog-server-ip]:12900/system/metrics/multiple",
+# ]
+#
+# ## Metrics list
+# ## List of metrics can be found on Graylog webservice documentation.
+# ## Or by hitting the web service api at:
+# ## http://[graylog-host]:12900/system/metrics
+# metrics = [
+# "jvm.cl.loaded",
+# "jvm.memory.pools.Metaspace.committed"
+# ]
+#
+# ## Username and password
+# username = ""
+# password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics of haproxy, via socket or csv stats page
+# [[inputs.haproxy]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+# ## Make sure you specify the complete path to the stats endpoint
+# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
+#
+# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
+# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## You can also use local socket with standard wildcard globbing.
+# ## Server address not starting with 'http' will be treated as a possible
+# ## socket, so both examples below are valid.
+# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+#
+# ## By default, some of the fields are renamed from what haproxy calls them.
+# ## Setting this option to true results in the plugin keeping the original
+# ## field names.
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+# ## By default, telegraf gathers temperature data from all disks detected by
+# ## hddtemp.
+# ##
+# ## Only collect temps from the selected disks.
+# ##
+# ## A * as the device name will return the temperature values of all disks.
+# ##
+# # address = "127.0.0.1:7634"
+# # devices = ["sda", "*"]
+
+
+# # Read formatted metrics from one or more HTTP endpoints
+# [[inputs.http]]
+# ## One or more URLs from which to read formatted metrics
+# urls = [
+# "http://localhost/metrics"
+# ]
+#
+# ## HTTP method
+# # method = "GET"
+#
+# ## Optional HTTP headers
+# # headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## HTTP entity-body to send with POST/PUT requests.
+# # body = ""
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+
+
+# # HTTP/HTTPS request given an address, a method and a timeout
+# [[inputs.http_response]]
+# ## Deprecated in 1.12, use 'urls'
+# ## Server address (default http://localhost)
+# # address = "http://localhost"
+#
+# ## List of urls to query.
+# # urls = ["http://localhost"]
+#
+# ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
+# # http_proxy = "http://localhost:8888"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## HTTP Request Method
+# # method = "GET"
+#
+# ## Whether to follow redirects from the server (defaults to false)
+# # follow_redirects = false
+#
+# ## Optional HTTP Request Body
+# # body = '''
+# # {'fake':'data'}
+# # '''
+#
+# ## Optional substring or regex match in body of the response
+# # response_string_match = "\"service_status\": \"up\""
+# # response_string_match = "ok"
+# # response_string_match = "\".*_status\".?:.?\"up\""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## HTTP Request Headers (all values must be strings)
+# # [inputs.http_response.headers]
+# # Host = "github.com"
+#
+# ## Interface to use when dialing an address
+# # interface = "eth0"
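+#
+# ## Illustrative variant (not part of the upstream sample): probe an HTTPS
+# ## endpoint, following redirects and asserting on the response body
+# ## (URL assumed for illustration):
+# # urls = ["https://example.org/health"]
+# # follow_redirects = true
+# # response_string_match = "\"status\": \"ok\""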
+
+
+# # Read flattened metrics from one or more JSON HTTP endpoints
+# [[inputs.httpjson]]
+# ## NOTE This plugin only reads numerical measurements, strings and booleans
+# ## will be ignored.
+#
+# ## Name for the service being polled. Will be appended to the name of the
+# ## measurement e.g. httpjson_webserver_stats
+# ##
+# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
+# name = "webserver_stats"
+#
+# ## URL of each server in the service's cluster
+# servers = [
+# "http://localhost:9999/stats/",
+# "http://localhost:9998/stats/",
+# ]
+# ## Set response_timeout (default 5 seconds)
+# response_timeout = "5s"
+#
+# ## HTTP method to use: GET or POST (case-sensitive)
+# method = "GET"
+#
+# ## List of tag names to extract from top-level of JSON server response
+# # tag_keys = [
+# # "my_tag_1",
+# # "my_tag_2"
+# # ]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## HTTP parameters (all values must be strings). For "GET" requests, data
+# ## will be included in the query. For "POST" requests, data will be included
+# ## in the request body as "x-www-form-urlencoded".
+# # [inputs.httpjson.parameters]
+# # event_type = "cpu_spike"
+# # threshold = "0.75"
+#
+# ## HTTP Headers (all values must be strings)
+# # [inputs.httpjson.headers]
+# # X-Auth-Token = "my-xauth-token"
+# # apiVersion = "v1"
+
+
+# # Gather Icinga2 status
+# [[inputs.icinga2]]
+# ## Required Icinga2 server address (default: "https://localhost:5665")
+# # server = "https://localhost:5665"
+#
+# ## Required Icinga2 object type ("services" or "hosts", default "services")
+# # object_type = "services"
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.influxdb]]
+# ## Works with InfluxDB debug endpoints out of the box,
+# ## but other services can use this format too.
+# ## See the influxdb plugin's README for more details.
+#
+# ## Multiple URLs from which to read InfluxDB-formatted JSON
+# ## Default is "http://localhost:8086/debug/vars".
+# urls = [
+# "http://localhost:8086/debug/vars"
+# ]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## http request & header timeout
+# timeout = "5s"
+
+
+# # Collect statistics about itself
+# [[inputs.internal]]
+# ## If true, collect telegraf memory stats.
+# # collect_memstats = true
+
+
+# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
+# [[inputs.interrupts]]
+# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
+# ## stored as a field.
+# ##
+# ## The default is false for backwards compatibility, and will be changed to
+# ## true in a future version. It is recommended to set to true on new
+# ## deployments.
+# # cpu_as_tag = false
+#
+# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
+# # [inputs.interrupts.tagdrop]
+# # irq = [ "NET_RX", "TASKLET" ]
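+#
+# ## Illustrative counterpart (not part of the upstream sample): a tagpass
+# ## section would instead collect only the listed IRQs, e.g. the local timer
+# ## and rescheduling interrupts on x86 Linux:
+# # [inputs.interrupts.tagpass]
+# # irq = [ "LOC", "RES" ]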
+
+
+# # Read metrics from the bare metal servers via IPMI
+# [[inputs.ipmi_sensor]]
+# ## optionally specify the path to the ipmitool executable
+# # path = "/usr/bin/ipmitool"
+# ##
+# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
+# # privilege = "ADMINISTRATOR"
+# ##
+# ## optionally specify one or more servers via a url matching
+# ## [username[:password]@][protocol[(address)]]
+# ## e.g.
+# ## root:passwd@lan(127.0.0.1)
+# ##
+# ## if no servers are specified, local machine sensor stats will be queried
+# ##
+# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
+# ## gaps or overlap in pulled data
+# interval = "30s"
+#
+# ## Timeout for the ipmitool command to complete
+# timeout = "20s"
+#
+# ## Schema Version: (Optional, defaults to version 1)
+# metric_version = 2
+
+
+# # Gather packets and bytes counters from Linux ipsets
+# [[inputs.ipset]]
+# ## By default, we only show sets which have already matched at least 1 packet.
+# ## set include_unmatched_sets = true to gather them all.
+# include_unmatched_sets = false
+# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
+# use_sudo = false
+# ## The default timeout of 1s for ipset execution can be overridden here:
+# # timeout = "1s"
+
+
+# # Read jobs and cluster metrics from Jenkins instances
+# [[inputs.jenkins]]
+# ## The Jenkins URL
+# url = "http://my-jenkins-instance:8080"
+# # username = "admin"
+# # password = "admin"
+#
+# ## Set response_timeout
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Max Job Build Age filter
+# ## Default 1 hour, ignore builds older than max_build_age
+# # max_build_age = "1h"
+#
+# ## Optional Sub Job Depth filter
+# ## Jenkins can have unlimited layers of sub jobs
+# ## This config will limit the layers of pulling, default value 0 means
+# ## unlimited pulling until no more sub jobs
+# # max_subjob_depth = 0
+#
+# ## Optional Sub Job Per Layer
+# ## In workflow-multibranch-plugin, each branch will be created as a sub job.
+# ## This config will limit the calls to only the latest branches in each layer,
+# ## empty will use default value 10
+# # max_subjob_per_layer = 10
+#
+# ## Jobs to exclude from gathering
+# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
+#
+# ## Nodes to exclude from gathering
+# # node_exclude = [ "node1", "node2" ]
+#
+# ## Worker pool for jenkins plugin only
+# ## Leaving this field empty will use the default value 5
+# # max_connections = 5
+
+
+# # Read JMX metrics through Jolokia
+# [[inputs.jolokia]]
+# # DEPRECATED: the jolokia plugin has been deprecated in favor of the
+# # jolokia2 plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
+#
+# ## This is the context root used to compose the jolokia url
+# ## NOTE that Jolokia requires a trailing slash at the end of the context root
+# ## NOTE that your jolokia security policy must allow for POST requests.
+# context = "/jolokia/"
+#
+# ## This specifies the mode used
+# # mode = "proxy"
+# #
+# ## When in proxy mode this section is used to specify further
+# ## proxy address configurations.
+# ## Remember to change host address to fit your environment.
+# # [inputs.jolokia.proxy]
+# # host = "127.0.0.1"
+# # port = "8080"
+#
+# ## Optional http timeouts
+# ##
+# ## response_header_timeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # response_header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
+#
+# ## Attribute delimiter
+# ##
+# ## When multiple attributes are returned for a single
+# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
+# ## name, and the attribute name, separated by the given delimiter.
+# # delimiter = "_"
+#
+# ## List of servers exposing jolokia read service
+# [[inputs.jolokia.servers]]
+# name = "as-server-01"
+# host = "127.0.0.1"
+# port = "8080"
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## List of metrics collected on above servers
+# ## Each metric consists of a name, a jmx path and either
+# ## a pass or drop slice attribute.
+# ## This collects all heap memory usage metrics.
+# [[inputs.jolokia.metrics]]
+# name = "heap_memory_usage"
+# mbean = "java.lang:type=Memory"
+# attribute = "HeapMemoryUsage"
+#
+# ## This collects thread count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "thread_count"
+# mbean = "java.lang:type=Threading"
+# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
+#
+# ## This collects loaded/unloaded class count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "class_count"
+# mbean = "java.lang:type=ClassLoading"
+# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+
+
+# # Read JMX metrics from a Jolokia REST agent endpoint
+# [[inputs.jolokia2_agent]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# # Add agents URLs to query
+# urls = ["http://localhost:8080/jolokia"]
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_agent.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
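+#
+# ## Illustrative second metric block (not part of the upstream sample),
+# ## reusing the java.lang:type=Memory mbean from the deprecated jolokia
+# ## example above:
+# # [[inputs.jolokia2_agent.metric]]
+# # name = "java_memory"
+# # mbean = "java.lang:type=Memory"
+# # paths = ["HeapMemoryUsage"]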
+
+
+# # Read JMX metrics from a Jolokia REST proxy endpoint
+# [[inputs.jolokia2_proxy]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# ## Proxy agent
+# url = "http://localhost:8080/jolokia"
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add proxy targets to query
+# # default_target_username = ""
+# # default_target_password = ""
+# [[inputs.jolokia2_proxy.target]]
+# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
+# # username = ""
+# # password = ""
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_proxy.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.kapacitor]]
+# ## Multiple URLs from which to read Kapacitor-formatted JSON
+# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
+# urls = [
+# "http://localhost:9092/kapacitor/v1/debug/vars"
+# ]
+#
+# ## Time limit for http requests
+# timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read status information from one or more Kibana servers
+# [[inputs.kibana]]
+# ## specify a list of one or more Kibana servers
+# servers = ["http://localhost:5601"]
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the Kubernetes api
+# [[inputs.kube_inventory]]
+# ## URL for the Kubernetes API
+# url = "https://127.0.0.1"
+#
+# ## Namespace to use. Set to "" to use all namespaces.
+# # namespace = "default"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional Resources to exclude from gathering
+# ## Leave blank to try to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
+# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the kubernetes kubelet api
+# [[inputs.kubernetes]]
+# ## URL for the kubelet
+# url = "http://127.0.0.1:10255"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from a LeoFS Server via SNMP
+# [[inputs.leofs]]
+# ## An array of URLs of the form:
+# ## host [ ":" port]
+# servers = ["127.0.0.1:4020"]
+
+
+# # Provides Linux sysctl fs metrics
+# [[inputs.linux_sysctl_fs]]
+# # no configuration
+
+
+# # Read metrics exposed by Logstash
+# [[inputs.logstash]]
+# ## The URL of the exposed Logstash API endpoint.
+# url = "http://127.0.0.1:9600"
+#
+# ## Use Logstash 5 single pipeline API, set to true when monitoring
+# ## Logstash 5.
+# # single_pipeline = false
+#
+# ## Enable optional collection components. Can contain
+# ## "pipelines", "process", and "jvm".
+# # collect = ["pipelines", "process", "jvm"]
+#
+# ## Timeout for HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional HTTP Basic Auth credentials.
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Use TLS but skip chain & host verification.
+# # insecure_skip_verify = false
+#
+# ## Optional HTTP headers.
+# # [inputs.logstash.headers]
+# # "X-Special-Header" = "Special-Value"
+
+
+# # Read metrics from local Lustre service on OST, MDS
+# [[inputs.lustre2]]
+# ## An array of /proc globs to search for Lustre stats
+# ## If not specified, the default will work on Lustre 2.5.x
+# ##
+# # ost_procfiles = [
+# # "/proc/fs/lustre/obdfilter/*/stats",
+# # "/proc/fs/lustre/osd-ldiskfs/*/stats",
+# # "/proc/fs/lustre/obdfilter/*/job_stats",
+# # ]
+# # mds_procfiles = [
+# # "/proc/fs/lustre/mdt/*/md_stats",
+# # "/proc/fs/lustre/mdt/*/job_stats",
+# # ]
+
+
+# # Gathers metrics from the /3.0/reports MailChimp API
+# [[inputs.mailchimp]]
+# ## MailChimp API key
+# ## get from https://admin.mailchimp.com/account/api/
+# api_key = "" # required
+# ## Reports for campaigns sent more than days_old ago will not be collected.
+# ## 0 means collect all.
+# days_old = 0
+# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
+# # campaign_id = ""
+
+
+# # Retrieves information on a specific host in a MarkLogic Cluster
+# [[inputs.marklogic]]
+# ## Base URL of the MarkLogic HTTP Server.
+# url = "http://localhost:8002"
+#
+# ## List of specific hostnames to retrieve information. At least (1) required.
+# # hosts = ["hostname1", "hostname2"]
+#
+# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or many mcrouter servers
+# [[inputs.mcrouter]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
+# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
+#
+# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
+# # timeout = "5s" + + +# # Read metrics from one or many memcached servers +# [[inputs.memcached]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # unix_sockets = ["/var/run/memcached.sock"] + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# ## Timeout, in ms. +# timeout = 100 +# ## A list of Mesos masters. +# masters = ["http://localhost:5050"] +# ## Master metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "framework_offers", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# "allocator", +# ] +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# ## Slave metrics groups to be collected, by default, all enabled. +# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol +# [[inputs.minecraft]] +# ## Address of the Minecraft server. +# # server = "localhost" +# +# ## Server RCON Port. +# # port = "25575" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URLs of the form: +# ## "mongodb://" [user ":" pass "@"] host [ ":" port] +# ## For example: +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# servers = ["mongodb://127.0.0.1:27017"] +# +# ## When true, collect per database stats +# # gather_perdb_stats = false +# +# ## When true, collect per collection stats +# # gather_col_stats = false +# +# ## List of db where collections stats are collected +# ## If empty, all db are concerned +# # col_stats_dbs = ["local"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Aggregates the contents of multiple files into a single point +# [[inputs.multifile]] +# ## Base directory where telegraf will look for files. +# ## Omit this option to use absolute paths. +# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" +# +# ## If true, Telegraf discard all data when a single file can't be read. +# ## Else, Telegraf omits the field generated from this file. +# # fail_early = true +# +# ## Files to parse each interval. +# [[inputs.multifile.file]] +# file = "in_pressure_input" +# dest = "pressure" +# conversion = "float" +# [[inputs.multifile.file]] +# file = "in_temp_input" +# dest = "temperature" +# conversion = "float(3)" +# [[inputs.multifile.file]] +# file = "in_humidityrelative_input" +# dest = "humidityrelative" +# conversion = "float(3)" + + +# # Read metrics from one or many mysql servers +# [[inputs.mysql]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] +# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name +# ## e.g. 
+# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] +# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] +# # +# ## If no servers are specified, then localhost is used as the host. +# servers = ["tcp(127.0.0.1:3306)/"] +# +# ## Selects the metric output format. +# ## +# ## This option exists to maintain backwards compatibility, if you have +# ## existing metrics do not set or change this value until you are ready to +# ## migrate to the new format. +# ## +# ## If you do not have existing metrics from this plugin set to the latest +# ## version. +# ## +# ## Telegraf >=1.6: metric_version = 2 +# ## <1.6: metric_version = 1 (or unset) +# metric_version = 2 +# +# ## the limits for metrics form perf_events_statements +# perf_events_statements_digest_text_limit = 120 +# perf_events_statements_limit = 250 +# perf_events_statements_time_limit = 86400 +# # +# ## if the list is empty, then metrics are gathered from all databasee tables +# table_schema_databases = [] +# # +# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list +# gather_table_schema = false +# # +# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST +# gather_process_list = true +# # +# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS +# gather_user_statistics = true +# # +# ## gather auto_increment columns and max values from information schema +# gather_info_schema_auto_inc = true +# # +# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS +# gather_innodb_metrics = true +# # +# ## gather metrics from SHOW SLAVE STATUS command output +# gather_slave_status = true +# # +# ## gather metrics from SHOW BINARY LOGS command output +# gather_binary_logs = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE +# gather_table_io_waits = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS +# gather_table_lock_waits = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE +# gather_index_io_waits = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS +# gather_event_waits = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME +# gather_file_events_stats = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST +# gather_perf_events_statements = false +# # +# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) +# interval_slow = "30m" +# +# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Provides metrics about the state of a NATS server +# [[inputs.nats]] +# ## The address of the monitoring endpoint of the NATS server +# server = "http://localhost:8222" +# +# ## Maximum time to receive response +# # response_timeout = "5s" + + +# # Neptune Apex data collector +# [[inputs.neptune_apex]] +# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. +# ## Measurements will be logged under "apex". +# +# ## The base URL of the local Apex(es). If you specify more than one server, they will +# ## be differentiated by the "source" tag. +# servers = [ +# "http://apex.local", +# ] +# +# ## The response_timeout specifies how long to wait for a reply from the Apex. 
+# #response_timeout = "5s" + + +# # Read metrics about network interface usage +# [[inputs.net]] +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. +# ## +# # interfaces = ["eth0"] +# ## +# ## On linux systems telegraf also collects protocol stats. +# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. +# ## +# # ignore_protocol_stats = false +# ## + + +# # Collect response time of a TCP or UDP connection +# [[inputs.net_response]] +# ## Protocol, must be "tcp" or "udp" +# ## NOTE: because the "udp" protocol does not respond to requests, it requires +# ## a send/expect string pair (see below). +# protocol = "tcp" +# ## Server address (default localhost) +# address = "localhost:80" +# +# ## Set timeout +# # timeout = "1s" +# +# ## Set read timeout (only used if expecting a response) +# # read_timeout = "1s" +# +# ## The following options are required for UDP checks. For TCP, they are +# ## optional. The plugin will send the given string to the server and then +# ## expect to receive the given 'expect' string back. +# ## string sent to the server +# # send = "ssh" +# ## expected string in answer +# # expect = "ssh" +# +# ## Uncomment to remove deprecated fields +# # fielddrop = ["result_type", "string_found"] + + +# # Read TCP metrics such as established, time wait and sockets counts. +# [[inputs.netstat]] +# # no configuration + + +# # Read Nginx's basic status information (ngx_http_stub_status_module) +# [[inputs.nginx]] +# # An array of Nginx stub_status URI to gather stats. +# urls = ["http://localhost/server_status"] +# +# ## Optional TLS Config +# tls_ca = "/etc/telegraf/ca.pem" +# tls_cert = "/etc/telegraf/cert.cer" +# tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Read Nginx Plus' full status information (ngx_http_status_module) +# [[inputs.nginx_plus]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx Plus Api documentation +# [[inputs.nginx_plus_api]] +# ## An array of API URI to gather stats. 
+# urls = ["http://localhost/api"] +# +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. +# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## If false, set the -n ntpq flag. Can reduce metric gather time. +# dns_lookup = true + + +# # Pulls statistics from nvidia GPUs attached to the host +# [[inputs.nvidia_smi]] +# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/usr/bin/nvidia-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # OpenLDAP cn=Monitor plugin +# [[inputs.openldap]] +# host = "localhost" +# port = 389 +# +# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. +# # note that port will likely need to be changed to 636 for ldaps +# # valid options: "" | "starttls" | "ldaps" +# tls = "" +# +# # skip peer certificate verification. Default is false. 
+
+
+# # Get standard NTP query metrics from OpenNTPD.
+# [[inputs.openntpd]]
+# ## Run ntpctl binary with sudo.
+# # use_sudo = false
+#
+# ## Location of the ntpctl binary.
+# # binary = "/usr/sbin/ntpctl"
+#
+# ## Maximum time the ntpctl binary is allowed to run.
+# # timeout = "5ms"
+
+
+# # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
+# [[inputs.opensmtpd]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the smtpctl binary can be overridden with:
+# binary = "/usr/sbin/smtpctl"
+#
+# ## The default timeout of 1000ms can be overridden with (in milliseconds):
+# timeout = 1000
+
+
+# # Read current weather and forecasts data from openweathermap.org
+# [[inputs.openweathermap]]
+# ## OpenWeatherMap API key.
+# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+#
+# ## City IDs to collect weather data from.
+# city_id = ["5391959"]
+#
+# ## APIs to fetch; can contain "weather" or "forecast".
+# fetch = ["weather", "forecast"]
+#
+# ## OpenWeatherMap base URL
+# # base_url = "https://api.openweathermap.org/"
+#
+# ## Timeout for HTTP response.
+# # response_timeout = "5s"
+#
+# ## Preferred unit system for temperature and wind speed. Can be one of
+# ## "metric", "imperial", or "standard".
+# # units = "metric"
+#
+# ## Query interval; OpenWeatherMap updates their weather data every 10
+# ## minutes.
+# interval = "10m"
+
+
+# # Read metrics of passenger using passenger-status
+# [[inputs.passenger]]
+# ## Path of passenger-status.
+# ##
+# ## The plugin gathers metrics by parsing the XML output of passenger-status
+# ## More information about the tool:
+# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
+# ##
+# ## If no path is specified, then the plugin simply executes passenger-status,
+# ## hoping it can be found in your PATH
+# command = "passenger-status -v --show=xml"
+
+
+# # Gather counters from PF
+# [[inputs.pf]]
+# ## PF requires root access on most systems.
+# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
+# ## Users must configure sudo to allow the telegraf user to run pfctl with no password.
+# ## pfctl can be restricted to only the list command "pfctl -s info".
+# use_sudo = false
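+#
+# ## Illustrative sudoers entry for the restricted setup described above
+# ## (the pfctl path varies by system; not part of the upstream sample):
+# # telegraf ALL=(root) NOPASSWD: /sbin/pfctl -s info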
+
+
+# # Read metrics of phpfpm, via HTTP status page or socket
+# [[inputs.phpfpm]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port and path
+# ##
+# ## Plugin can be configured in three modes (any of them can be used):
+# ## - http: the URL must start with http:// or https://, ie:
+# ## "http://localhost/status"
+# ## "http://192.168.130.1/status?full"
+# ##
+# ## - unixsocket: path to fpm socket, ie:
+# ## "/var/run/php5-fpm.sock"
+# ## or using a custom fpm status path:
+# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
+# ##
+# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
+# ## "fcgi://10.0.0.12:9000/status"
+# ## "cgi://10.0.10.12:9001/status"
+# ##
+# ## Example of multiple gathering from local socket and remote host
+# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
+# urls = ["http://localhost/status"]
+#
+# ## Duration allowed to complete HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Ping given url(s) and return statistics
+# [[inputs.ping]]
+# ## List of urls to ping
+# urls = ["example.org"]
+#
+# ## Number of pings to send per collection (ping -c <COUNT>)
+# # count = 1
+#
+# ## Interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
+# # ping_interval = 1.0
+#
+# ## Per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
+# # timeout = 1.0
+#
+# ## Total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
+# # deadline = 10
+#
+# ## Interface or source address to send ping from (ping -I[-S] <INTERFACE/SRC_ADDR>)
+# # interface = ""
+#
+# ## How to ping. "native" doesn't have external dependencies, while "exec" depends on 'ping'.
+# # method = "exec"
+#
+# ## Specify the ping executable binary, default is "ping"
+# # binary = "ping"
+#
+# ## Arguments for ping command. When arguments is not empty, system binary will be used and
+# ## other options (ping_interval, timeout, etc) will be ignored.
+# # arguments = ["-c", "3"]
+#
+# ## Use only ipv6 addresses when resolving hostnames.
+# # ipv6 = false
+
+
+# # Measure postfix queue statistics
+# [[inputs.postfix]]
+# ## Postfix queue directory. If not provided, telegraf will try to use
+# ## 'postconf -h queue_directory' to determine it.
+# # queue_directory = "/var/spool/postfix"
+
+
+# # Read metrics from one or many PowerDNS servers
+# [[inputs.powerdns]]
+# ## An array of sockets to gather stats about.
+# ## Specify a path to unix socket.
+# unix_sockets = ["/var/run/pdns.controlsocket"]
+
+
+# # Read metrics from one or many PowerDNS Recursor servers
+# [[inputs.powerdns_recursor]]
+# ## Path to the Recursor control socket.
+# unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
+#
+# ## Directory to create receive socket. This default is likely not writable,
+# ## please reference the full plugin documentation for a recommended setup.
+# # socket_dir = "/var/run/"
+# ## Socket permissions for the receive socket.
+# # socket_mode = "0666" + + +# # Monitor process cpu and memory usage +# [[inputs.procstat]] +# ## PID file to monitor process +# pid_file = "/var/run/nginx.pid" +# ## executable name (ie, pgrep ) +# # exe = "nginx" +# ## pattern as argument for pgrep (ie, pgrep -f ) +# # pattern = "nginx" +# ## user as argument for pgrep (ie, pgrep -u ) +# # user = "nginx" +# ## Systemd unit name +# # systemd_unit = "nginx.service" +# ## CGroup name or path +# # cgroup = "systemd/system.slice/nginx.service" +# +# ## Windows service name +# # win_service = "" +# +# ## override for process_name +# ## This is optional; default is sourced from /proc//status +# # process_name = "bar" +# +# ## Field name prefix +# # prefix = "" +# +# ## When true add the full cmdline as a tag. +# # cmdline_tag = false +# +# ## Add PID as a tag instead of a field; useful to differentiate between +# ## processes whose tags are otherwise the same. Can create a large number +# ## of series, use judiciously. +# # pid_tag = false +# +# ## Method to use when finding process IDs. Can be one of 'pgrep', or +# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while +# ## the native finder performs the search directly in a manor dependent on the +# ## platform. Default is 'pgrep' +# # pid_finder = "pgrep" + + +# # Reads last_run_summary.yaml file and converts to measurments +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Reads metrics from RabbitMQ servers via the Management Plugin +# [[inputs.rabbitmq]] +# ## Management Plugin url. (default: http://localhost:15672) +# # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" +# ## Credentials +# # username = "guest" +# # password = "guest" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to gather as the rabbitmq_node measurement. If not +# ## specified, metrics for all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] +# +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. +# # queues = ["telegraf"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["telegraf"] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# queue_name_include = [] +# queue_name_exclude = [] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. 
+# urls = ["http://localhost:8080/_raindrops"] + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] +# +# ## specify server password +# # password = "s#cr@t%" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Read metrics from one or many RethinkDB servers +# [[inputs.rethinkdb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## rethinkdb://user:auth_key@10.10.3.30:28105, +# ## rethinkdb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. +# servers = ["127.0.0.1:28015"] +# ## +# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, +# ## protocol have to be named "rethinkdb2" - it will use 1_0 H. +# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] +# ## +# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol +# ## have to be named "rethinkdb". +# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Read API usage and limits for a Salesforce organisation +# [[inputs.salesforce]] +# ## specify your credentials +# ## +# username = "your_username" +# password = "your_password" +# ## +# ## (optional) security token +# # security_token = "your_security_token" +# ## +# ## (optional) environment type (sandbox or production) +# ## default is: production +# ## +# # environment = "production" +# ## +# ## (optional) API version (default: "39.0") +# ## +# # version = "39.0" + + +# # Read metrics from storage devices supporting S.M.A.R.T. +# [[inputs.smart]] +# ## Optionally specify the path to the smartctl executable +# # path = "/usr/bin/smartctl" +# +# ## On most platforms smartctl requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl. +# ## Sudo must be configured to to allow the telegraf user to run smartctl +# ## without a password. +# # use_sudo = false +# +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stoped rotating. +# ## See --nocheck in the man pages for smartctl. +# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" +# +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false +# +# ## Optionally specify devices to exclude from reporting. +# # excludes = [ "/dev/pass6" ] +# +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan) for S.M.A.R.T. devices will +# ## done and all found will be included except for the +# ## excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam" ] +# +# ## Timeout for the smartctl command to complete. 
+# # timeout = "30s" + + +# # Retrieves SNMP values from remote agents +# [[inputs.snmp]] +# agents = [ "127.0.0.1:161" ] +# ## Timeout for each SNMP query. +# timeout = "5s" +# ## Number of retries to attempt within timeout. +# retries = 3 +# ## SNMP version, values can be 1, 2, or 3 +# version = 2 +# +# ## SNMP community string. +# community = "public" +# +# ## The GETBULK max-repetitions parameter +# max_repetitions = 10 +# +# ## SNMPv3 auth parameters +# #sec_name = "myuser" +# #auth_protocol = "md5" # Values: "MD5", "SHA", "" +# #auth_password = "pass" +# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv" +# #context_name = "" +# #priv_protocol = "" # Values: "DES", "AES", "" +# #priv_password = "" +# +# ## measurement name +# name = "system" +# [[inputs.snmp.field]] +# name = "hostname" +# oid = ".1.0.0.1.1" +# [[inputs.snmp.field]] +# name = "uptime" +# oid = ".1.0.0.1.2" +# [[inputs.snmp.field]] +# name = "load" +# oid = ".1.0.0.1.3" +# [[inputs.snmp.field]] +# oid = "HOST-RESOURCES-MIB::hrMemorySize" +# +# [[inputs.snmp.table]] +# ## measurement name +# name = "remote_servers" +# inherit_tags = [ "hostname" ] +# [[inputs.snmp.table.field]] +# name = "server" +# oid = ".1.0.0.0.1.0" +# is_tag = true +# [[inputs.snmp.table.field]] +# name = "connections" +# oid = ".1.0.0.0.1.1" +# [[inputs.snmp.table.field]] +# name = "latency" +# oid = ".1.0.0.0.1.2" +# +# [[inputs.snmp.table]] +# ## auto populate table's fields using the MIB +# oid = "HOST-RESOURCES-MIB::hrNetworkTable" + + +# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. +# [[inputs.snmp_legacy]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = "127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# +# # SNMP TABLEs +# # table without mapping neither subtables +# 
+# [[inputs.snmp.table]]
+# name = "iftable1"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+#
+# # table without mapping but with subtables
+# [[inputs.snmp.table]]
+# name = "iftable2"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
+#
+# # table with mapping but without subtables
+# [[inputs.snmp.table]]
+# name = "iftable3"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # if empty, get all instances
+# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+# # if empty, get all subtables
+#
+# # table with both mapping and subtables
+# [[inputs.snmp.table]]
+# name = "iftable4"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # if empty, get all instances
+# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+# # if empty, get all subtables
+# # sub_tables do not have to be "real" subtables
+# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
+
+
+# # Read stats from one or more Solr servers or cores
+# [[inputs.solr]]
+# ## specify a list of one or more Solr servers
+# servers = ["http://localhost:8983"]
+#
+# ## specify a list of one or more Solr cores (default - all)
+# # cores = ["main"]
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+
+
+# # Read metrics from Microsoft SQL Server
+# [[inputs.sqlserver]]
+# ## Specify instances to monitor with a list of connection strings.
+# ## All connection parameters are optional.
+# ## By default, the host is localhost, listening on default port, TCP 1433.
+# ## For Windows, the user is the currently running AD user (SSO).
+# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
+# ## parameters.
+# # servers = [
+# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
+# # ]
+#
+# ## Optional parameter, setting this to 2 will use a new version
+# ## of the collection queries that break compatibility with the original
+# ## dashboards.
+# query_version = 2
+#
+# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# # azuredb = false
+#
+# ## If you would like to exclude some of the metrics queries, list them here
+# ## Possible choices:
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - DatabaseIO
+# ## - DatabaseProperties
+# ## - CPUHistory
+# ## - DatabaseSize
+# ## - DatabaseStats
+# ## - MemoryClerk
+# ## - VolumeSpace
+# ## - PerformanceMetrics
+# ## - Schedulers
+# ## - AzureDBResourceStats
+# ## - AzureDBResourceGovernance
+# ## - SqlRequests
+# ## - ServerProperties
+# exclude_query = [ 'Schedulers' ]
+
+
+# # Gather timeseries from Google Cloud Platform v3 monitoring API
+# [[inputs.stackdriver]]
+# ## GCP Project
+# project = "erudite-bloom-151019"
+#
+# ## Include timeseries that start with the given metric type.
+# metric_type_prefix_include = [
+# "compute.googleapis.com/",
+# ]
+#
+# ## Exclude timeseries that start with the given metric type.
+# # metric_type_prefix_exclude = []
+#
+# ## Many metrics are updated once per minute; it is recommended to override
+# ## the agent level interval with a value of 1m or greater.
+# interval = "1m"
+#
+# ## Maximum number of API calls to make per second. The quota for accounts
+# ## varies, it can be viewed on the API dashboard:
+# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
+# # rate_limit = 14
+#
+# ## The delay and window options control the number of points selected on
+# ## each gather. When set, metrics are gathered between:
+# ## start: now() - delay - window
+# ## end: now() - delay
+# #
+# ## Collection delay; if set too low metrics may not yet be available.
+# # delay = "5m"
+# #
+# ## If unset, the window will start at 1m and be updated dynamically to span
+# ## the time between calls (approximately the length of the plugin interval).
+# # window = "1m"
+#
+# ## TTL for cached list of metric types. This is the maximum amount of time
+# ## it may take to discover new metrics.
+# # cache_ttl = "1h"
+#
+# ## If true, raw bucket counts are collected for distribution value types.
+# ## For a more lightweight collection, you may wish to disable and use
+# ## distribution_aggregation_aligners instead.
+# # gather_raw_distribution_buckets = true
+#
+# ## Aggregate functions to be used for metrics whose value type is
+# ## distribution. These aggregate values are recorded in addition to raw
+# ## bucket counts, if they are enabled.
+# ##
+# ## For a list of aligner strings see:
+# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
+# # distribution_aggregation_aligners = [
+# # "ALIGN_PERCENTILE_99",
+# # "ALIGN_PERCENTILE_95",
+# # "ALIGN_PERCENTILE_50",
+# # ]
+#
+# ## Filters can be added to reduce the number of time series matched. All
+# ## functions are supported: starts_with, ends_with, has_substring, and
+# ## one_of. Only the '=' operator is supported.
+# ##
+# ## The logical operators when combining filters are defined statically using
+# ## the following values:
+# ## filter ::= <resource_labels> {AND <metric_labels>}
+# ## resource_labels ::= <resource_label> {OR <resource_label>}
+# ## metric_labels ::= <metric_label> {OR <metric_label>}
+# ##
+# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
+# #
+# ## Resource labels refine the time series selection with the following expression:
+# ## resource.labels.<key> = <value>
+# # [[inputs.stackdriver.filter.resource_labels]]
+# # key = "instance_name"
+# # value = 'starts_with("localhost")'
+# #
+# ## Metric labels refine the time series selection with the following expression:
+# ## metric.labels.<key> = <value>
+# # [[inputs.stackdriver.filter.metric_labels]]
+# # key = "device_name"
+# # value = 'one_of("sda", "sdb")'
+
+
+# # Reads metrics from a Teamspeak 3 Server via ServerQuery
+# [[inputs.teamspeak]]
+# ## Server address for Teamspeak 3 ServerQuery
+# # server = "127.0.0.1:10011"
+# ## Username for ServerQuery
+# username = "serverqueryuser"
+# ## Password for ServerQuery
+# password = "secret"
+# ## Array of virtual servers
+# # virtual_servers = [1]
+
+
+# # Read metrics about temperature
+# [[inputs.temp]]
+# # no configuration
+
+
+# # Read Tengine's basic status information (ngx_http_reqstat_module)
+# [[inputs.tengine]]
+# # An array of Tengine reqstat module URI to gather stats.
+# urls = ["http://127.0.0.1/us"]
+#
+# # HTTP response timeout (default: 5s)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.cer"
+# # tls_key = "/etc/telegraf/key.key"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Gather metrics from the Tomcat server status page.
+# [[inputs.tomcat]]
+# ## URL of the Tomcat server status
+# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
+#
+# ## HTTP Basic Auth Credentials
+# # username = "tomcat"
+# # password = "s3cret"
+#
+# ## Request timeout
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Inserts sine and cosine waves for demonstration purposes
+# [[inputs.trig]]
+# ## Set the amplitude
+# amplitude = 10.0
+
+
+# # Read Twemproxy stats data
+# [[inputs.twemproxy]]
+# ## Twemproxy stats address and port (no scheme)
+# addr = "localhost:22222"
+# ## Monitor pool name
+# pools = ["redis_pool", "mc_pool"]
+
+
+# # A plugin to collect stats from the Unbound DNS resolver
+# [[inputs.unbound]]
+# ## Address of server to connect to, read from unbound conf default, optionally ':port'
+# ## Will lookup IP if given a hostname
+# server = "127.0.0.1:8953"
+#
+# ## If running as a restricted user you can prepend sudo for additional access:
+# # use_sudo = false
+#
+# ## The default location of the unbound-control binary can be overridden with:
+# # binary = "/usr/sbin/unbound-control"
+#
+# ## The default timeout of 1s can be overridden with:
+# # timeout = "1s"
+#
+# ## When set to true, thread metrics are tagged with the thread id.
+# ##
+# ## The default is false for backwards compatibility, and will be changed to
+# ## true in a future version. It is recommended to set to true on new
+# ## deployments.
+# thread_as_tag = false
+
+
+# # Read uWSGI metrics.
+# [[inputs.uwsgi]]
+# ## List with urls of uWSGI Stats servers. URL must match pattern:
+# ## scheme://address[:port]
+# ##
+# ## For example:
+# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
+# servers = ["tcp://127.0.0.1:1717"]
+#
+# ## General connection timeout
+# # timeout = "5s"
+
+
+# # A plugin to collect stats from Varnish HTTP Cache
+# [[inputs.varnish]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the varnishstat binary can be overridden with:
+# binary = "/usr/bin/varnishstat"
+#
+# ## By default, telegraf gathers stats for 3 metric points.
+# ## Setting stats will override the defaults shown below.
+# ## Glob matching can be used, ie, stats = ["MAIN.*"]
+# ## stats may also be set to ["*"], which will collect all stats
+# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
+#
+# ## Optional name for the varnish instance (or working directory) to query
+# ## Usually appended after -n in varnish cli
+# # instance_name = "instanceName"
+#
+# ## Timeout for varnishstat command
+# # timeout = "1s"
+
+
+# # Monitor wifi signal strength and quality
+# [[inputs.wireless]]
+# ## Sets 'proc' directory path
+# ## If not specified, then default is /proc
+# # host_proc = "/proc"
+
+
+# # Reads metrics from an SSL certificate
+# [[inputs.x509_cert]]
+# ## List certificate sources
+# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
+#
+# ## Timeout for SSL connection
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+
+
+# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
+# [[inputs.zfs]]
+# ## ZFS kstat path. Ignored on FreeBSD
Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## If not specified, then default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# ## By default, don't gather zpool stats +# # poolMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + + +# # AMQP consumer plugin +# [[inputs.amqp_consumer]] +# ## Broker to consume from. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to consume from. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Name of the exchange to declare. If unset, no exchange will be declared. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_propery" = "timestamp"} +# +# ## AMQP queue name. +# queue = "telegraf" +# +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## If true, queue will be passively declared. +# # queue_passive = false +# +# ## A binding between the exchange and queue using this binding key is +# ## created. If unset, no binding is created. +# binding_key = "#" +# +# ## Maximum number of messages server should give to the worker. +# # prefetch_count = 50 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Auth method. 
PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read Cassandra metrics through Jolokia +# [[inputs.cassandra]] +# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the +# ## jolokia2 plugin instead. +# ## +# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 +# +# context = "/jolokia/read" +# ## List of cassandra servers exposing jolokia read service +# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] +# ## List of metrics collected on above servers +# ## Each metric consists of a jmx path. +# ## This will collect all heap memory usage metrics from the jvm and +# ## ReadLatency metrics for all keyspaces and tables. +# ## "type=Table" in the query works with Cassandra3.0. Older versions might +# ## need to use "type=ColumnFamily" +# metrics = [ +# "/java.lang:type=Memory/HeapMemoryUsage", +# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" +# ] + + +# # Cisco GNMI telemetry input plugin based on GNMI telemetry data produced in IOS XR +# [[inputs.cisco_telemetry_gnmi]] +# ## Address and port of the GNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## GNMI encoding requested (one of: "proto", "json", "json_ietf") +# # encoding = "proto" +# +# ## redial in case of failures after +# redial = "10s" +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## GNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# #[inputs.cisco_telemetry_gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.cisco_telemetry_gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructe inside it that should be subscribed to (similar to an XPath) +# ## YANG models can be found e.g. 
here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" + + +# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms +# [[inputs.cisco_telemetry_mdt]] +# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when +# ## using the grpc transport. +# transport = "grpc" +# +# ## Address and port to host telemetry listener +# service_address = ":57000" +# +# ## Enable TLS; grpc transport only. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Enable TLS client authentication and define allowed CA certificates; grpc +# ## transport only. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Define aliases to map telemetry encoding paths to simple measurement names +# [inputs.cisco_telemetry_mdt.aliases] +# ifstats = "ietf-interfaces:interfaces-state/interface/statistics" + + +# # Read metrics from Google PubSub +# [[inputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub subscription. +# project = "my-project" +# +# ## Required. Name of PubSub subscription to ingest metrics from. +# subscription = "my-subscription" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. Number of seconds to wait before attempting to restart the +# ## PubSub subscription receiver after an unexpected error. +# ## If the streaming pull for a PubSub Subscription fails (receiver), +# ## the agent attempts to restart receiving messages after this many seconds. +# # retry_delay_seconds = 5 +# +# ## Optional. Maximum byte length of a message to consume. +# ## Larger messages are dropped with an error. If less than 0 or unspecified, +# ## treated as no limit. +# # max_message_len = 1000000 +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## The following are optional Subscription ReceiveSettings in PubSub. +# ## Read more about these values: +# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings +# +# ## Optional. 
Maximum number of seconds for which a PubSub subscription +# ## should auto-extend the PubSub ACK deadline for each message. If less than +# ## 0, auto-extension is disabled. +# # max_extension = 0 +# +# ## Optional. Maximum number of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_messages = 0 +# +# ## Optional. Maximum size in bytes of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_bytes = 0 +# +# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn +# ## to pull messages from PubSub concurrently. This limit applies to each +# ## subscription separately and is treated as the PubSub default if less than +# ## 1. Note this setting does not limit the number of messages that can be +# ## processed concurrently (use "max_outstanding_messages" instead). +# # max_receiver_go_routines = 0 +# +# ## Optional. If true, Telegraf will attempt to base64 decode the +# ## PubSub message data before parsing +# # base64_data = false + + +# # Google Cloud Pub/Sub Push HTTP listener +# [[inputs.cloud_pubsub_push]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Application secret to verify messages originate from Cloud Pub/Sub +# # token = "" +# +# ## Path to listen to. +# # path = "/" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response. This should be set to a value +# ## large enough that you can send at least 'metric_batch_size' number of messages within the +# ## duration. +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag. +# # add_meta = false +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read logging output from the Docker engine +# [[inputs.docker_log]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# # endpoint = "unix:///var/run/docker.sock" +# +# ## When true, container logs are read from the beginning; otherwise +# ## reading begins at the end of the log. +# # from_beginning = false +# +# ## Timeout for Docker API calls. +# # timeout = "5s" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# # docker_label_include = [] +# # docker_label_exclude = [] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Influx HTTP write listener +# [[inputs.http_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# max_body_size = "500MiB" +# +# ## Maximum line size allowed to be sent in bytes. +# ## 0 means to use the default of 65536 bytes (64 kibibytes) +# max_line_size = "64KiB" +# +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Path to listen to. +# # path = "/telegraf" +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. 
+# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Influx HTTP write listener +# [[inputs.influxdb_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# max_body_size = "500MiB" +# +# ## Maximum line size allowed to be sent in bytes. +# ## 0 means to use the default of 65536 bytes (64 kibibytes) +# max_line_size = "64KiB" +# +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Read JTI OpenConfig Telemetry from listed sensors +# [[inputs.jti_openconfig_telemetry]] +# ## List of device addresses to collect telemetry from +# servers = ["localhost:1883"] +# +# ## Authentication details. Username and password are must if device expects +# ## authentication. Client ID must be unique when connecting from multiple instances +# ## of telegraf to the same device +# username = "user" +# password = "pass" +# client_id = "telegraf" +# +# ## Frequency to get data +# sample_frequency = "1000ms" +# +# ## Sensors to subscribe for +# ## A identifier for each sensor can be provided in path by separating with space +# ## Else sensor path will be used as identifier +# ## When identifier is used, we can provide a list of space separated sensors. +# ## A single subscription will be created with all these sensors and data will +# ## be saved to measurement with this identifier name +# sensors = [ +# "/interfaces/", +# "collection /components/ /lldp", +# ] +# +# ## We allow specifying sensor group level reporting rate. 
To do this, specify the +# ## reporting rate in Duration at the beginning of sensor paths / collection +# ## name. For entries without reporting rate, we use configured sample frequency +# sensors = [ +# "1000ms customReporting /interfaces /lldp", +# "2000ms collection /components", +# "/interfaces", +# ] +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. +# ## Failed streams/calls will not be retried if 0 is provided +# retry_delay = "1000ms" +# +# ## To treat all string values as tags, set this to true +# str_as_tags = false + + +# # Read metrics from Kafka topics +# [[inputs.kafka_consumer]] +# ## Kafka brokers. +# brokers = ["localhost:9092"] +# +# ## Topics to consume. +# topics = ["telegraf"] +# +# ## When set this tag will be added to all metrics with the topic as the value. +# # topic_tag = "" +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Must be 0.10.2.0 or greater. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional SASL Config +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Name of the consumer group. +# # consumer_group = "telegraf_metrics_consumers" +# +# ## Initial offset position; one of "oldest" or "newest". +# # offset = "oldest" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer_legacy]] +# ## topic(s) to consume +# topics = ["telegraf"] +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# ## Zookeeper Chroot +# zookeeper_chroot = "" +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 65536 + + +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional +# ## Configuration for a dynamodb checkpoint +# [inputs.kinesis_consumer.checkpoint_dynamodb] +# ## unique name for this consumer +# app_name = "default" +# table_name = "default" + + +# # Stream and parse log file(s). +# [[inputs.logparser]] +# ## Log files to parse. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only tail the apache log file +# files = ["/var/log/apache/access.log"] +# +# ## Read files that currently exist from the beginning. Files that are created +# ## while telegraf is running (and that match the "files" globs) will always +# ## be read from the beginning. +# from_beginning = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Parse logstash-style "grok" patterns: +# [inputs.logparser.grok] +# ## This is a list of patterns to check the given log file(s) for. +# ## Note that adding patterns here increases processing time. The most +# ## efficient configuration is to have one pattern per logparser. 
+# ## Other common built-in patterns are: +# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) +# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) +# patterns = ["%{COMBINED_LOG_FORMAT}"] +# +# ## Name of the outputted measurement name. +# measurement = "apache_access_log" +# +# ## Full path(s) to custom pattern files. +# custom_pattern_files = [] +# +# ## Custom patterns can also be defined here. Put one pattern per line. +# custom_patterns = ''' +# ''' +# +# ## Timezone allows you to provide an override for timestamps that +# ## don't already include an offset +# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs +# ## +# ## Default: "" which renders UTC +# ## Options are as follows: +# ## 1. Local -- interpret based on machine localtime +# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones +# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC +# # timezone = "Canada/Eastern" +# +# ## When set to "disable", timestamp will not incremented if there is a +# ## duplicate. +# # unique_timestamp = "auto" + + +# # Read metrics from MQTT topic(s) +# [[inputs.mqtt_consumer]] +# ## MQTT broker URLs to be used. The format should be scheme://host:port, +# ## schema can be tcp, ssl, or ws. +# servers = ["tcp://127.0.0.1:1883"] +# +# ## Topics that will be subscribed to. +# topics = [ +# "telegraf/host01/cpu", +# "telegraf/+/mem", +# "sensors/#", +# ] +# +# ## The message topic will be stored in a tag specified by this value. If set +# ## to the empty string no topic tag will be created. +# # topic_tag = "topic" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# ## +# ## When using a QoS of 1 or 2, you should enable persistent_session to allow +# ## resuming unacknowledged messages. +# # qos = 0 +# +# ## Connection timeout for initial connection in seconds +# # connection_timeout = "30s" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Persistent session disables clearing of the client session on connection. +# ## In order for this option to work you must also set client_id to identity +# ## the client. To receive messages that arrived while the client is offline, +# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when +# ## publishing. +# # persistent_session = false +# +# ## If unset, a random client ID will be generated. +# # client_id = "" +# +# ## Username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from NATS subject(s) +# [[inputs.nats_consumer]] +# ## urls of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## subject(s) to consume +# subjects = ["telegraf"] +# ## name a queue group +# queue_group = "telegraf_consumers" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the limits for pending msgs and bytes for each subscription +# ## These shouldn't need to be adjusted except in very high throughput scenarios +# # pending_message_limit = 65536 +# # pending_bytes_limit = 67108864 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read NSQ topic for metrics. +# [[inputs.nsq_consumer]] +# ## Server option still works but is deprecated, we just prepend it to the nsqd array. +# # server = "localhost:4150" +# ## An array representing the NSQD TCP HTTP Endpoints +# nsqd = ["localhost:4150"] +# ## An array representing the NSQLookupd HTTP Endpoints +# nsqlookupd = ["localhost:4161"] +# topic = "telegraf" +# channel = "consumer" +# max_in_flight = 100 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. 
+# ## +# address = "host=localhost user=pgbouncer sslmode=disable" + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# max_lifetime = "0s" +# +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. +# # databases = ["app_production", "testing"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# # +# ## All connection parameters are optional. # +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# max_lifetime = "0s" +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. +# ## databases = ["app_production", "testing"] +# # +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# # +# ## Define the toml config where the sql queries are stored +# ## New queries can be added, if the withdbname is set to true and there is no +# ## databases defined in the 'databases field', the sql query is ended by a +# ## 'is not null' in order to make the query succeed. +# ## Example : +# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become +# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" +# ## because the databases variable was set to ['postgres', 'pgbench' ] and the +# ## withdbname was true. 
Be careful that if the withdbname is set to false you +# ## don't have to define the where clause (aka with the dbname) the tagvalue +# ## field is used to define custom tags (separated by commas) +# ## The optional "measurement" value can be used to override the default +# ## output measurement name ("postgresql"). +# # +# ## Structure : +# ## [[inputs.postgresql_extensible.query]] +# ## sqlquery string +# ## version string +# ## withdbname boolean +# ## tagvalue string (comma separated) +# ## measurement string +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_database" +# version=901 +# withdbname=false +# tagvalue="" +# measurement="" +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_bgwriter" +# version=901 +# withdbname=false +# tagvalue="postgresql.stats" + + +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] +# +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" +# +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to 'https' & most likely set the tls config. +# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# # monitor_kubernetes_pods = true +# ## Restricts Kubernetes monitoring to a single namespace +# ## ex: monitor_kubernetes_pods_namespace = "default" +# # monitor_kubernetes_pods_namespace = "" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## HTTP Basic Authentication username and password. ('bearer_token' and +# ## 'bearer_token_string' take priority) +# # username = "" +# # password = "" +# +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# # response_timeout = "3s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Generic socket listener capable of handling multiple socket types. +# [[inputs.socket_listener]] +# ## URL to listen on +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# # service_address = "udp://:8094" +# # service_address = "udp4://:8094" +# # service_address = "udp6://:8094" +# # service_address = "unix:///tmp/telegraf.sock" +# # service_address = "unixgram:///tmp/telegraf.sock" +# +# ## Change the file mode bits on unix sockets. These permissions may not be +# ## respected by some platforms, to safely restrict write permissions it is best +# ## to place the socket into a directory that has previously been created +# ## with the desired permissions. +# ## ex: socket_mode = "777" +# # socket_mode = "" +# +# ## Maximum number of concurrent connections. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. 
+# # max_connections = 1024 +# +# ## Read timeout. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # read_timeout = "30s" +# +# ## Optional TLS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Maximum socket buffer size (in bytes when no unit specified). +# ## For stream sockets, once the buffer fills up, the sender will start backing up. +# ## For datagram sockets, once the buffer fills up, metrics will start dropping. +# ## Defaults to the OS default. +# # read_buffer_size = "64KiB" +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" + + +# # Statsd UDP/TCP Server +# [[inputs.statsd]] +# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp) +# protocol = "udp" +# +# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) +# max_tcp_connections = 250 +# +# ## Enable TCP keep alive probes (default=false) +# tcp_keep_alive = false +# +# ## Specifies the keep-alive period for an active network connection. +# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. +# ## Defaults to the OS configuration. +# # tcp_keep_alive_period = "2h" +# +# ## Address and port to host UDP listener on +# service_address = ":8125" +# +# ## The following configuration options control when telegraf clears it's cache +# ## of previous values. If set to false, then telegraf will only clear it's +# ## cache when the daemon is restarted. +# ## Reset gauges every interval (default=true) +# delete_gauges = true +# ## Reset counters every interval (default=true) +# delete_counters = true +# ## Reset sets every interval (default=true) +# delete_sets = true +# ## Reset timings & histograms every interval (default=true) +# delete_timings = true +# +# ## Percentiles to calculate for timing & histogram stats +# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] +# +# ## separator to use between elements of a statsd metric +# metric_separator = "_" +# +# ## Parses tags in the datadog statsd format +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# parse_data_dog_tags = false +# +# ## Parses datadog extensions to the statsd format +# datadog_extensions = false +# +# ## Statsd data translation templates, more info can be read here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md +# # templates = [ +# # "cpu.* measurement*" +# # ] +# +# ## Number of UDP messages allowed to queue up, once filled, +# ## the statsd server will start dropping packets +# allowed_pending_messages = 10000 +# +# ## Number of timing/histogram values to track per-measurement in the +# ## calculation of percentiles. Raising this limit increases the accuracy +# ## of percentiles but also increases the memory usage and cpu time. 
+# percentile_limit = 1000 + + +# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 +# [[inputs.syslog]] +# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 +# ## Protocol, address and port to host the syslog receiver. +# ## If no host is specified, then localhost is used. +# ## If no port is specified, 6514 is used (RFC5425#section-4.1). +# server = "tcp://:6514" +# +# ## TLS Config +# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # keep_alive_period = "5m" +# +# ## Maximum number of concurrent connections (default = 0). +# ## 0 means unlimited. +# ## Only applies to stream sockets (e.g. TCP). +# # max_connections = 1024 +# +# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). +# ## 0 means unlimited. +# # read_timeout = "5s" +# +# ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). +# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). +# ## Must be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-trasparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## Whether to parse in best effort mode or not (default = false). +# ## By default best effort parsing is off. +# # best_effort = false +# +# ## Character to prepend to SD-PARAMs (default = "_"). +# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. +# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] +# ## For each combination a field is created. +# ## Its name is created concatenating identifier, sdparam_separator, and parameter name. +# # sdparam_separator = "_" + + +# # Stream a log file, like the tail -f command +# [[inputs.tail]] +# ## files to tail. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/mymetrics.out"] +# ## Read file from beginning. +# from_beginning = false +# ## Whether file is a named pipe +# pipe = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Generic TCP listener +# [[inputs.tcp_listener]] +# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the +# # socket_listener plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + + +# # Generic UDP listener +# [[inputs.udp_listener]] +# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the +# # socket_listener plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + + +# # Read metrics from VMware vCenter +# [[inputs.vsphere]] +# ## List of vCenter URLs to be monitored. These three lines must be uncommented +# ## and edited for the plugin to work. +# vcenters = [ "https://vcenter.local/sdk" ] +# username = "user@corp.local" +# password = "secret" +# +# ## VMs +# ## Typical VM metrics (if omitted or empty, all metrics are collected) +# vm_metric_include = [ +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.run.summation", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.wait.summation", +# "mem.active.average", +# "mem.granted.average", +# "mem.latency.average", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.usage.average", +# "power.power.average", +# "virtualDisk.numberReadAveraged.average", +# "virtualDisk.numberWriteAveraged.average", +# "virtualDisk.read.average", +# "virtualDisk.readOIO.latest", +# "virtualDisk.throughput.usage.average", +# "virtualDisk.totalReadLatency.average", +# "virtualDisk.totalWriteLatency.average", +# "virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# 
"storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# ## Clusters +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Datastores +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default for Datastores only +# +# ## Datacenters +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. +# # datacenter_instances = false ## false by default for Datastores only +# +# ## Plugin Settings +# ## separator character to use for measurement and field names (default: "_") +# # separator = "_" +# +# ## number of objects to retreive per query for realtime resources (vms and hosts) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_objects = 256 +# +# ## number of metrics to retreive per query for non-realtime resources (clusters and datastores) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_metrics = 256 +# +# ## number of go routines to use for collection and discovery of objects and metrics +# # collect_concurrency = 1 +# # discover_concurrency = 1 +# +# ## whether or not to force discovery of new objects on initial gather call before collecting metrics +# ## when true for large environments this may cause errors for time elapsed while collecting metrics +# ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered +# # force_discover_on_init = false +# +# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) +# # object_discovery_interval = "300s" +# +# ## timeout applies to any of the api request made to vcenter +# # timeout = "60s" +# +# ## When set to true, all samples are sent as integers. This makes the output +# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all +# ## samples from vCenter, with the exception of percentages, are integer +# ## values, but under some conditions, some averaging takes place internally in +# ## the plugin. Setting this flag to "false" will send values as floats to +# ## preserve the full precision when averaging takes place. +# # use_int_samples = true +# +# ## Custom attributes from vCenter can be very useful for queries in order to slice the +# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled +# ## by default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. 
+# # custom_attribute_include = [] +# # custom_attribute_exclude = ["*"] +# +# ## Optional SSL Config +# # ssl_ca = "/path/to/cafile" +# # ssl_cert = "/path/to/certfile" +# # ssl_key = "/path/to/keyfile" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # A Webhooks Event collector +# [[inputs.webhooks]] +# ## Address and port to host Webhook listener on +# service_address = ":1619" +# +# [inputs.webhooks.filestack] +# path = "/filestack" +# +# [inputs.webhooks.github] +# path = "/github" +# # secret = "" +# +# [inputs.webhooks.mandrill] +# path = "/mandrill" +# +# [inputs.webhooks.rollbar] +# path = "/rollbar" +# +# [inputs.webhooks.papertrail] +# path = "/papertrail" +# +# [inputs.webhooks.particle] +# path = "/particle" + + +# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. +# [[inputs.zipkin]] +# # path = "/api/v1/spans" # URL path for span data +# # port = 9411 # Port on which Telegraf listens + diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 1e2fb4fed4031f4a2984edf96b19cac71457c58f..588f0650bd8b67033504d5f0b9c3f77cca24c54a 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -211,6 +211,12 @@ # whether to enable HTTP compression transmission # httpEnableCompress 0 +# the delayed time for launching each continuous query. 10% of the whole computing time window by default. +# streamCompDelayRatio 0.1 + +# the max allowed delayed time for launching continuous query. 20ms by default +# tsMaxStreamComputDelay 20000 + # whether the telegraf table name contains the number of tags and the number of fields # telegrafUseFieldNum 0 diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 8c55269bb9ac87112d19a120f57898b0e462cfd6..faa29aeac8d484fc1858afca73eda2a248169a46 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -2,7 +2,7 @@ # # Generate deb package for ubuntu set -e -#set -x +# set -x #curr_dir=$(pwd) compile_dir=$1 @@ -73,7 +73,7 @@ sed -i "2c$debver" ${pkg_dir}/DEBIAN/control if [ "$verMode" == "cluster" ]; then debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType} elif [ "$verMode" == "lite" ]; then - debname="TDengine-server-edge"-${tdengine_ver}-${osType}-${cpuType} + debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType} else echo "unknow verMode, nor cluster or lite" exit 1 diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..b01f375db0bc49e07ddb6b9d5e3c1692455857a7 --- /dev/null +++ b/packaging/docker/Dockerfile @@ -0,0 +1,18 @@ +FROM centos:7 + +WORKDIR /root + +COPY tdengine.tar.gz /root/ +RUN tar -zxf tdengine.tar.gz +WORKDIR /root/tdengine/ +RUN sh install.sh + + +ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 +EXPOSE 6020 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 +EXPOSE 6043 6044 6045 6046 6047 6048 6049 6050 +CMD ["taosd"] +VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ] diff --git a/packaging/docker/dockerbuild.sh b/packaging/docker/dockerbuild.sh new file mode 100755 index 0000000000000000000000000000000000000000..280c27d7aa10248a73874518c30639b83203590b --- /dev/null +++ b/packaging/docker/dockerbuild.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -x +$1 +tar -zxf $1 +DIR=`echo $1|awk -F . 
'{print($1"."$2"."$3"."$4)}'` +mv $DIR tdengine +tar -czf tdengine.tar.gz tdengine +TMP=`echo $1|awk -F . '{print($2"."$3"."$4)}'` +TAG="1."$TMP +docker build --rm -f "Dockerfile" -t tdengine/tdengine:$TAG "." +docker login -u tdengine -p ******** #replace the docker registry username and password +docker push tdengine/tdengine:$TAG \ No newline at end of file diff --git a/packaging/release.sh b/packaging/release.sh index bdb18dde4cb366f7a520f67daf595d4ecb0ddf0c..378a7fe203d3c4997b7fe6b1800abdb32b95ff7a 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -3,7 +3,7 @@ # Generate the deb package for ubunt, or rpm package for centos, or tar.gz package for other linux os set -e -# set -x +#set -x # releash.sh -v [cluster | lite] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta] @@ -46,8 +46,17 @@ done echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType}" curr_dir=$(pwd) -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/..)" + +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/.. +fi + versioninfo="${top_dir}/src/util/src/version.c" csudo="" @@ -147,7 +156,14 @@ build_time=$(date +"%F %R") echo "char version[64] = \"${version}\";" > ${versioninfo} echo "char compatible_version[64] = \"${compatible_version}\";" >> ${versioninfo} echo "char gitinfo[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo} -echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo} +if [ "$verMode" != "cluster" ]; then + echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo} +else + enterprise_dir="${top_dir}/../enterprise" + cd ${enterprise_dir} + echo "char gitinfoOfInternal[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo} + cd ${curr_dir} +fi echo "char buildinfo[512] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo} echo "" >> ${versioninfo} tmp_version=$(echo $version | tr -s "." "_") @@ -167,15 +183,23 @@ if [ -d ${compile_dir} ]; then ${csudo} rm -rf ${compile_dir} fi -${csudo} mkdir -p ${compile_dir} +if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${compile_dir} +else + mkdir -p ${compile_dir} +fi cd ${compile_dir} # check support cpu type if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then - cmake ../ -DCPUTYPE=${cpuType} + if [ "$verMode" != "cluster" ]; then + cmake ../ -DCPUTYPE=${cpuType} + else + cmake ../../ -DCPUTYPE=${cpuType} + fi else - echo "input cpuType=${cpuType} error!!!" - exit 1 + echo "input cpuType=${cpuType} error!!!" 
+ exit 1 fi make @@ -187,28 +211,36 @@ cd ${curr_dir} #osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) #echo "osinfo: ${osinfo}" -echo "====do deb package for the ubuntu system====" -output_dir="${top_dir}/debs" -if [ -d ${output_dir} ]; then - ${csudo} rm -rf ${output_dir} -fi -${csudo} mkdir -p ${output_dir} -cd ${script_dir}/deb -${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType} - -echo "====do rpm package for the centos system====" -output_dir="${top_dir}/rpms" -if [ -d ${output_dir} ]; then - ${csudo} rm -rf ${output_dir} +if [ "$osType" != "Darwin" ]; then + if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]]; then + echo "====do deb package for the ubuntu system====" + output_dir="${top_dir}/debs" + if [ -d ${output_dir} ]; then + ${csudo} rm -rf ${output_dir} + fi + ${csudo} mkdir -p ${output_dir} + cd ${script_dir}/deb + ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType} + + echo "====do rpm package for the centos system====" + output_dir="${top_dir}/rpms" + if [ -d ${output_dir} ]; then + ${csudo} rm -rf ${output_dir} + fi + ${csudo} mkdir -p ${output_dir} + cd ${script_dir}/rpm + ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType} + fi + + echo "====do tar.gz package for all systems====" + cd ${script_dir}/tools + + ${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} + ${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} +else + cd ${script_dir}/tools + ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} fi -${csudo} mkdir -p ${output_dir} -cd ${script_dir}/rpm -${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType} - -echo "====do tar.gz package for all systems====" -cd ${script_dir}/tools -${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} -${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} # 4. 
Clean up temporary compile directories
#${csudo} rm -rf ${compile_dir}
diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh
index e76e16084386026f44be89f6d242cccc7bf9b605..cc31a6b8d4103a9b03da71cb1cc041348e89f6ac 100755
--- a/packaging/rpm/makerpm.sh
+++ b/packaging/rpm/makerpm.sh
@@ -2,8 +2,8 @@
#
# Generate rpm package for centos
-#set -e
-#set -x
+set -e
+# set -x
#curr_dir=$(pwd)
compile_dir=$1
@@ -66,7 +66,7 @@ cp_rpm_package ${pkg_dir}/RPMS
if [ "$verMode" == "cluster" ]; then
rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "lite" ]; then
- rpmname="TDengine-server-edge"-${tdengine_ver}-${osType}-${cpuType}
+ rpmname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or lite"
exit 1
fi
diff --git a/packaging/rpm/taosd b/packaging/rpm/taosd
index 6283c79383024df9ee39affcd2c85bfe27562b2a..46dd712e3139dad69d3db6db8b289d0f2424811a 100644
--- a/packaging/rpm/taosd
+++ b/packaging/rpm/taosd
@@ -26,7 +26,7 @@ MAX_OPEN_FILES=65535
# Default program options
NAME=taosd
-PROG=/usr/local/bin/taos/taosd
+PROG=/usr/local/taos/bin/taosd
USER=root
GROUP=root
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
old mode 100755
new mode 100644
index ea3e16d345ed427d2f81abb14d66673d4df5cfbb..beca20e68de22ff6d53c17ec78ce2f6c0df761a3
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -6,6 +6,8 @@ set -e
#set -x
+verMode=lite
+
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
@@ -27,7 +29,12 @@ install_main_dir="/usr/local/taos"
# old bin dir
bin_dir="/usr/local/taos/bin"
+# v1.5 jar dir
+v15_java_app_dir="/usr/local/lib/taos"
+
service_config_dir="/etc/systemd/system"
+nginx_port=6060
+nginx_dir="/usr/local/nginxd"
# Color setting
RED='\033[0;31m'
@@ -41,6 +48,8 @@ if command -v sudo > /dev/null; then
csudo="sudo"
fi
+update_flag=0
+
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
@@ -69,23 +78,24 @@ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
#echo "osinfo: ${osinfo}"
os_type=0
if echo $osinfo | grep -qwi "ubuntu" ; then
- echo "this is ubuntu system"
+ echo "This is ubuntu system"
os_type=1
elif echo $osinfo | grep -qwi "debian" ; then
- echo "this is debian system"
+ echo "This is debian system"
os_type=1
elif echo $osinfo | grep -qwi "Kylin" ; then
- echo "this is Kylin system"
+ echo "This is Kylin system"
os_type=1
elif echo $osinfo | grep -qwi "centos" ; then
- echo "this is centos system"
+ echo "This is centos system"
os_type=2
elif echo $osinfo | grep -qwi "fedora" ; then
- echo "this is fedora system"
+ echo "This is fedora system"
os_type=2
else
- echo "this is other linux system"
- os_type=0
+ echo "${osinfo}: This is an officially unverified linux system. If there are any problems with the installation and operation, "
+ echo "please feel free to contact taosdata.com for support."
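+ # With os_type=1 set below, unverified distributions reuse the deb-style
+ # init scripts instead of the former catch-all os_type=0.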
+ os_type=1 fi function kill_taosd() { @@ -106,6 +116,9 @@ function install_main_path() { ${csudo} mkdir -p ${install_main_dir}/examples ${csudo} mkdir -p ${install_main_dir}/include ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$verMode" == "cluster" ]; then + ${csudo} mkdir -p ${nginx_dir} + fi } function install_bin() { @@ -124,16 +137,30 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + + if [ "$verMode" == "cluster" ]; then + ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/* + ${csudo} mkdir -p ${nginx_dir}/logs + ${csudo} chmod 777 ${nginx_dir}/sbin/nginx + fi } function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ "$verMode" == "cluster" ]; then + # Compatible with version 1.5 + ${csudo} mkdir -p ${v15_java_app_dir} + ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar + ${csudo} chmod 777 ${v15_java_app_dir} || : + fi } function install_header() { @@ -154,6 +181,57 @@ function install_config() { ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + + if [ "$verMode" == "cluster" ]; then + [ ! -z $1 ] && return 0 || : # only install client + + if ((${update_flag}==1)); then + return 0 + fi + + IP_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + IP_PATTERN="\b$IP_FORMAT\.$IP_FORMAT\.$IP_FORMAT\.$IP_FORMAT\b" + + echo + echo -e -n "${GREEN}Enter the IP address of an existing TDengine cluster node to join${NC} OR ${GREEN}leave it blank to build one${NC} :" + read masterIp + while true; do + if [ ! -z "$masterIp" ]; then + # check the format of the masterIp + if [[ $masterIp =~ $IP_PATTERN ]]; then + # Write the first IP to configuration file + sudo sed -i -r "s/#*\s*(masterIp\s*).*/\1$masterIp/" ${cfg_dir}/taos.cfg + + # Get the second IP address + + echo + echo -e -n "${GREEN}Enter the IP address of another node in cluster${NC} OR ${GREEN}leave it blank to skip${NC}: " + read secondIp + while true; do + + if [ ! 
-z "$secondIp" ]; then + if [[ $secondIp =~ $IP_PATTERN ]]; then + # Write the second IP to configuration file + sudo sed -i -r "s/#*\s*(secondIp\s*).*/\1$secondIp/" ${cfg_dir}/taos.cfg + break + else + read -p "Please enter the correct IP address: " secondIp + fi + else + break + fi + done + + break + else + read -p "Please enter the correct IP address: " masterIp + fi + else + break + fi + done + + fi } @@ -175,7 +253,9 @@ function install_connector() { } function install_examples() { - ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi } function clean_service_on_sysvinit() { @@ -240,7 +320,19 @@ function clean_service_on_systemd() { ${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} -} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + + if systemctl is-active --quiet nginxd; then + echo "Nginx for TDengine is running, stopping it..." + ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi +} # taos:2345:respawn:/etc/init.d/taosd start @@ -269,6 +361,36 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" ${csudo} systemctl enable taosd + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" + if ! 
${csudo} systemctl enable nginxd &> /dev/null; then + ${csudo} systemctl daemon-reexec + ${csudo} systemctl enable nginxd + fi + ${csudo} systemctl start nginxd + fi } function install_service() { @@ -363,6 +485,21 @@ function update_TDengine() { install_bin install_service install_config + + if [ "$verMode" == "cluster" ]; then + # Check if openresty is installed + openresty_work=false + + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for TDengine is updated successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m" + fi + fi + fi echo echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" @@ -376,7 +513,15 @@ function update_TDengine() { echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" fi - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + if [ "$verMode" == "cluster" ]; then + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi echo echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" else @@ -416,6 +561,20 @@ function install_TDengine() { # For installing new install_bin install_service + + if [ "$verMode" == "cluster" ]; then + openresty_work=false + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for TDengine is installed successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for TDengine does not work! 
Please try again!\033[0m" + fi + fi + fi + install_config # Ask if to start the service @@ -430,8 +589,17 @@ function install_TDengine() { else echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" fi - - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + + if [ "$verMode" == "cluster" ]; then + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi + echo echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" else # Only install client @@ -450,6 +618,7 @@ function install_TDengine() { if [ -z $1 ]; then # Install server and client if [ -x ${bin_dir}/taosd ]; then + update_flag=1 update_TDengine else install_TDengine @@ -457,6 +626,7 @@ if [ -z $1 ]; then else # Only install client if [ -x ${bin_dir}/taos ]; then + update_flag=1 update_TDengine client else install_TDengine client diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 78d7b750458d7b777119315237efe574b330af6d..c5ecf5e5b943bc54e4602d873a1d374768e35387 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -7,18 +7,35 @@ set -e #set -x # -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -f "$0")) -# Dynamic directory -data_dir="/var/lib/taos" -log_dir="/var/log/taos" + +osType=Linux + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) + # Dynamic directory + data_dir="/var/lib/taos" + log_dir="/var/log/taos" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + data_dir="/var/lib/taos" + log_dir="~/TDengineLog" +fi log_link_dir="/usr/local/taos/log" cfg_install_dir="/etc/taos" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -inc_link_dir="/usr/include" +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi #install main path install_main_dir="/usr/local/taos" @@ -26,6 +43,8 @@ install_main_dir="/usr/local/taos" # old bin dir bin_dir="/usr/local/taos/bin" +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" # Color setting RED='\033[0;31m' @@ -51,9 +70,9 @@ function kill_client() { function install_main_path() { #create install main dir and all sub dir ${csudo} rm -rf ${install_main_dir} || : - ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir} ${csudo} mkdir -p ${install_main_dir}/cfg - ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/bin ${csudo} mkdir -p ${install_main_dir}/connector ${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/examples @@ -61,51 +80,60 @@ function install_main_path() { } function install_bin() { - # Remove links - ${csudo} rm -f ${bin_link_dir}/taos || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : - ${csudo} rm -f ${bin_link_dir}/rmtaos || : + # Remove links + ${csudo} rm -f ${bin_link_dir}/taos || : + if [ "$osType" == "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/taosdump || : + fi + ${csudo} rm -f ${bin_link_dir}/rmtaos || : - ${csudo} cp -r 
${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* #Make link - [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : + [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : + if [ "$osType" == "Darwin" ]; then + [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + fi + [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : } function clean_lib() { - sudo rm -f /usr/lib/libtaos.so || : + sudo rm -f /usr/lib/libtaos.* || : sudo rm -rf ${lib_dir} || : } function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : + + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* - ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* - - ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - + if [ "$osType" != "Darwin" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + else + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi } function install_header() { ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function install_config() { #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - + if [ ! 
-f ${cfg_install_dir}/taos.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* - fi - + fi + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg } @@ -113,8 +141,12 @@ function install_config() { function install_log() { ${csudo} rm -rf ${log_dir} || : - ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} - + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + fi ${csudo} ln -s ${log_dir} ${install_main_dir}/log } @@ -142,7 +174,7 @@ function update_TDengine() { kill_client sleep 1 fi - + install_main_path install_log @@ -152,7 +184,7 @@ function update_TDengine() { install_examples install_bin install_config - + echo echo -e "\033[44;32;1mTDengine client is updated successfully!${NC}" @@ -168,16 +200,16 @@ function install_TDengine() { tar -zxf taos.tar.gz echo -e "${GREEN}Start to install TDengine client...${NC}" - - install_main_path - install_log + + install_main_path + install_log install_header install_lib install_connector install_examples install_bin install_config - + echo echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}" @@ -191,8 +223,8 @@ function install_TDengine() { if [ -e ${bin_dir}/taosd ]; then echo -e "\033[44;32;1mThere are already installed TDengine server, so don't need install client!${NC}" exit 0 - fi - + fi + if [ -x ${bin_dir}/taos ]; then update_flag=1 update_TDengine diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 25ab39777583e7e45934c4fd1c0606095890a11c..2200c7f13d20ecd19b75c8fe39185a3c69558f22 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -9,19 +9,37 @@ set -e # -----------------------Variables definition--------------------- source_dir=$1 binary_dir=$2 -script_dir=$(dirname $(readlink -f "$0")) +osType=$3 + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) +else + script_dir=${source_dir}/packaging/tools +fi + # Dynamic directory data_dir="/var/lib/taos" -log_dir="/var/log/taos" + +if [ "$osType" != "Darwin" ]; then + log_dir="/var/log/taos" +else + log_dir="~/TDengineLog" +fi data_link_dir="/usr/local/taos/data" log_link_dir="/usr/local/taos/log" cfg_install_dir="/etc/taos" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -inc_link_dir="/usr/include" +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi #install main path install_main_dir="/usr/local/taos" @@ -43,58 +61,61 @@ if command -v sudo > /dev/null; then csudo="sudo" fi -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 +if [ "$osType" != "Darwin" ]; then + + initd_mod=0 + service_mod=2 + if pidof systemd &> /dev/null; then + service_mod=0 + elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); 
then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
else
service_mod=2
fi
-else
- service_mod=2
-fi
-
-# get the operating system type for using the corresponding init file
-# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
-#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
-osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
-#echo "osinfo: ${osinfo}"
-os_type=0
-if echo $osinfo | grep -qwi "ubuntu" ; then
- echo "this is ubuntu system"
- os_type=1
-elif echo $osinfo | grep -qwi "debian" ; then
- echo "this is debian system"
- os_type=1
-elif echo $osinfo | grep -qwi "Kylin" ; then
- echo "this is Kylin system"
- os_type=1
-elif echo $osinfo | grep -qwi "centos" ; then
- echo "this is centos system"
- os_type=2
-elif echo $osinfo | grep -qwi "fedora" ; then
- echo "this is fedora system"
- os_type=2
-else
- echo "this is other linux system"
- os_type=0
+ # get the operating system type for using the corresponding init file
+ # ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+ #osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
+ #echo "osinfo: ${osinfo}"
+ os_type=0
+ if echo $osinfo | grep -qwi "ubuntu" ; then
+ echo "this is ubuntu system"
+ os_type=1
+ elif echo $osinfo | grep -qwi "debian" ; then
+ echo "this is debian system"
+ os_type=1
+ elif echo $osinfo | grep -qwi "Kylin" ; then
+ echo "this is Kylin system"
+ os_type=1
+ elif echo $osinfo | grep -qwi "centos" ; then
+ echo "this is centos system"
+ os_type=2
+ elif echo $osinfo | grep -qwi "fedora" ; then
+ echo "this is fedora system"
+ os_type=2
+ else
+ echo "${osinfo}: This is an officially unverified linux system. If there are any problems with the installation and operation, "
+ echo "please feel free to contact taosdata.com for support."
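+ # Same fallback as install.sh: unknown distributions get os_type=1 below
+ # (deb-style init handling) in place of the former os_type=0.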
+ os_type=1 + fi fi function kill_taosd() { - pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo} kill -9 $pid || : - fi + pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi } function install_main_path() { @@ -107,37 +128,62 @@ function install_main_path() { ${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/examples ${csudo} mkdir -p ${install_main_dir}/include - ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${install_main_dir}/init.d + fi } function install_bin() { # Remove links - ${csudo} rm -f ${bin_link_dir}/taos || : - ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : - ${csudo} rm -f ${bin_link_dir}/rmtaos || : - - ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin - ${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin + ${csudo} rm -f ${bin_link_dir}/taos || : + + if [ "$osType" != "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/taosd || : + ${csudo} rm -f ${bin_link_dir}/taosdemo || : + ${csudo} rm -f ${bin_link_dir}/taosdump || : + fi + + ${csudo} rm -f ${bin_link_dir}/rmtaos || : + + ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin + + if [ "$osType" != "Darwin" ]; then + ${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin + else + ${csudo} cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin + fi ${csudo} chmod 0555 ${install_main_dir}/bin/* #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : + [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + fi + + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + else + [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : + fi } function install_lib() { # Remove links - ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : versioninfo=$(${script_dir}/get_version.sh ${source_dir}/src/util/src/version.c) - ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* - ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1 - ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + if [ "$osType" != "Darwin" ]; then + ${csudo} cp 
${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + else + ${csudo} cp ${binary_dir}/build/lib/libtaos.${versioninfo}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} ln -sf ${install_main_dir}/driver/libtaos.${versioninfo}.dylib ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi } function install_header() { @@ -163,8 +209,13 @@ function install_config() { function install_log() { ${csudo} rm -rf ${log_dir} || : - ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} - + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && chmod 777 ${log_dir} + fi + ${csudo} ln -s ${log_dir} ${install_main_dir}/log } @@ -291,7 +342,9 @@ function install_service() { function update_TDengine() { echo -e "${GREEN}Start to update TDEngine...${NC}" # Stop the service if running - if pidof taosd &> /dev/null; then + + if [ "$osType" != "Darwin" ]; then + if pidof taosd &> /dev/null; then if ((${service_mod}==0)); then ${csudo} systemctl stop taosd || : elif ((${service_mod}==1)); then @@ -300,6 +353,7 @@ function update_TDengine() { kill_taosd fi sleep 1 + fi fi install_main_path @@ -310,32 +364,54 @@ function update_TDengine() { install_connector install_examples install_bin - install_service + + if [ "$osType" != "Darwin" ]; then + install_service + fi + install_config - echo - echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" - elif ((${service_mod}==1)); then + if [ "$osType" != "Darwin" ]; then + echo + echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + echo + + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" + elif ((${service_mod}==1)); then echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" - else - echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" - fi + else + echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + else + echo + echo -e "\033[44;32;1mTDengine Client is updated successfully!${NC}" + echo - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - echo - echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + echo -e "${GREEN_DARK}To access TDengine Client ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine Client is updated successfully!${NC}" + fi } function install_TDengine() { # Start to install - echo -e "${GREEN}Start to install TDEngine...${NC}" - + if [ "$osType" != "Darwin" ]; then + echo -e "${GREEN}Start to install TDEngine...${NC}" + else + echo -e "${GREEN}Start to install TDEngine Client ...${NC}" + fi + install_main_path - install_data + + if [ 
"$osType" != "Darwin" ]; then + install_data + fi install_log install_header install_lib @@ -343,30 +419,41 @@ function install_TDengine() { install_examples install_bin - install_service + + if [ "$osType" != "Darwin" ]; then + install_service + fi + install_config - # Ask if to start the service - echo - echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" + + if [ "$osType" != "Darwin" ]; then + # Ask if to start the service + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" + else + echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" else - echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine Client is installed successfully!${NC}" fi - - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - echo - echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" } ## ==============================Main program starts from here============================ echo source directory: $1 echo binary directory: $2 -if [ -x ${bin_dir}/taosd ]; then +if [ -x ${bin_dir}/taos ]; then update_TDengine else install_TDengine diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index d206359160100a217b3f6f4be87f1274f80b2187..58ce6183c104f4042c106be092ac07242d22ac08 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -13,8 +13,15 @@ osType=$5 verMode=$6 verType=$7 -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/../..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/../.. +fi # create compressed install file. build_dir="${compile_dir}/build" @@ -22,13 +29,26 @@ code_dir="${top_dir}/src" release_dir="${top_dir}/release" #package_name='linux' -install_dir="${release_dir}/TDengine-client" + +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/TDengine-enterprise-client" +else + install_dir="${release_dir}/TDengine-client" +fi # Directories and files. 
-bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh" -lib_files="${build_dir}/lib/libtaos.so.${version}" + +if [ "$osType" != "Darwin" ]; then + bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh" + lib_files="${build_dir}/lib/libtaos.so.${version}" +else + bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" + lib_files="${build_dir}/lib/libtaos.${version}.dylib" +fi + header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" cfg_dir="${top_dir}/packaging/cfg" + install_files="${script_dir}/install_client.sh" # make directories. @@ -38,10 +58,23 @@ mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cf mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* cd ${install_dir} -tar -zcv -f taos.tar.gz * --remove-files || : + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f taos.tar.gz * --remove-files || : +else + tar -zcv -f taos.tar.gz * || : + mv taos.tar.gz .. + rm -rf ./* + mv ../taos.tar.gz . +fi cd ${curr_dir} -cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install* +cp ${install_files} ${install_dir} +if [ "$osType" == "Darwin" ]; then + sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >> install_client_temp.sh + mv install_client_temp.sh ${install_dir}/install_client.sh +fi +chmod a+x ${install_dir}/install_client.sh # Copy example code mkdir -p ${install_dir}/examples @@ -60,7 +93,10 @@ cp ${lib_files} ${install_dir}/driver # Copy connector connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector -cp ${build_dir}/lib/*.jar ${install_dir}/connector + +if [ "$osType" != "Darwin" ]; then + cp ${build_dir}/lib/*.jar ${install_dir}/connector +fi cp -r ${connector_dir}/grafana ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/go ${install_dir}/connector @@ -75,7 +111,7 @@ cd ${release_dir} if [ "$verMode" == "cluster" ]; then pkg_name=${install_dir}-${version}-${osType}-${cpuType} elif [ "$verMode" == "lite" ]; then - pkg_name=${install_dir}-edge-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${version}-${osType}-${cpuType} else echo "unknow verMode, nor cluster or lite" exit 1 @@ -90,6 +126,13 @@ else exit 1 fi -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files +if [ "$osType" != "Darwin" ]; then + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +else + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : + mv "$(basename ${pkg_name}).tar.gz" .. + rm -rf ./* + mv ../"$(basename ${pkg_name}).tar.gz" . +fi cd ${curr_dir} diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index a16da7f6c3d297ef3d348ce14ba95072caca5027..1b095a4c76693182acf2d71e2610e06a34a2e15c 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -23,7 +23,11 @@ code_dir="${top_dir}/src" release_dir="${top_dir}/release" #package_name='linux' -install_dir="${release_dir}/TDengine-server" +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/TDengine-enterprise-server" +else + install_dir="${release_dir}/TDengine-server" +fi # Directories and files. 
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdump ${script_dir}/remove.sh" @@ -31,6 +35,7 @@ lib_files="${build_dir}/lib/libtaos.so.${version}" header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" cfg_dir="${top_dir}/packaging/cfg" install_files="${script_dir}/install.sh" +nginx_dir="${code_dir}/../../enterprise/src/modules/web" # Init file #init_dir=${script_dir}/deb @@ -50,11 +55,29 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x $ mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm +if [ "$verMode" == "cluster" ]; then + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd + cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png + rm -rf ${install_dir}/nginxd/png + + if [ "$cpuType" == "aarch64" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ + elif [ "$cpuType" == "aarch32" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ + fi + rm -rf ${install_dir}/nginxd/sbin/arm +fi + cd ${install_dir} -tar -zcv -f taos.tar.gz * --remove-files || : +tar -zcv -f taos.tar.gz * --remove-files || : cd ${curr_dir} -cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install* +cp ${install_files} ${install_dir} +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=lite/verMode=cluster/g' ${install_dir}/install.sh >> install_temp.sh + mv install_temp.sh ${install_dir}/install.sh +fi +chmod a+x ${install_dir}/install.sh # Copy example code mkdir -p ${install_dir}/examples @@ -88,7 +111,7 @@ cd ${release_dir} if [ "$verMode" == "cluster" ]; then pkg_name=${install_dir}-${version}-${osType}-${cpuType} elif [ "$verMode" == "lite" ]; then - pkg_name=${install_dir}-edge-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${version}-${osType}-${cpuType} else echo "unknow verMode, nor cluster or lite" exit 1 @@ -103,6 +126,6 @@ else exit 1 fi -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files +tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : cd ${curr_dir} diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 3c9fd6bf7ff7b3098d900535a52a3e81f1368a1e..81507e1aa969ff11e92de6c9f1307c32197c9b2c 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -2,6 +2,11 @@ # # Script to stop the service and uninstall TDengine, but retain the config, data and log files. 
+set -e +#set -x + +verMode=lite + RED='\033[0;31m' GREEN='\033[1;32m' NC='\033[0m' @@ -14,10 +19,14 @@ cfg_link_dir="/usr/local/taos/cfg" bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" inc_link_dir="/usr/include" +install_nginxd_dir="/usr/local/nginxd" + +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" service_config_dir="/etc/systemd/system" taos_service_name="taosd" - +nginx_service_name="nginxd" csudo="" if command -v sudo > /dev/null; then csudo="sudo" @@ -62,6 +71,7 @@ function clean_bin() { function clean_lib() { # Remove link ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : } function clean_header() { @@ -90,6 +100,20 @@ function clean_service_on_systemd() { ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/${nginx_service_name}.service" + + if [ -d ${bin_dir}/web ]; then + if systemctl is-active --quiet ${nginx_service_name}; then + echo "Nginx for TDengine is running, stopping it..." + ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi + fi } function clean_service_on_sysvinit() { @@ -143,6 +167,7 @@ clean_config ${csudo} rm -rf ${data_link_dir} || : ${csudo} rm -rf ${install_main_dir} +${csudo} rm -rf ${install_nginxd_dir} osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) if echo $osinfo | grep -qwi "ubuntu" ; then diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 206de34c1f74325dd41773358bbe2c57690ca153..9210546a9f407fd821a176cadfcf88ae8023dc2f 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -17,6 +17,10 @@ bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" inc_link_dir="/usr/include" + +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" + csudo="" if command -v sudo > /dev/null; then csudo="sudo" @@ -39,6 +43,7 @@ function clean_bin() { function clean_lib() { # Remove link ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : } function clean_header() { diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index d3baf84d6754ad2edcf1ac193cd46020760553ea..92d6b61eb2473c790c967a4a0091e233de84b8fa 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -24,20 +24,10 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) #set version of .so #VERSION so version #SOVERSION api version - IF (TD_LITE) - execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) - execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c - OUTPUT_VARIABLE - VERSION_INFO) - MESSAGE(STATUS "build lite version ${VERSION_INFO}") - ELSE () - execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) - execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c - OUTPUT_VARIABLE - VERSION_INFO) - MESSAGE(STATUS "build cluster version ${VERSION_INFO}") - ENDIF () - + execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) + execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c + OUTPUT_VARIABLE + VERSION_INFO) MESSAGE(STATUS "build version 
${VERSION_INFO}") SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1) @@ -51,10 +41,13 @@ ELSEIF (TD_WINDOWS_64) # generate dynamic library (*.dll) ADD_LIBRARY(taos SHARED ${SRC}) - SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def) + IF (NOT TD_GODLL) + SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def) + ENDIF () TARGET_LINK_LIBRARIES(taos trpc) ELSEIF (TD_DARWIN_64) + SET(CMAKE_MACOSX_RPATH 1) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux) ADD_LIBRARY(taos_static STATIC ${SRC}) @@ -64,6 +57,17 @@ ELSEIF (TD_DARWIN_64) # generate dynamic library (*.dylib) ADD_LIBRARY(taos SHARED ${SRC}) TARGET_LINK_LIBRARIES(taos trpc tutil pthread m) - + + SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) + + #set version of .so + #VERSION so version + #SOVERSION api version + execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) + execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c + OUTPUT_VARIABLE + VERSION_INFO) + MESSAGE(STATUS "build version ${VERSION_INFO}") + SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1) ENDIF () diff --git a/src/client/inc/tscJoinProcess.h b/src/client/inc/tscJoinProcess.h index 89f29807ac900c1465eb6f506a7d9b2a50aaaef4..34764e4db62469af14592a026015c88b53a03fa5 100644 --- a/src/client/inc/tscJoinProcess.h +++ b/src/client/inc/tscJoinProcess.h @@ -27,7 +27,7 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql); void tscGetQualifiedTSList(SSqlObj* pSql, SJoinSubquerySupporter* p1, SJoinSubquerySupporter* p2, int32_t* num); void tscSetupOutputColumnIndex(SSqlObj* pSql); -int32_t tscLaunchSecondSubquery(SSqlObj* pSql); +int32_t tscLaunchSecondPhaseSubqueries(SSqlObj* pSql); void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code); SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, int32_t index); @@ -121,7 +121,7 @@ STSBuf* tsBufCreate(bool autoDelete); STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete); STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder); -void tsBufDestory(STSBuf* pTSBuf); +void* tsBufDestory(STSBuf* pTSBuf); void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData, int32_t len); int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeIdx); diff --git a/src/client/inc/tscSQLParser.h b/src/client/inc/tscSQLParser.h index 34faad525b6b5e8472403cb616c833cc57b78b0f..dd579d08c3c6b1465ce6b34e29f453c7b00827ec 100644 --- a/src/client/inc/tscSQLParser.h +++ b/src/client/inc/tscSQLParser.h @@ -21,17 +21,78 @@ extern "C" { #endif #include "taos.h" +#include "taosmsg.h" #include "tsqldef.h" #include "ttypes.h" -#include "taosmsg.h" + +enum _sql_cmd { + TSDB_SQL_SELECT = 1, + TSDB_SQL_FETCH, + TSDB_SQL_INSERT, + + TSDB_SQL_MGMT, // the SQL below is for mgmt node + TSDB_SQL_CREATE_DB, + TSDB_SQL_CREATE_TABLE, + TSDB_SQL_DROP_DB, + TSDB_SQL_DROP_TABLE, + TSDB_SQL_CREATE_ACCT, + TSDB_SQL_CREATE_USER, //10 + TSDB_SQL_DROP_ACCT, + TSDB_SQL_DROP_USER, + TSDB_SQL_ALTER_USER, + TSDB_SQL_ALTER_ACCT, + TSDB_SQL_ALTER_TABLE, + TSDB_SQL_ALTER_DB, + TSDB_SQL_CREATE_MNODE, + TSDB_SQL_DROP_MNODE, + TSDB_SQL_CREATE_DNODE, + TSDB_SQL_DROP_DNODE, // 20 + TSDB_SQL_CFG_DNODE, + TSDB_SQL_CFG_MNODE, + TSDB_SQL_SHOW, + TSDB_SQL_RETRIEVE, + TSDB_SQL_KILL_QUERY, + TSDB_SQL_KILL_STREAM, + 
TSDB_SQL_KILL_CONNECTION, + + TSDB_SQL_READ, // SQL below is for read operation + TSDB_SQL_CONNECT, + TSDB_SQL_USE_DB, // 30 + TSDB_SQL_META, + TSDB_SQL_METRIC, + TSDB_SQL_MULTI_META, + TSDB_SQL_HB, + + TSDB_SQL_LOCAL, // SQL below for client local + TSDB_SQL_DESCRIBE_TABLE, + TSDB_SQL_RETRIEVE_METRIC, + TSDB_SQL_METRIC_JOIN_RETRIEVE, + TSDB_SQL_RETRIEVE_TAGS, + + /* + * build empty result instead of accessing dnode to fetch result + * reset the client cache + */ + TSDB_SQL_RETRIEVE_EMPTY_RESULT, //40 + + TSDB_SQL_RESET_CACHE, + TSDB_SQL_SERV_STATUS, + TSDB_SQL_CURRENT_DB, + TSDB_SQL_SERV_VERSION, + TSDB_SQL_CLI_VERSION, + TSDB_SQL_CURRENT_USER, + TSDB_SQL_CFG_LOCAL, + + TSDB_SQL_MAX //48 +}; #define MAX_TOKEN_LEN 30 // token type enum { - TSQL_NODE_TYPE_EXPR = 0x1, - TSQL_NODE_TYPE_ID = 0x2, - TSQL_NODE_TYPE_VALUE = 0x4, + TSQL_NODE_TYPE_EXPR = 0x1, + TSQL_NODE_TYPE_ID = 0x2, + TSQL_NODE_TYPE_VALUE = 0x4, }; extern char tTokenTypeSwitcher[13]; @@ -72,72 +133,12 @@ typedef struct tFieldList { TAOS_FIELD *p; } tFieldList; -// sql operation type +// create table operation type enum TSQL_TYPE { - TSQL_CREATE_NORMAL_METER = 0x01, - TSQL_CREATE_NORMAL_METRIC = 0x02, - TSQL_CREATE_METER_FROM_METRIC = 0x04, - TSQL_CREATE_STREAM = 0x08, - TSQL_QUERY_METER = 0x10, - TSQL_INSERT = 0x20, - - DROP_DNODE = 0x40, - DROP_DATABASE = 0x41, - DROP_TABLE = 0x42, - DROP_USER = 0x43, - DROP_ACCOUNT = 0x44, - - USE_DATABASE = 0x50, - - // show operation - SHOW_DATABASES = 0x60, - SHOW_TABLES = 0x61, - SHOW_STABLES = 0x62, - SHOW_MNODES = 0x63, - SHOW_DNODES = 0x64, - SHOW_ACCOUNTS = 0x65, - SHOW_USERS = 0x66, - SHOW_VGROUPS = 0x67, - SHOW_QUERIES = 0x68, - SHOW_STREAMS = 0x69, - SHOW_CONFIGS = 0x6a, - SHOW_SCORES = 0x6b, - SHOW_MODULES = 0x6c, - SHOW_CONNECTIONS = 0x6d, - SHOW_GRANTS = 0x6e, - SHOW_VNODES = 0x6f, - - // create dnode - CREATE_DNODE = 0x80, - CREATE_DATABASE = 0x81, - CREATE_USER = 0x82, - CREATE_ACCOUNT = 0x83, - - DESCRIBE_TABLE = 0x90, - - ALTER_USER_PASSWD = 0xA0, - ALTER_USER_PRIVILEGES = 0xA1, - ALTER_DNODE = 0xA2, - ALTER_LOCAL = 0xA3, - ALTER_DATABASE = 0xA4, - ALTER_ACCT = 0xA5, - - // reset operation - RESET_QUERY_CACHE = 0xB0, - - // alter tags - ALTER_TABLE_TAGS_ADD = 0xC0, - ALTER_TABLE_TAGS_DROP = 0xC1, - ALTER_TABLE_TAGS_CHG = 0xC2, - ALTER_TABLE_TAGS_SET = 0xC4, - - // alter table column - ALTER_TABLE_ADD_COLUMN = 0xD0, - ALTER_TABLE_DROP_COLUMN = 0xD1, - - KILL_QUERY = 0xD2, - KILL_STREAM = 0xD3, - KILL_CONNECTION = 0xD4, + TSQL_CREATE_TABLE = 0x1, + TSQL_CREATE_STABLE = 0x2, + TSQL_CREATE_TABLE_FROM_STABLE = 0x3, + TSQL_CREATE_STREAM = 0x4, }; typedef struct SQuerySQL { @@ -157,33 +158,31 @@ typedef struct SQuerySQL { typedef struct SCreateTableSQL { struct SSQLToken name; // meter name, create table [meterName] xxx bool existCheck; - + + int8_t type; // create normal table/from super table/ stream struct { tFieldList *pTagColumns; // for normal table, pTagColumns = NULL; tFieldList *pColumns; } colInfo; struct { - SSQLToken metricName; // metric name, for using clause + SSQLToken stableName; // super table name, for using clause tVariantList *pTagVals; // create by using metric, tag value + STagData tagdata; } usingInfo; SQuerySQL *pSelect; - } SCreateTableSQL; typedef struct SAlterTableSQL { SSQLToken name; + int16_t type; + STagData tagData; + tFieldList * pAddColumns; - SSQLToken dropTagToken; tVariantList *varList; // set t=val or: change src dst } SAlterTableSQL; -typedef struct SInsertSQL { - SSQLToken name; - struct tSQLExprListList *pValue; -} SInsertSQL; - typedef 
struct SCreateDBInfo { SSQLToken dbname; int32_t replica; @@ -204,41 +203,68 @@ typedef struct SCreateDBInfo { } SCreateDBInfo; typedef struct SCreateAcctSQL { - int32_t users; - int32_t dbs; - int32_t tseries; - int32_t streams; - int32_t pps; - int64_t storage; - int64_t qtime; - int32_t conns; + int32_t maxUsers; + int32_t maxDbs; + int32_t maxTimeSeries; + int32_t maxStreams; + int32_t maxPointsPerSecond; + int64_t maxStorage; + int64_t maxQueryTime; + int32_t maxConnections; SSQLToken stat; } SCreateAcctSQL; +typedef struct SShowInfo { + uint8_t showType; + SSQLToken prefix; + SSQLToken pattern; +} SShowInfo; + +typedef struct SUserInfo { + SSQLToken user; + SSQLToken passwd; +// bool hasPasswd; + + SSQLToken privilege; +// bool hasPrivilege; + + int16_t type; +} SUserInfo; + typedef struct tDCLSQL { int32_t nTokens; /* Number of expressions on the list */ int32_t nAlloc; /* Number of entries allocated below */ SSQLToken *a; /* one entry for element */ + bool existsCheck; union { SCreateDBInfo dbOpt; SCreateAcctSQL acctOpt; + SShowInfo showOpt; + SSQLToken ip; }; + + SUserInfo user; + } tDCLSQL; +typedef struct SSubclauseInfo { // "UNION" multiple select sub-clause + SQuerySQL **pClause; + int32_t numOfClause; +} SSubclauseInfo; + typedef struct SSqlInfo { - int32_t sqlType; - bool validSql; + int32_t type; + bool valid; union { SCreateTableSQL *pCreateTableInfo; - SInsertSQL * pInsertInfo; SAlterTableSQL * pAlterInfo; - SQuerySQL * pQueryInfo; tDCLSQL * pDCLInfo; }; - char pzErrMsg[256]; + SSubclauseInfo subclauseInfo; + char pzErrMsg[256]; } SSqlInfo; typedef struct tSQLExpr { @@ -338,31 +364,39 @@ SQuerySQL *tSetQuerySQLElems(SSQLToken *pSelectToken, tSQLExprList *pSelection, SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pMetricName, tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type); -void tSQLExprDestroy(tSQLExpr *); -void tSQLExprNodeDestroy(tSQLExpr *pExpr); + +void tSQLExprNodeDestroy(tSQLExpr *pExpr); tSQLExpr *tSQLExprNodeClone(tSQLExpr *pExpr); SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type); tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExprList); -void tSetInsertSQLElems(SSqlInfo *pInfo, SSQLToken *pName, tSQLExprListList *pList); +void destroyAllSelectClause(SSubclauseInfo *pSql); +void doDestroyQuerySql(SQuerySQL *pSql); -void destroyQuerySql(SQuerySQL *pSql); +SSqlInfo * setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type); +SSubclauseInfo *setSubclause(SSubclauseInfo *pClause, void *pSqlExprInfo); -void setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type); +SSubclauseInfo *appendSelectClause(SSubclauseInfo *pInfo, void *pSubclause); void setCreatedMeterName(SSqlInfo *pInfo, SSQLToken *pMeterName, SSQLToken *pIfNotExists); void SQLInfoDestroy(SSqlInfo *pInfo); void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParams, ...); +void setDropDBTableInfo(SSqlInfo *pInfo, int32_t type, SSQLToken* pToken, SSQLToken* existsCheck); +void setShowOptions(SSqlInfo *pInfo, int32_t type, SSQLToken* prefix, SSQLToken* pPatterns); tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken); void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBInfo *pDB, SSQLToken *pIgExists); void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo); +void setCreateUserSQL(SSqlInfo *pInfo, SSQLToken 
*pName, SSQLToken *pPasswd); +void setKillSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *ip); +void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SSQLToken *pName, SSQLToken* pPwd, SSQLToken *pPrivilege); + void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo); // prefix show db.tables; diff --git a/src/client/inc/tscSecondaryMerge.h b/src/client/inc/tscSecondaryMerge.h index 0c6472f6b367857edbdc92a08e0bc8a263572ee1..bcfe14fcb79e79b6d3965dfd5970421819f27b23 100644 --- a/src/client/inc/tscSecondaryMerge.h +++ b/src/client/inc/tscSecondaryMerge.h @@ -120,7 +120,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd void tscDestroyLocalReducer(SSqlObj *pSql); -int32_t tscLocalDoReduce(SSqlObj *pSql); +int32_t tscDoLocalreduce(SSqlObj *pSql); #ifdef __cplusplus } diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index d1b8dcfed967ae79c7b94b66a8db3239c20d0d68..9868f703c37f10c1c15df576899d7f2724bdb196 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -29,9 +29,9 @@ extern "C" { #include "tsclient.h" #include "tsdb.h" -#define UTIL_METER_IS_METRIC(metaInfo) \ +#define UTIL_METER_IS_SUPERTABLE(metaInfo) \ (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC)) -#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_METRIC(metaInfo))) +#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_SUPERTABLE(metaInfo))) #define UTIL_METER_IS_CREATE_FROM_METRIC(metaInfo) \ (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_MTABLE)) @@ -67,7 +67,7 @@ typedef struct SJoinSubquerySupporter { } SJoinSubquerySupporter; int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name, - STableDataBlocks** dataBlocks); + SMeterMeta* pMeterMeta, STableDataBlocks** dataBlocks); void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks); void tscDestroyDataBlock(STableDataBlocks* pDataBlock); @@ -81,7 +81,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock); void tscFreeUnusedDataBlocks(SDataBlockList* pList); int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList); int32_t tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size, - int32_t startOffset, int32_t rowSize, const char* tableId, + int32_t startOffset, int32_t rowSize, const char* tableId, SMeterMeta* pMeterMeta, STableDataBlocks** dataBlocks); SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx); @@ -95,23 +95,27 @@ SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx); * @param pSql sql object * @return */ -bool tscIsPointInterpQuery(SSqlCmd* pCmd); -bool tscIsTWAQuery(SSqlCmd* pCmd); -bool tscProjectionQueryOnMetric(SSqlCmd* pCmd); -bool tscProjectionQueryOnTable(SSqlCmd* pCmd); +bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo); +bool tscIsTWAQuery(SQueryInfo* pQueryInfo); -bool tscIsTwoStageMergeMetricQuery(SSqlCmd* pCmd); +bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex); +bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex); +bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex); + +bool tscProjectionQueryOnTable(SQueryInfo* pQueryInfo); + +bool tscIsTwoStageMergeMetricQuery(SQueryInfo* pQueryInfo, int32_t tableIndex); bool tscQueryOnMetric(SSqlCmd* pCmd); -bool tscQueryMetricTags(SSqlCmd* pCmd); +bool 
tscQueryMetricTags(SQueryInfo* pQueryInfo); bool tscIsSelectivityWithTagQuery(SSqlCmd* pCmd); -void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, +void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, SSchema* pColSchema, int16_t isTag); -void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex); +void addRequiredTagColumn(SQueryInfo* pQueryInfo, int32_t tagColIndex, int32_t tableIndex); -int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex); -void tscClearInterpInfo(SSqlCmd* pCmd); +int32_t setMeterID(SMeterMetaInfo* pMeterMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql); +void tscClearInterpInfo(SQueryInfo* pQueryInfo); bool tscIsInsertOrImportData(char* sqlstr); @@ -125,29 +129,33 @@ void tscFieldInfoSetValFromField(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIE void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, const char* name, int16_t bytes); void tscFieldInfoUpdateVisible(SFieldInfo* pFieldInfo, int32_t index, bool visible); -void tscFieldInfoCalOffset(SSqlCmd* pCmd); -void tscFieldInfoUpdateOffset(SSqlCmd* pCmd); +void tscFieldInfoCalOffset(SQueryInfo* pQueryInfo); +void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo); void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList, int32_t size); -void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst); +void tscFieldInfoCopyAll(SFieldInfo* dst, SFieldInfo* src); -TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index); -int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index); -int32_t tscGetResRowLength(SSqlCmd* pCmd); +TAOS_FIELD* tscFieldInfoGetField(SQueryInfo* pQueryInfo, int32_t index); +int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index); +int32_t tscGetResRowLength(SQueryInfo* pQueryInfo); void tscClearFieldInfo(SFieldInfo* pFieldInfo); +int32_t tscNumOfFields(SQueryInfo* pQueryInfo); +int32_t tscFieldInfoCompare(SFieldInfo* pFieldInfo1, SFieldInfo* pFieldInfo2); void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex); -SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, +SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t interSize); -SSqlExpr* tscSqlExprInsertEmpty(SSqlCmd* pCmd, int32_t index, int16_t functionId); +SSqlExpr* tscSqlExprInsertEmpty(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId); -SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, +SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, int16_t size); -SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index); +SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index); void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid); +void* tscSqlExprDestroy(SSqlExpr* pExpr); +void tscSqlExprInfoDestroy(SSqlExprInfo* pExprInfo); -SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* colIndex); +SColumnBase* tscColumnBaseInfoInsert(SQueryInfo* pQueryInfo, SColumnIndex* colIndex); void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src); void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* 
src); @@ -162,7 +170,7 @@ int32_t tscValidateName(SSQLToken* pToken); void tscIncStreamExecutionCount(void* pStream); -bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId); +bool tscValidateColumnId(SMeterMetaInfo* pMeterMetaInfo, int32_t colId); // get starter position of metric query condition (query on tags) in SSqlCmd.payload SCond* tsGetMetricQueryCondPos(STagCond* pCond, uint64_t tableIndex); @@ -171,30 +179,38 @@ void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str); void tscTagCondCopy(STagCond* dest, const STagCond* src); void tscTagCondRelease(STagCond* pCond); -void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd); +void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo); void tscSetFreeHeatBeat(STscObj* pObj); bool tscShouldFreeHeatBeat(SSqlObj* pHb); void tscCleanSqlCmd(SSqlCmd* pCmd); bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql); -void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache); -SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index); -SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* index); +void tscRemoveAllMeterMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache); +SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd *pCmd, int32_t subClauseIndex, int32_t tableIndex); +SMeterMetaInfo* tscGetMeterMetaInfoFromQueryInfo(SQueryInfo *pQueryInfo, int32_t tableIndex); + +SQueryInfo *tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex); +int32_t tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo); + +SMeterMetaInfo* tscGetMeterMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index); void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache); -SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta, +SMeterMetaInfo* tscAddMeterMetaInfo(SQueryInfo* pQueryInfo, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta, int16_t numOfTags, int16_t* tags); -SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SSqlCmd* pCmd); +SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SQueryInfo *pQueryInfo); +int32_t tscAddSubqueryInfo(SSqlCmd *pCmd); +void tscFreeSubqueryInfo(SSqlCmd* pCmd); +void tscClearSubqueryInfo(SSqlCmd* pCmd); -void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* keyStr, uint64_t uid); -int tscGetMetricMeta(SSqlObj* pSql); -int tscGetMeterMeta(SSqlObj* pSql, char* meterId, int32_t tableIndex); -int tscGetMeterMetaEx(SSqlObj* pSql, char* meterId, bool createIfNotExists); +void tscGetMetricMetaCacheKey(SQueryInfo* pQueryInfo, char* keyStr, uint64_t uid); +int tscGetMetricMeta(SSqlObj* pSql, int32_t clauseIndex); +int tscGetMeterMeta(SSqlObj* pSql, SMeterMetaInfo* pMeterMetaInfo); +int tscGetMeterMetaEx(SSqlObj* pSql, SMeterMetaInfo* pMeterMetaInfo, bool createIfNotExists); void tscResetForNextRetrieve(SSqlRes* pRes); -void tscAddTimestampColumn(SSqlCmd* pCmd, int16_t functionId, int16_t tableIndex); +void tscAddTimestampColumn(SQueryInfo* pQueryInfo, int16_t functionId, int16_t tableIndex); void tscDoQuery(SSqlObj* pSql); /** @@ -215,9 +231,9 @@ void tscDoQuery(SSqlObj* pSql); * @return */ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql); -void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex); +void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex); -void 
doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex); +void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex); int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid); @@ -226,7 +242,13 @@ TAOS* taos_connect_a(char* ip, char* user, char* pass, char* db, uint16_t port, void sortRemoveDuplicates(STableDataBlocks* dataBuf); -void tscPrintSelectClause(SSqlCmd* pCmd); +void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex); + +bool hasMoreVnodesToTry(SSqlObj *pSql); +void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp); +void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows); +void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()); + #ifdef __cplusplus } diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 6adf2f1be161bc73de1891175a56c9b34f26072f..fcbd3dac1f27a2bfadc470fcbab7075c1fab6246 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -20,14 +20,6 @@ extern "C" { #endif -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "taos.h" #include "taosmsg.h" @@ -39,70 +31,9 @@ extern "C" { #include "tsqlfunction.h" #include "tutil.h" -#define TSC_GET_RESPTR_BASE(res, cmd, col, ord) \ - ((res->data + tscFieldInfoGetOffset(cmd, col) * res->numOfRows) + \ - (1 - ord.order) * (res->numOfRows - 1) * tscFieldInfoGetField(cmd, col)->bytes) - -enum _sql_cmd { - TSDB_SQL_SELECT, - TSDB_SQL_FETCH, - TSDB_SQL_INSERT, - - TSDB_SQL_MGMT, // the SQL below is for mgmt node - TSDB_SQL_CREATE_DB, - TSDB_SQL_CREATE_TABLE, - TSDB_SQL_DROP_DB, - TSDB_SQL_DROP_TABLE, - TSDB_SQL_CREATE_ACCT, - TSDB_SQL_CREATE_USER, - TSDB_SQL_DROP_ACCT, // 10 - TSDB_SQL_DROP_USER, - TSDB_SQL_ALTER_USER, - TSDB_SQL_ALTER_ACCT, - TSDB_SQL_ALTER_TABLE, - TSDB_SQL_ALTER_DB, - TSDB_SQL_CREATE_MNODE, - TSDB_SQL_DROP_MNODE, - TSDB_SQL_CREATE_DNODE, - TSDB_SQL_DROP_DNODE, - TSDB_SQL_CFG_DNODE, // 20 - TSDB_SQL_CFG_MNODE, - TSDB_SQL_SHOW, - TSDB_SQL_RETRIEVE, - TSDB_SQL_KILL_QUERY, - TSDB_SQL_KILL_STREAM, - TSDB_SQL_KILL_CONNECTION, - - TSDB_SQL_READ, // SQL below is for read operation - TSDB_SQL_CONNECT, - TSDB_SQL_USE_DB, - TSDB_SQL_META, // 30 - TSDB_SQL_METRIC, - TSDB_SQL_MULTI_META, - TSDB_SQL_HB, - - TSDB_SQL_LOCAL, // SQL below for client local - TSDB_SQL_DESCRIBE_TABLE, - TSDB_SQL_RETRIEVE_METRIC, - TSDB_SQL_METRIC_JOIN_RETRIEVE, - TSDB_SQL_RETRIEVE_TAGS, - /* - * build empty result instead of accessing dnode to fetch result - * reset the client cache - */ - TSDB_SQL_RETRIEVE_EMPTY_RESULT, - - TSDB_SQL_RESET_CACHE, // 40 - TSDB_SQL_SERV_STATUS, - TSDB_SQL_CURRENT_DB, - TSDB_SQL_SERV_VERSION, - TSDB_SQL_CLI_VERSION, - TSDB_SQL_CURRENT_USER, - TSDB_SQL_CFG_LOCAL, - - TSDB_SQL_MAX -}; - +#define TSC_GET_RESPTR_BASE(res, _queryinfo, col, ord) \ + (res->data + tscFieldInfoGetOffset(_queryinfo, col) * res->numOfRows) + // forward declaration struct SSqlInfo; @@ -115,17 +46,17 @@ typedef struct SSqlGroupbyExpr { } SSqlGroupbyExpr; typedef struct SMeterMetaInfo { - SMeterMeta * pMeterMeta; // metermeta - SMetricMeta *pMetricMeta; // metricmeta - + SMeterMeta * pMeterMeta; // metermeta + SMetricMeta *pMetricMeta; // metricmeta + /* * 1. keep the vnode index during the multi-vnode super table projection query * 2. 
keep the vnode index for multi-vnode insertion */ - int32_t vnodeIndex; - char name[TSDB_METER_ID_LEN + 1]; // table(super table) name - int16_t numOfTags; // total required tags in query, including groupby tags - int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection + int32_t vnodeIndex; + char name[TSDB_METER_ID_LEN + 1]; // table(super table) name + int16_t numOfTags; // total required tags in query, including groupby tags + int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection } SMeterMetaInfo; /* the structure for sql function in select clause */ @@ -182,13 +113,6 @@ typedef struct SColumnBaseInfo { struct SLocalReducer; -// todo move to utility -typedef struct SString { - int32_t alloc; - int32_t n; - char * z; -} SString; - typedef struct SCond { uint64_t uid; char * cond; @@ -230,23 +154,24 @@ typedef struct SParamInfo { } SParamInfo; typedef struct STableDataBlocks { - char meterId[TSDB_METER_ID_LEN]; - int8_t tsSource; // where does the UNIX timestamp come from, server or client - bool ordered; // if current rows are ordered or not - int64_t vgid; // virtual group id - int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending - int32_t numOfMeters; // number of tables in current submit block - - int32_t rowSize; // row size for current table + char meterId[TSDB_METER_ID_LEN]; + int8_t tsSource; // where does the UNIX timestamp come from, server or client + bool ordered; // if current rows are ordered or not + int64_t vgid; // virtual group id + int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending + int32_t numOfMeters; // number of tables in current submit block + + int32_t rowSize; // row size for current table uint32_t nAllocSize; + uint32_t headerSize; // header for metadata (submit metadata) uint32_t size; - + /* * the metermeta for current table, the metermeta will be used during submit stage, keep a ref * to avoid it to be removed from cache */ - SMeterMeta* pMeterMeta; - + SMeterMeta *pMeterMeta; + union { char *filename; char *pData; @@ -267,52 +192,69 @@ typedef struct SDataBlockList { STableDataBlocks **pData; } SDataBlockList; -typedef struct { - SOrderVal order; - int command; - int count; // TODO refactor +typedef struct SQueryInfo { + int16_t command; // the command may be different for each subclause, so keep it separately. + uint16_t type; // query/insert/import type + char intervalTimeUnit; - union { - bool existsCheck; // check if the table exists - int8_t showType; // show command type - }; - - int8_t isInsertFromFile; // load data from file or not - bool import; // import/insert type - uint8_t msgType; - uint16_t type; // query type - char intervalTimeUnit; int64_t etime, stime; int64_t nAggTimeInterval; // aggregation time interval int64_t nSlidingTime; // sliding window in mseconds SSqlGroupbyExpr groupbyExpr; // group by tags info - /* - * use to keep short request msg and error msg, in such case, SSqlCmd->payload == SSqlCmd->ext; - * create table/query/insert operations will exceed the TSDB_SQLCMD_SIZE. 
- * - * In such cases, allocate the memory dynamically, and need to free the memory - */ - uint32_t allocSize; - char * payload; - int payloadLen; - short numOfCols; - SColumnBaseInfo colList; - SFieldInfo fieldsInfo; - SSqlExprInfo exprsInfo; - SLimitVal limit; - SLimitVal slimit; - int64_t globalLimit; - STagCond tagCond; - int16_t interpoType; // interpolate type - int16_t numOfTables; - - // submit data blocks branched according to vnode - SDataBlockList * pDataBlocks; + SColumnBaseInfo colList; + SFieldInfo fieldsInfo; + SSqlExprInfo exprsInfo; + SLimitVal limit; + SLimitVal slimit; + STagCond tagCond; + SOrderVal order; + int16_t interpoType; // interpolate type + int16_t numOfTables; SMeterMetaInfo **pMeterInfo; struct STSBuf * tsBuf; - // todo use dynamic allocated memory for defaultVal - int64_t defaultVal[TSDB_MAX_COLUMNS]; // default value for interpolation + int64_t * defaultVal; // default value for interpolation + char * msg; // pointer to the pCmd->payload to keep error message temporarily + int64_t clauseLimit; // limit for current sub clause + + // offset value in the original sql expression, NOT sent to virtual node, only applied at client side + int64_t prjOffset; +} SQueryInfo; + +// data source from sql string or from file +enum { + DATA_FROM_SQL_STRING = 1, + DATA_FROM_DATA_FILE = 2, +}; + +typedef struct { + int command; + uint8_t msgType; + + union { + bool existsCheck; // check if the table exists or not + bool inStream; // denote if current sql is executed in stream or not + bool createOnDemand; // if the table is missing, create it on the fly (during tscGetMeterMeta) + int8_t dataSourceType; // load data from file or not + }; + + union { + int32_t count; + int32_t numOfTablesInSubmit; + }; + + int32_t clauseIndex; // index of multiple subclause query + int8_t isParseFinish; + short numOfCols; + uint32_t allocSize; + char * payload; + int payloadLen; + + SQueryInfo **pQueryInfo; + int32_t numOfClause; + + // submit data blocks branched according to vnode + SDataBlockList *pDataBlocks; // for parameter ('?') binding and batch processing int32_t batchSize; @@ -328,12 +270,15 @@ struct STSBuf; typedef struct { uint8_t code; - int numOfRows; // num of results in current retrieved - int numOfTotal; // num of total results + int64_t numOfRows; // num of results in current retrieval + int64_t numOfTotal; // num of total results + int64_t numOfTotalInCurrentClause; // num of total results in current subclause + char * pRsp; int rspType; int rspLen; uint64_t qhandle; + int64_t uid; int64_t useconds; int64_t offset; // offset value from vnode during projection query of stable int row; @@ -380,6 +325,7 @@ typedef struct _sql_obj { uint32_t queryId; void * thandle; void * pStream; + void * pSubscription; char * sqlstr; char retry; char maxRetry; @@ -391,8 +337,8 @@ typedef struct _sql_obj { SSqlCmd cmd; SSqlRes res; uint8_t numOfSubs; - char* asyncTblPos; - void* pTableHashList; + char * asyncTblPos; + void * pTableHashList; struct _sql_obj **pSubs; struct _sql_obj * prev, *next; } SSqlObj; @@ -432,9 +378,11 @@ typedef struct { } SIpStrList; // tscSql API -int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion); +int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion); + +void tscInitMsgs(); +extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo); -void tscInitMsgs(); void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle); int tscProcessSql(SSqlObj *pSql); @@ -453,15 +401,22 @@ int taos_retrieve(TAOS_RES *res); /* * transfer 
function for metric query in stream computing, the function need to be change * before send query message to vnode */ -int32_t tscTansformSQLFunctionForMetricQuery(SSqlCmd *pCmd); -void tscRestoreSQLFunctionForMetricQuery(SSqlCmd *pCmd); +int32_t tscTansformSQLFunctionForSTableQuery(SQueryInfo *pQueryInfo); +void tscRestoreSQLFunctionForMetricQuery(SQueryInfo *pQueryInfo); void tscClearSqlMetaInfoForce(SSqlCmd *pCmd); -int32_t tscCreateResPointerInfo(SSqlCmd *pCmd, SSqlRes *pRes); +int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo); void tscDestroyResPointerInfo(SSqlRes *pRes); void tscFreeSqlCmdData(SSqlCmd *pCmd); +void tscFreeResData(SSqlObj* pSql); + +/** + * free query result of the sql object + * @param pObj + */ +void tscFreeSqlResult(SSqlObj* pSql); /** * only free part of resources allocated during query. @@ -479,12 +434,14 @@ void tscFreeSqlObj(SSqlObj *pObj); void tscCloseTscObj(STscObj *pObj); -void tscProcessMultiVnodesInsert(SSqlObj *pSql); -void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql); -void tscKillMetricQuery(SSqlObj *pSql); -void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen); -bool tscIsUpdateQuery(STscObj *pObj); -bool tscHasReachLimitation(SSqlObj* pSql); +void tscProcessMultiVnodesInsert(SSqlObj *pSql); +void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql); +void tscKillMetricQuery(SSqlObj *pSql); +void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen); +bool tscIsUpdateQuery(STscObj *pObj); +bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes); + +char *tscGetErrorMsgPayload(SSqlCmd *pCmd); int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql); @@ -506,6 +463,8 @@ extern int tsInsertHeadSize; extern int tscNumOfThreads; extern SIpStrList tscMgmtIpList; +typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int numOfRows); + #ifdef __cplusplus } #endif diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index 958252b4deca7708c99e6b762613813c2f9d330b..8dbe63d75a73dd18a15bc1da8f99c7b8db774eea 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -135,7 +135,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;JI)J */ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp - (JNIEnv *, jobject, jstring, jstring, jstring, jstring, jstring, jlong, jint); + (JNIEnv *, jobject, jlong, jboolean, jstring, jstring, jint); /* * Class: com_taosdata_jdbc_TSDBJNIConnector @@ -143,7 +143,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp * Signature: (J)Lcom/taosdata/jdbc/TSDBResultSetRowData; */ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp - (JNIEnv *, jobject, jlong); + (JNIEnv *, jobject, jlong, jint); /* * Class: com_taosdata_jdbc_TSDBJNIConnector @@ -151,7 +151,7 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp * Signature: (J)V */ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp - (JNIEnv *, jobject, jlong); + (JNIEnv *, jobject, jlong, jboolean); /* * Class: com_taosdata_jdbc_TSDBJNIConnector diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 
9cec4f4b0f2e4f67448f35afddaec14e92bce7cf..228403c79d318d922f5571a9663b3c97bbffbbc8 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -20,6 +20,7 @@ #include "tscJoinProcess.h" #include "tsclient.h" #include "tscUtil.h" +#include "ttime.h" int __init = 0; @@ -514,92 +515,42 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm } } -JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jstring jhost, - jstring juser, jstring jpass, jstring jdb, - jstring jtable, jlong jtime, - jint jperiod) { - TAOS_SUB *tsub; - jlong sub = 0; - char * host = NULL; - char * user = NULL; - char * pass = NULL; - char * db = NULL; - char * table = NULL; - int64_t time = 0; - int period = 0; +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jlong con, + jboolean restart, jstring jtopic, jstring jsql, jint jinterval) { + jlong sub = 0; + TAOS *taos = (TAOS *)con; + char *topic = NULL; + char *sql = NULL; jniGetGlobalMethod(env); jniTrace("jobj:%p, in TSDBJNIConnector_subscribeImp", jobj); - if (jhost != NULL) { - host = (char *)(*env)->GetStringUTFChars(env, jhost, NULL); - } - if (juser != NULL) { - user = (char *)(*env)->GetStringUTFChars(env, juser, NULL); - } - if (jpass != NULL) { - pass = (char *)(*env)->GetStringUTFChars(env, jpass, NULL); - } - if (jdb != NULL) { - db = (char *)(*env)->GetStringUTFChars(env, jdb, NULL); - } - if (jtable != NULL) { - table = (char *)(*env)->GetStringUTFChars(env, jtable, NULL); + if (jtopic != NULL) { + topic = (char *)(*env)->GetStringUTFChars(env, jtopic, NULL); } - time = (int64_t)jtime; - period = (int)jperiod; - - if (user == NULL) { - jniTrace("jobj:%p, user is null, use tsDefaultUser", jobj); - user = tsDefaultUser; - } - if (pass == NULL) { - jniTrace("jobj:%p, pass is null, use tsDefaultPass", jobj); - pass = tsDefaultPass; + if (jsql != NULL) { + sql = (char *)(*env)->GetStringUTFChars(env, jsql, NULL); } - jniTrace("jobj:%p, host:%s, user:%s, pass:%s, db:%s, table:%s, time:%d, period:%d", jobj, host, user, pass, db, table, - time, period); - tsub = taos_subscribe(host, user, pass, db, table, time, period); + TAOS_SUB *tsub = taos_subscribe(taos, (int)restart, topic, sql, NULL, NULL, jinterval); sub = (jlong)tsub; if (sub == 0) { - jniTrace("jobj:%p, failed to subscribe to db:%s, table:%s", jobj, db, table); + jniTrace("jobj:%p, failed to subscribe: topic:%s", jobj, topic); } else { - jniTrace("jobj:%p, successfully subscribe to db:%s, table:%s, sub:%ld, tsub:%p", jobj, db, table, sub, tsub); + jniTrace("jobj:%p, successfully subscribe: topic: %s", jobj, topic); } - if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host); - if (user != NULL && user != tsDefaultUser) (*env)->ReleaseStringUTFChars(env, juser, user); - if (pass != NULL && pass != tsDefaultPass) (*env)->ReleaseStringUTFChars(env, jpass, pass); - if (db != NULL) (*env)->ReleaseStringUTFChars(env, jdb, db); - if (table != NULL) (*env)->ReleaseStringUTFChars(env, jtable, table); + if (topic != NULL) (*env)->ReleaseStringUTFChars(env, jtopic, topic); + if (sql != NULL) (*env)->ReleaseStringUTFChars(env, jsql, sql); return sub; } -JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) { - jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub); - - TAOS_SUB * tsub = (TAOS_SUB *)sub; - TAOS_ROW row = taos_consume(tsub); - TAOS_FIELD *fields = 
taos_fetch_subfields(tsub); - int num_fields = taos_subfields_count(tsub); - - jniGetGlobalMethod(env); - - jniTrace("jobj:%p, check fields:%p, num_fields=%d", jobj, fields, num_fields); - +static jobject convert_one_row(JNIEnv *env, TAOS_ROW row, TAOS_FIELD* fields, int num_fields) { jobject rowobj = (*env)->NewObject(env, g_rowdataClass, g_rowdataConstructor, num_fields); jniTrace("created a rowdata object, rowobj:%p", rowobj); - if (row == NULL) { - jniTrace("jobj:%p, tsub:%p, fields size is %d, fetch row to the end", jobj, tsub, num_fields); - return NULL; - } - - char tmp[TSDB_MAX_BYTES_PER_ROW] = {0}; - for (int i = 0; i < num_fields; i++) { if (row[i] == NULL) { continue; @@ -634,6 +585,7 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI } break; case TSDB_DATA_TYPE_BINARY: { + char tmp[TSDB_MAX_BYTES_PER_ROW] = {0}; strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case that terminated does not exist (*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp)); @@ -642,7 +594,7 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI } case TSDB_DATA_TYPE_NCHAR: (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteArrayFp, i, - jniFromNCharToByteArray(env, (char*)row[i], fields[i].bytes)); + jniFromNCharToByteArray(env, (char*)row[i], fields[i].bytes)); break; case TSDB_DATA_TYPE_TIMESTAMP: (*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i])); @@ -651,13 +603,56 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI break; } } - jniTrace("jobj:%p, rowdata retrieved, rowobj:%p", jobj, rowobj); return rowobj; } -JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub) { +JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub, jint timeout) { + jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub); + jniGetGlobalMethod(env); + + TAOS_SUB *tsub = (TAOS_SUB *)sub; + jobject rows = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp); + + int64_t start = taosGetTimestampMs(); + int count = 0; + + while (true) { + TAOS_RES * res = taos_consume(tsub); + if (res == NULL) { + jniError("jobj:%p, tsub:%p, taos_consume returns NULL", jobj, tsub); + return NULL; + } + + TAOS_FIELD *fields = taos_fetch_fields(res); + int num_fields = taos_num_fields(res); + while (true) { + TAOS_ROW row = taos_fetch_row(res); + if (row == NULL) { + break; + } + jobject rowobj = convert_one_row(env, row, fields, num_fields); + (*env)->CallBooleanMethod(env, rows, g_arrayListAddFp, rowobj); + count++; + } + + if (count > 0) { + break; + } + if (timeout == -1) { + continue; + } + if (((int)(taosGetTimestampMs() - start)) >= timeout) { + jniTrace("jobj:%p, sub:%ld, timeout", jobj, sub); + break; + } + } + + return rows; +} + +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub, jboolean keepProgress) { TAOS_SUB *tsub = (TAOS_SUB *)sub; - taos_unsubscribe(tsub); + taos_unsubscribe(tsub, keepProgress); } JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp(JNIEnv *env, jobject jobj, diff --git a/src/client/src/sql.c b/src/client/src/sql.c index 51b8a9b9e6c4db065fe8a1902ba665150868307b..54df3e36696359b429aa97ad5f06c6d93b8e67bf 100644 --- a/src/client/src/sql.c +++ b/src/client/src/sql.c @@ -78,39 +78,41 @@ 
** defined, then do no error processing. ** YYNSTATE the combined number of states. ** YYNRULE the number of rules in the grammar +** YYNTOKEN Number of terminal symbols ** YY_MAX_SHIFT Maximum value for shift actions ** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions ** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions -** YY_MIN_REDUCE Maximum value for reduce actions ** YY_ERROR_ACTION The yy_action[] code for syntax error ** YY_ACCEPT_ACTION The yy_action[] code for accept ** YY_NO_ACTION The yy_action[] code for no-op +** YY_MIN_REDUCE Minimum value for reduce actions +** YY_MAX_REDUCE Maximum value for reduce actions */ #ifndef INTERFACE # define INTERFACE 1 #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 262 +#define YYNOCODE 268 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SSQLToken typedef union { int yyinit; ParseTOKENTYPE yy0; - SQuerySQL* yy138; - SCreateAcctSQL yy155; - SLimitVal yy162; - int yy220; - tVariant yy236; - tSQLExprListList* yy237; - tSQLExpr* yy244; - SCreateDBInfo yy262; - tSQLExprList* yy284; - SCreateTableSQL* yy344; - int64_t yy369; - TAOS_FIELD yy397; - tFieldList* yy421; - tVariantList* yy480; + tVariantList* yy30; + SLimitVal yy150; + SCreateTableSQL* yy212; + SCreateAcctSQL yy239; + int yy250; + SSubclauseInfo* yy309; + tFieldList* yy325; + tVariant yy380; + tSQLExpr* yy388; + SQuerySQL* yy444; + int64_t yy489; + TAOS_FIELD yy505; + tSQLExprList* yy506; + SCreateDBInfo yy532; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -120,22 +122,19 @@ typedef union { #define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 253 -#define YYNRULE 217 -#define YY_MAX_SHIFT 252 -#define YY_MIN_SHIFTREDUCE 404 -#define YY_MAX_SHIFTREDUCE 620 -#define YY_MIN_REDUCE 621 -#define YY_MAX_REDUCE 837 -#define YY_ERROR_ACTION 838 -#define YY_ACCEPT_ACTION 839 -#define YY_NO_ACTION 840 +#define YYNSTATE 247 +#define YYNRULE 216 +#define YYNTOKEN 203 +#define YY_MAX_SHIFT 246 +#define YY_MIN_SHIFTREDUCE 399 +#define YY_MAX_SHIFTREDUCE 614 +#define YY_ERROR_ACTION 615 +#define YY_ACCEPT_ACTION 616 +#define YY_NO_ACTION 617 +#define YY_MIN_REDUCE 618 +#define YY_MAX_REDUCE 833 /************* End control #defines *******************************************/ -/* The yyzerominor constant is used to initialize instances of -** YYMINORTYPE objects to zero. */ -static const YYMINORTYPE yyzerominor = { 0 }; - /* Define the yytestcase() macro to be a no-op if is not already defined ** otherwise. ** @@ -163,9 +162,6 @@ static const YYMINORTYPE yyzerominor = { 0 }; ** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then ** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE. ** -** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE -** and YY_MAX_REDUCE - ** N == YY_ERROR_ACTION A syntax error has occurred. ** ** N == YY_ACCEPT_ACTION The parser accepts its input. @@ -173,21 +169,22 @@ static const YYMINORTYPE yyzerominor = { 0 }; ** N == YY_NO_ACTION No such action. Denotes unused ** slots in the yy_action[] table. ** +** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE +** and YY_MAX_REDUCE +** ** The action table is constructed as a single large table named yy_action[]. 
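**
** In C form, the lookup described just below boils down to the following
** sketch (illustrative only, not part of the generated tables; S is the
** current state, X the lookahead token):
**
**      i = yy_shift_ofst[S] + X;
**      act = (yy_lookahead[i] == X) ? yy_action[i] : yy_default[S];
**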
-** Given state S and lookahead X, the action is computed as +** Given state S and lookahead X, the action is computed as either: ** -** yy_action[ yy_shift_ofst[S] + X ] +** (A) N = yy_action[ yy_shift_ofst[S] + X ] +** (B) N = yy_default[S] ** -** If the index value yy_shift_ofst[S]+X is out of range or if the value -** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X or if yy_shift_ofst[S] -** is equal to YY_SHIFT_USE_DFLT, it means that the action is not in the table -** and that yy_default[S] should be used instead. +** The (A) formula is preferred. The B formula is used instead if +** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X. ** -** The formula above is for computing the action when the lookahead is +** The formulas above are for computing the action when the lookahead is ** a terminal symbol. If the lookahead is a non-terminal (as occurs after ** a reduce action) then the yy_reduce_ofst[] array is used in place of -** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of -** YY_SHIFT_USE_DFLT. +** the yy_shift_ofst[] array. ** ** The following are the tables generated in this section: ** @@ -201,198 +198,212 @@ static const YYMINORTYPE yyzerominor = { 0 }; ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (534) +#define YY_ACTTAB_COUNT (529) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 445, 74, 78, 245, 85, 77, 154, 250, 446, 839, - /* 10 */ 252, 80, 43, 45, 7, 37, 38, 62, 112, 172, - /* 20 */ 31, 445, 445, 206, 41, 39, 42, 40, 242, 446, - /* 30 */ 446, 136, 36, 35, 10, 102, 34, 33, 32, 43, - /* 40 */ 45, 602, 37, 38, 157, 526, 136, 31, 136, 134, - /* 50 */ 206, 41, 39, 42, 40, 160, 603, 159, 603, 36, - /* 60 */ 35, 155, 516, 34, 33, 32, 405, 406, 407, 408, - /* 70 */ 409, 410, 411, 412, 413, 414, 415, 416, 251, 21, - /* 80 */ 43, 45, 173, 37, 38, 228, 227, 203, 31, 59, - /* 90 */ 21, 206, 41, 39, 42, 40, 34, 33, 32, 57, - /* 100 */ 36, 35, 552, 553, 34, 33, 32, 45, 233, 37, - /* 110 */ 38, 168, 133, 513, 31, 21, 21, 206, 41, 39, - /* 120 */ 42, 40, 169, 571, 513, 504, 36, 35, 135, 179, - /* 130 */ 34, 33, 32, 244, 37, 38, 187, 514, 184, 31, - /* 140 */ 534, 102, 206, 41, 39, 42, 40, 229, 234, 513, - /* 150 */ 513, 36, 35, 231, 230, 34, 33, 32, 17, 220, - /* 160 */ 243, 219, 218, 217, 216, 215, 214, 213, 212, 498, - /* 170 */ 140, 487, 488, 489, 490, 491, 492, 493, 494, 495, - /* 180 */ 496, 497, 164, 584, 11, 98, 575, 134, 578, 531, - /* 190 */ 581, 599, 164, 584, 167, 558, 575, 201, 578, 156, - /* 200 */ 581, 36, 35, 149, 221, 34, 33, 32, 21, 87, - /* 210 */ 86, 143, 516, 244, 161, 162, 102, 148, 205, 249, - /* 220 */ 248, 93, 516, 76, 161, 162, 164, 584, 532, 242, - /* 230 */ 575, 102, 578, 515, 581, 194, 41, 39, 42, 40, - /* 240 */ 243, 598, 512, 27, 36, 35, 49, 573, 34, 33, - /* 250 */ 32, 115, 116, 225, 65, 68, 507, 443, 161, 162, - /* 260 */ 125, 193, 520, 50, 189, 517, 501, 518, 500, 519, - /* 270 */ 557, 151, 129, 127, 246, 89, 88, 44, 452, 444, - /* 280 */ 61, 125, 125, 574, 597, 60, 583, 44, 577, 529, - /* 290 */ 580, 28, 18, 170, 171, 607, 583, 163, 608, 29, - /* 300 */ 543, 582, 29, 544, 47, 52, 601, 15, 152, 585, - /* 310 */ 14, 582, 576, 14, 579, 510, 73, 72, 509, 47, - /* 320 */ 53, 44, 22, 210, 524, 153, 525, 22, 141, 522, - /* 330 */ 583, 523, 9, 8, 2, 84, 83, 142, 144, 145, - /* 340 */ 146, 617, 147, 138, 132, 582, 139, 137, 533, 568, - /* 350 */ 99, 567, 165, 564, 563, 166, 232, 550, 549, 190, - /* 360 */ 113, 114, 521, 454, 
111, 211, 130, 25, 192, 224, - /* 370 */ 226, 616, 70, 615, 613, 117, 472, 26, 23, 131, - /* 380 */ 441, 91, 79, 439, 81, 437, 436, 539, 195, 199, - /* 390 */ 174, 54, 126, 434, 433, 432, 430, 422, 527, 128, - /* 400 */ 428, 51, 426, 103, 46, 204, 104, 424, 202, 200, - /* 410 */ 198, 96, 196, 30, 537, 538, 551, 27, 223, 75, - /* 420 */ 235, 236, 237, 238, 239, 208, 240, 241, 247, 55, - /* 430 */ 620, 175, 150, 176, 63, 66, 178, 177, 619, 181, - /* 440 */ 435, 180, 182, 429, 120, 183, 119, 473, 118, 121, - /* 450 */ 122, 124, 123, 90, 92, 511, 1, 24, 107, 105, - /* 460 */ 108, 106, 109, 618, 185, 110, 186, 611, 97, 188, - /* 470 */ 12, 13, 191, 540, 58, 100, 158, 545, 197, 101, - /* 480 */ 4, 64, 485, 586, 3, 19, 20, 5, 16, 207, - /* 490 */ 6, 209, 484, 483, 482, 481, 480, 479, 478, 476, - /* 500 */ 47, 222, 449, 67, 451, 22, 506, 48, 505, 503, - /* 510 */ 56, 470, 468, 460, 466, 462, 464, 69, 71, 458, - /* 520 */ 456, 477, 475, 82, 431, 447, 94, 420, 418, 621, - /* 530 */ 623, 623, 623, 95, + /* 0 */ 752, 440, 132, 150, 244, 10, 616, 246, 132, 441, + /* 10 */ 132, 155, 821, 41, 43, 20, 35, 36, 820, 154, + /* 20 */ 821, 29, 741, 440, 200, 39, 37, 40, 38, 131, + /* 30 */ 499, 441, 96, 34, 33, 100, 151, 32, 31, 30, + /* 40 */ 41, 43, 741, 35, 36, 152, 136, 163, 29, 727, + /* 50 */ 749, 200, 39, 37, 40, 38, 185, 100, 225, 224, + /* 60 */ 34, 33, 162, 730, 32, 31, 30, 400, 401, 402, + /* 70 */ 403, 404, 405, 406, 407, 408, 409, 410, 411, 245, + /* 80 */ 730, 41, 43, 188, 35, 36, 215, 236, 197, 29, + /* 90 */ 58, 20, 200, 39, 37, 40, 38, 32, 31, 30, + /* 100 */ 56, 34, 33, 75, 730, 32, 31, 30, 43, 236, + /* 110 */ 35, 36, 776, 817, 195, 29, 20, 20, 200, 39, + /* 120 */ 37, 40, 38, 164, 570, 727, 227, 34, 33, 440, + /* 130 */ 167, 32, 31, 30, 238, 35, 36, 441, 7, 816, + /* 140 */ 29, 61, 110, 200, 39, 37, 40, 38, 223, 228, + /* 150 */ 727, 727, 34, 33, 50, 728, 32, 31, 30, 15, + /* 160 */ 214, 237, 213, 212, 211, 210, 209, 208, 207, 206, + /* 170 */ 712, 51, 701, 702, 703, 704, 705, 706, 707, 708, + /* 180 */ 709, 710, 711, 159, 583, 11, 815, 574, 100, 577, + /* 190 */ 100, 580, 168, 159, 583, 222, 221, 574, 16, 577, + /* 200 */ 20, 580, 34, 33, 145, 26, 32, 31, 30, 238, + /* 210 */ 86, 85, 139, 174, 657, 156, 157, 123, 144, 199, + /* 220 */ 182, 715, 179, 714, 148, 156, 157, 159, 583, 531, + /* 230 */ 60, 574, 149, 577, 726, 580, 237, 16, 39, 37, + /* 240 */ 40, 38, 27, 775, 26, 59, 34, 33, 551, 552, + /* 250 */ 32, 31, 30, 137, 113, 114, 219, 64, 67, 156, + /* 260 */ 157, 95, 515, 666, 184, 512, 123, 513, 26, 514, + /* 270 */ 523, 147, 127, 125, 240, 88, 87, 187, 42, 158, + /* 280 */ 73, 77, 239, 84, 76, 572, 528, 729, 42, 582, + /* 290 */ 79, 17, 658, 165, 166, 123, 243, 242, 92, 582, + /* 300 */ 47, 542, 543, 600, 581, 45, 13, 12, 584, 576, + /* 310 */ 138, 579, 12, 575, 581, 578, 2, 72, 71, 48, + /* 320 */ 505, 573, 42, 743, 45, 504, 204, 9, 8, 21, + /* 330 */ 21, 140, 519, 582, 520, 517, 141, 518, 83, 82, + /* 340 */ 142, 143, 134, 130, 135, 830, 133, 786, 581, 785, + /* 350 */ 160, 782, 781, 161, 751, 721, 768, 226, 97, 767, + /* 360 */ 111, 112, 516, 668, 205, 109, 128, 24, 218, 220, + /* 370 */ 829, 69, 26, 828, 826, 115, 186, 686, 25, 22, + /* 380 */ 90, 129, 655, 78, 653, 80, 651, 650, 169, 538, + /* 390 */ 124, 648, 189, 647, 646, 644, 636, 193, 52, 740, + /* 400 */ 126, 642, 640, 638, 49, 755, 756, 101, 769, 44, + /* 410 */ 198, 196, 194, 28, 192, 190, 217, 74, 229, 230, + /* 420 */ 202, 232, 231, 614, 233, 234, 53, 235, 241, 170, + /* 430 */ 146, 62, 171, 65, 173, 172, 613, 176, 
175, 178, + /* 440 */ 649, 177, 612, 89, 91, 117, 687, 118, 116, 119, + /* 450 */ 120, 643, 104, 102, 122, 725, 106, 103, 105, 121, + /* 460 */ 107, 1, 108, 23, 180, 181, 605, 183, 187, 525, + /* 470 */ 55, 539, 153, 98, 57, 191, 18, 63, 4, 544, + /* 480 */ 99, 5, 585, 3, 19, 14, 201, 6, 203, 480, + /* 490 */ 479, 478, 477, 476, 475, 474, 473, 471, 45, 444, + /* 500 */ 66, 446, 21, 501, 216, 68, 500, 498, 54, 465, + /* 510 */ 46, 463, 455, 70, 461, 457, 459, 453, 451, 472, + /* 520 */ 470, 81, 426, 442, 93, 415, 94, 413, 618, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 1, 64, 65, 66, 67, 68, 199, 200, 9, 197, - /* 10 */ 198, 74, 13, 14, 96, 16, 17, 99, 100, 63, - /* 20 */ 21, 1, 1, 24, 25, 26, 27, 28, 78, 9, - /* 30 */ 9, 248, 33, 34, 248, 200, 37, 38, 39, 13, - /* 40 */ 14, 258, 16, 17, 217, 233, 248, 21, 248, 248, - /* 50 */ 24, 25, 26, 27, 28, 257, 258, 257, 258, 33, - /* 60 */ 34, 260, 235, 37, 38, 39, 45, 46, 47, 48, - /* 70 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 200, - /* 80 */ 13, 14, 126, 16, 17, 129, 130, 252, 21, 254, - /* 90 */ 200, 24, 25, 26, 27, 28, 37, 38, 39, 100, - /* 100 */ 33, 34, 111, 112, 37, 38, 39, 14, 200, 16, - /* 110 */ 17, 232, 248, 234, 21, 200, 200, 24, 25, 26, - /* 120 */ 27, 28, 232, 97, 234, 5, 33, 34, 248, 125, - /* 130 */ 37, 38, 39, 60, 16, 17, 132, 229, 134, 21, - /* 140 */ 200, 200, 24, 25, 26, 27, 28, 232, 232, 234, - /* 150 */ 234, 33, 34, 33, 34, 37, 38, 39, 85, 86, - /* 160 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 216, - /* 170 */ 248, 218, 219, 220, 221, 222, 223, 224, 225, 226, - /* 180 */ 227, 228, 1, 2, 44, 200, 5, 248, 7, 249, - /* 190 */ 9, 248, 1, 2, 217, 254, 5, 256, 7, 260, - /* 200 */ 9, 33, 34, 63, 217, 37, 38, 39, 200, 69, - /* 210 */ 70, 71, 235, 60, 33, 34, 200, 77, 37, 60, - /* 220 */ 61, 62, 235, 72, 33, 34, 1, 2, 37, 78, - /* 230 */ 5, 200, 7, 235, 9, 250, 25, 26, 27, 28, - /* 240 */ 87, 248, 234, 103, 33, 34, 101, 1, 37, 38, - /* 250 */ 39, 64, 65, 66, 67, 68, 231, 204, 33, 34, - /* 260 */ 207, 121, 2, 118, 124, 5, 218, 7, 220, 9, - /* 270 */ 254, 131, 64, 65, 66, 67, 68, 96, 204, 204, - /* 280 */ 236, 207, 207, 37, 248, 254, 105, 96, 5, 101, - /* 290 */ 7, 247, 104, 33, 34, 97, 105, 59, 97, 101, - /* 300 */ 97, 120, 101, 97, 101, 101, 97, 101, 248, 97, - /* 310 */ 101, 120, 5, 101, 7, 97, 127, 128, 97, 101, - /* 320 */ 116, 96, 101, 97, 5, 248, 7, 101, 248, 5, - /* 330 */ 105, 7, 127, 128, 96, 72, 73, 248, 248, 248, - /* 340 */ 248, 235, 248, 248, 248, 120, 248, 248, 200, 230, - /* 350 */ 200, 230, 230, 230, 230, 230, 230, 255, 255, 123, - /* 360 */ 200, 200, 102, 200, 237, 200, 200, 200, 259, 200, - /* 370 */ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - /* 380 */ 200, 59, 200, 200, 200, 200, 200, 105, 251, 251, - /* 390 */ 200, 115, 200, 200, 200, 200, 200, 200, 246, 200, - /* 400 */ 200, 117, 200, 245, 114, 109, 244, 200, 113, 108, - /* 410 */ 107, 201, 106, 119, 201, 201, 201, 103, 75, 84, - /* 420 */ 83, 49, 80, 82, 53, 201, 81, 79, 75, 201, - /* 430 */ 5, 133, 201, 5, 205, 205, 58, 133, 5, 5, - /* 440 */ 201, 133, 133, 201, 209, 58, 213, 215, 214, 212, - /* 450 */ 210, 208, 211, 202, 202, 233, 206, 203, 241, 243, - /* 460 */ 240, 242, 239, 5, 133, 238, 58, 86, 122, 125, - /* 470 */ 96, 96, 123, 97, 101, 96, 1, 97, 96, 96, - /* 480 */ 110, 72, 9, 97, 96, 101, 101, 110, 96, 98, - /* 490 */ 96, 98, 5, 5, 5, 5, 1, 5, 5, 5, - /* 500 */ 101, 15, 76, 72, 58, 101, 5, 16, 5, 97, - /* 510 */ 96, 5, 5, 5, 5, 5, 5, 128, 128, 5, - /* 520 */ 5, 5, 5, 58, 58, 76, 21, 59, 58, 0, - /* 530 */ 261, 261, 261, 21, + /* 0 */ 207, 
1, 256, 206, 207, 256, 204, 205, 256, 9, + /* 10 */ 256, 265, 266, 13, 14, 207, 16, 17, 266, 265, + /* 20 */ 266, 21, 240, 1, 24, 25, 26, 27, 28, 256, + /* 30 */ 5, 9, 207, 33, 34, 207, 254, 37, 38, 39, + /* 40 */ 13, 14, 240, 16, 17, 224, 256, 239, 21, 241, + /* 50 */ 257, 24, 25, 26, 27, 28, 254, 207, 33, 34, + /* 60 */ 33, 34, 224, 242, 37, 38, 39, 45, 46, 47, + /* 70 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 80 */ 242, 13, 14, 258, 16, 17, 224, 78, 260, 21, + /* 90 */ 262, 207, 24, 25, 26, 27, 28, 37, 38, 39, + /* 100 */ 100, 33, 34, 72, 242, 37, 38, 39, 14, 78, + /* 110 */ 16, 17, 262, 256, 264, 21, 207, 207, 24, 25, + /* 120 */ 26, 27, 28, 239, 97, 241, 207, 33, 34, 1, + /* 130 */ 63, 37, 38, 39, 60, 16, 17, 9, 96, 256, + /* 140 */ 21, 99, 100, 24, 25, 26, 27, 28, 239, 239, + /* 150 */ 241, 241, 33, 34, 101, 236, 37, 38, 39, 85, + /* 160 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + /* 170 */ 223, 118, 225, 226, 227, 228, 229, 230, 231, 232, + /* 180 */ 233, 234, 235, 1, 2, 44, 256, 5, 207, 7, + /* 190 */ 207, 9, 125, 1, 2, 128, 129, 5, 96, 7, + /* 200 */ 207, 9, 33, 34, 63, 103, 37, 38, 39, 60, + /* 210 */ 69, 70, 71, 124, 211, 33, 34, 214, 77, 37, + /* 220 */ 131, 225, 133, 227, 256, 33, 34, 1, 2, 37, + /* 230 */ 243, 5, 256, 7, 241, 9, 87, 96, 25, 26, + /* 240 */ 27, 28, 255, 262, 103, 262, 33, 34, 113, 114, + /* 250 */ 37, 38, 39, 256, 64, 65, 66, 67, 68, 33, + /* 260 */ 34, 96, 2, 211, 123, 5, 214, 7, 103, 9, + /* 270 */ 97, 130, 64, 65, 66, 67, 68, 104, 96, 59, + /* 280 */ 64, 65, 66, 67, 68, 1, 101, 242, 96, 107, + /* 290 */ 74, 106, 211, 33, 34, 214, 60, 61, 62, 107, + /* 300 */ 101, 97, 97, 97, 122, 101, 101, 101, 97, 5, + /* 310 */ 256, 7, 101, 5, 122, 7, 96, 126, 127, 120, + /* 320 */ 97, 37, 96, 240, 101, 97, 97, 126, 127, 101, + /* 330 */ 101, 256, 5, 107, 7, 5, 256, 7, 72, 73, + /* 340 */ 256, 256, 256, 256, 256, 242, 256, 237, 122, 237, + /* 350 */ 237, 237, 237, 237, 207, 238, 263, 237, 207, 263, + /* 360 */ 207, 207, 102, 207, 207, 244, 207, 207, 207, 207, + /* 370 */ 207, 207, 103, 207, 207, 207, 240, 207, 207, 207, + /* 380 */ 59, 207, 207, 207, 207, 207, 207, 207, 207, 107, + /* 390 */ 207, 207, 259, 207, 207, 207, 207, 259, 117, 253, + /* 400 */ 207, 207, 207, 207, 119, 208, 208, 252, 208, 116, + /* 410 */ 111, 115, 110, 121, 109, 108, 75, 84, 83, 49, + /* 420 */ 208, 82, 80, 5, 53, 81, 208, 79, 75, 132, + /* 430 */ 208, 212, 5, 212, 58, 132, 5, 5, 132, 58, + /* 440 */ 208, 132, 5, 209, 209, 220, 222, 216, 221, 219, + /* 450 */ 217, 208, 249, 251, 215, 240, 247, 250, 248, 218, + /* 460 */ 246, 213, 245, 210, 132, 58, 86, 124, 104, 97, + /* 470 */ 105, 97, 1, 96, 101, 96, 101, 72, 112, 97, + /* 480 */ 96, 112, 97, 96, 101, 96, 98, 96, 98, 9, + /* 490 */ 5, 5, 5, 5, 1, 5, 5, 5, 101, 76, + /* 500 */ 72, 58, 101, 5, 15, 127, 5, 97, 96, 5, + /* 510 */ 16, 5, 5, 127, 5, 5, 5, 5, 5, 5, + /* 520 */ 5, 58, 58, 76, 21, 59, 21, 58, 0, 267, + /* 530 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 540 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 550 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 560 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 570 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 580 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 590 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 600 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 610 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 620 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 630 */ 267, 267, 
267, 267, 267, 267, 267, 267, 267, 267, + /* 640 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 650 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 660 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 670 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 680 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 690 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 700 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 710 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 720 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 730 */ 267, 267, }; -#define YY_SHIFT_USE_DFLT (-83) -#define YY_SHIFT_COUNT (252) -#define YY_SHIFT_MIN (-82) -#define YY_SHIFT_MAX (529) -static const short yy_shift_ofst[] = { - /* 0 */ 140, 73, 181, 225, 20, 20, 20, 20, 20, 20, - /* 10 */ -1, 21, 225, 225, 225, 260, 260, 260, 20, 20, - /* 20 */ 20, 20, 20, 151, 153, -50, -50, -83, 191, 225, - /* 30 */ 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, - /* 40 */ 225, 225, 225, 225, 225, 225, 225, 260, 260, 120, - /* 50 */ 120, 120, 120, 120, 120, -82, 120, 20, 20, -9, - /* 60 */ -9, 188, 20, 20, 20, 20, 20, 20, 20, 20, - /* 70 */ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, - /* 80 */ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, - /* 90 */ 20, 20, 20, 20, 20, 20, 236, 322, 322, 322, - /* 100 */ 282, 282, 322, 276, 284, 290, 296, 295, 301, 303, - /* 110 */ 306, 294, 314, 322, 322, 343, 343, 322, 335, 337, - /* 120 */ 372, 342, 341, 371, 345, 348, 322, 353, 322, 353, - /* 130 */ -83, -83, 26, 67, 67, 67, 67, 67, 93, 118, - /* 140 */ 211, 211, 211, -63, 168, 168, 168, 168, 187, 208, - /* 150 */ -44, 4, 59, 59, 159, 198, 201, 203, 206, 209, - /* 160 */ 212, 283, 307, 246, 238, 145, 204, 218, 221, 226, - /* 170 */ 319, 324, 189, 205, 263, 425, 298, 428, 304, 378, - /* 180 */ 433, 308, 434, 309, 387, 458, 331, 408, 381, 344, - /* 190 */ 374, 375, 349, 346, 373, 376, 379, 475, 382, 380, - /* 200 */ 383, 384, 370, 385, 377, 386, 388, 392, 391, 394, - /* 210 */ 393, 409, 473, 487, 488, 489, 490, 495, 492, 493, - /* 220 */ 494, 399, 426, 486, 431, 446, 491, 389, 390, 404, - /* 230 */ 501, 503, 412, 414, 404, 506, 507, 508, 509, 510, - /* 240 */ 511, 514, 515, 516, 517, 465, 466, 449, 505, 512, - /* 250 */ 468, 470, 529, +#define YY_SHIFT_COUNT (246) +#define YY_SHIFT_MIN (0) +#define YY_SHIFT_MAX (528) +static const unsigned short int yy_shift_ofst[] = { + /* 0 */ 141, 74, 182, 226, 128, 128, 128, 128, 128, 128, + /* 10 */ 0, 22, 226, 260, 260, 260, 102, 128, 128, 128, + /* 20 */ 128, 128, 31, 149, 9, 9, 529, 192, 226, 226, + /* 30 */ 226, 226, 226, 226, 226, 226, 226, 226, 226, 226, + /* 40 */ 226, 226, 226, 226, 226, 260, 260, 25, 25, 25, + /* 50 */ 25, 25, 25, 42, 25, 165, 128, 128, 135, 135, + /* 60 */ 185, 128, 128, 128, 128, 128, 128, 128, 128, 128, + /* 70 */ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + /* 80 */ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + /* 90 */ 128, 128, 128, 128, 128, 269, 321, 321, 282, 282, + /* 100 */ 321, 281, 285, 293, 299, 296, 302, 305, 307, 292, + /* 110 */ 269, 321, 321, 341, 341, 321, 333, 335, 370, 342, + /* 120 */ 339, 371, 344, 348, 321, 353, 321, 353, 529, 529, + /* 130 */ 27, 68, 68, 68, 94, 119, 213, 213, 213, 216, + /* 140 */ 169, 169, 169, 169, 190, 208, 67, 89, 60, 60, + /* 150 */ 236, 173, 204, 205, 206, 211, 304, 308, 284, 220, + /* 160 */ 199, 53, 223, 228, 229, 327, 330, 191, 201, 266, + /* 170 */ 418, 297, 427, 303, 376, 431, 306, 432, 309, 381, + /* 180 */ 437, 332, 407, 380, 343, 364, 372, 365, 
373, 374, + /* 190 */ 377, 471, 379, 382, 384, 375, 366, 383, 369, 385, + /* 200 */ 387, 389, 388, 391, 390, 405, 480, 485, 486, 487, + /* 210 */ 488, 493, 490, 491, 492, 397, 423, 489, 428, 443, + /* 220 */ 494, 378, 386, 401, 498, 501, 410, 412, 401, 504, + /* 230 */ 506, 507, 509, 510, 511, 512, 513, 514, 515, 463, + /* 240 */ 464, 447, 503, 505, 466, 469, 528, }; -#define YY_REDUCE_USE_DFLT (-218) -#define YY_REDUCE_COUNT (131) -#define YY_REDUCE_MIN (-217) -#define YY_REDUCE_MAX (254) +#define YY_REDUCE_COUNT (129) +#define YY_REDUCE_MIN (-254) +#define YY_REDUCE_MAX (253) static const short yy_reduce_ofst[] = { - /* 0 */ -188, -47, -202, -200, -59, -165, -121, -110, -85, -84, - /* 10 */ -60, -193, -199, -61, -217, -173, -23, -13, -15, 16, - /* 20 */ 31, -92, 8, 53, 48, 74, 75, 44, -214, -136, - /* 30 */ -120, -78, -57, -7, 36, 60, 77, 80, 89, 90, - /* 40 */ 91, 92, 94, 95, 96, 98, 99, -2, 106, 119, - /* 50 */ 121, 122, 123, 124, 125, 25, 126, 148, 150, 102, - /* 60 */ 103, 127, 160, 161, 163, 165, 166, 167, 169, 170, - /* 70 */ 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, - /* 80 */ 182, 183, 184, 185, 186, 190, 192, 193, 194, 195, - /* 90 */ 196, 197, 199, 200, 202, 207, 109, 210, 213, 214, - /* 100 */ 137, 138, 215, 152, 158, 162, 216, 219, 217, 220, - /* 110 */ 223, 227, 222, 224, 228, 229, 230, 231, 232, 234, - /* 120 */ 233, 235, 237, 240, 241, 243, 239, 251, 242, 252, - /* 130 */ 250, 254, + /* 0 */ -198, -53, -254, -246, -150, -172, -192, -116, -91, -90, + /* 10 */ -207, -203, -248, -179, -162, -138, -218, -175, -19, -17, + /* 20 */ -81, -7, 3, -4, 52, 81, -13, -251, -227, -210, + /* 30 */ -143, -117, -70, -32, -24, -3, 54, 75, 80, 84, + /* 40 */ 85, 86, 87, 88, 90, 45, 103, 110, 112, 113, + /* 50 */ 114, 115, 116, 117, 120, 83, 147, 151, 93, 96, + /* 60 */ 121, 153, 154, 156, 157, 159, 160, 161, 162, 163, + /* 70 */ 164, 166, 167, 168, 170, 171, 172, 174, 175, 176, + /* 80 */ 177, 178, 179, 180, 181, 183, 184, 186, 187, 188, + /* 90 */ 189, 193, 194, 195, 196, 136, 197, 198, 133, 138, + /* 100 */ 200, 146, 155, 202, 207, 203, 210, 209, 214, 217, + /* 110 */ 215, 212, 218, 219, 221, 222, 224, 227, 225, 231, + /* 120 */ 230, 233, 241, 239, 232, 234, 243, 235, 248, 253, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 838, 670, 822, 822, 838, 838, 838, 838, 838, 838, - /* 10 */ 752, 636, 838, 838, 822, 838, 838, 838, 838, 838, - /* 20 */ 838, 838, 838, 672, 659, 672, 672, 747, 838, 838, - /* 30 */ 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, - /* 40 */ 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, - /* 50 */ 838, 838, 838, 838, 838, 838, 838, 838, 838, 771, - /* 60 */ 771, 745, 838, 838, 838, 838, 838, 838, 838, 838, - /* 70 */ 838, 838, 838, 838, 838, 838, 838, 838, 838, 657, - /* 80 */ 838, 655, 838, 838, 838, 838, 838, 838, 838, 838, - /* 90 */ 838, 838, 838, 644, 838, 838, 838, 638, 638, 638, - /* 100 */ 838, 838, 638, 778, 782, 776, 764, 772, 763, 759, - /* 110 */ 758, 786, 838, 638, 638, 667, 667, 638, 688, 686, - /* 120 */ 684, 676, 682, 678, 680, 674, 638, 665, 638, 665, - /* 130 */ 703, 716, 838, 826, 827, 787, 821, 777, 805, 804, - /* 140 */ 817, 811, 810, 838, 809, 808, 807, 806, 838, 838, - /* 150 */ 838, 838, 813, 812, 838, 838, 838, 838, 838, 838, - /* 160 */ 838, 838, 838, 838, 789, 783, 779, 838, 838, 838, - /* 170 */ 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, - /* 180 */ 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, - /* 190 */ 838, 838, 823, 838, 753, 838, 838, 838, 838, 838, - /* 200 */ 838, 773, 838, 765, 838, 838, 838, 838, 
838, 838, - /* 210 */ 725, 838, 838, 838, 838, 838, 838, 838, 838, 838, - /* 220 */ 838, 691, 838, 838, 838, 838, 838, 838, 838, 831, - /* 230 */ 838, 838, 838, 719, 829, 838, 838, 838, 838, 838, - /* 240 */ 838, 838, 838, 838, 838, 838, 838, 838, 642, 640, - /* 250 */ 838, 634, 838, + /* 0 */ 615, 667, 823, 823, 615, 615, 615, 615, 615, 615, + /* 10 */ 753, 633, 823, 615, 615, 615, 615, 615, 615, 615, + /* 20 */ 615, 615, 669, 656, 669, 669, 748, 615, 615, 615, + /* 30 */ 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 40 */ 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 50 */ 615, 615, 615, 615, 615, 615, 615, 615, 772, 772, + /* 60 */ 746, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 70 */ 615, 615, 615, 615, 615, 615, 615, 615, 654, 615, + /* 80 */ 652, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 90 */ 615, 615, 641, 615, 615, 615, 635, 635, 615, 615, + /* 100 */ 635, 779, 783, 777, 765, 773, 764, 760, 759, 787, + /* 110 */ 615, 635, 635, 664, 664, 635, 685, 683, 681, 673, + /* 120 */ 679, 675, 677, 671, 635, 662, 635, 662, 700, 713, + /* 130 */ 615, 788, 822, 778, 806, 805, 818, 812, 811, 615, + /* 140 */ 810, 809, 808, 807, 615, 615, 615, 615, 814, 813, + /* 150 */ 615, 615, 615, 615, 615, 615, 615, 615, 615, 790, + /* 160 */ 784, 780, 615, 615, 615, 615, 615, 615, 615, 615, + /* 170 */ 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 180 */ 615, 615, 615, 615, 615, 745, 615, 615, 754, 615, + /* 190 */ 615, 615, 615, 615, 615, 774, 615, 766, 615, 615, + /* 200 */ 615, 615, 615, 615, 722, 615, 615, 615, 615, 615, + /* 210 */ 615, 615, 615, 615, 615, 688, 615, 615, 615, 615, + /* 220 */ 615, 615, 615, 827, 615, 615, 615, 716, 825, 615, + /* 230 */ 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 240 */ 615, 615, 639, 637, 615, 631, 615, }; /********** End of lemon-generated parsing tables *****************************/ @@ -516,6 +527,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* COMMA => nothing */ 1, /* NULL => ID */ 0, /* SELECT => nothing */ + 0, /* UNION => nothing */ + 1, /* ALL => ID */ 0, /* FROM => nothing */ 0, /* VARIABLE => nothing */ 0, /* INTERVAL => nothing */ @@ -533,9 +546,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* SOFFSET => nothing */ 0, /* WHERE => nothing */ 1, /* NOW => ID */ - 0, /* INSERT => nothing */ - 0, /* INTO => nothing */ - 0, /* VALUES => nothing */ 0, /* RESET => nothing */ 0, /* QUERY => nothing */ 0, /* ADD => nothing */ @@ -578,7 +588,6 @@ static const YYCODETYPE yyFallback[] = { 1, /* STATEMENT => ID */ 1, /* TRIGGER => ID */ 1, /* VIEW => ID */ - 1, /* ALL => ID */ 1, /* COUNT => ID */ 1, /* SUM => ID */ 1, /* AVG => ID */ @@ -598,6 +607,12 @@ static const YYCODETYPE yyFallback[] = { 1, /* TWA => ID */ 1, /* INTERP => ID */ 1, /* LAST_ROW => ID */ + 1, /* RATE => ID */ + 1, /* IRATE => ID */ + 1, /* SUM_RATE => ID */ + 1, /* SUM_IRATE => ID */ + 1, /* AVG_RATE => ID */ + 1, /* AVG_IRATE => ID */ 1, /* SEMI => ID */ 1, /* NONE => ID */ 1, /* PREV => ID */ @@ -608,6 +623,9 @@ static const YYCODETYPE yyFallback[] = { 1, /* JOIN => ID */ 1, /* METRICS => ID */ 1, /* STABLE => ID */ + 1, /* INSERT => ID */ + 1, /* INTO => ID */ + 1, /* VALUES => ID */ }; #endif /* YYFALLBACK */ @@ -639,17 +657,21 @@ typedef struct yyStackEntry yyStackEntry; /* The state of the parser is completely contained in an instance of ** the following structure */ struct yyParser { - int yyidx; /* Index of top element in stack */ + yyStackEntry *yytos; /* Pointer to top element of the stack */ #ifdef YYTRACKMAXSTACKDEPTH - int 
yyidxMax; /* Maximum value of yyidx */ + int yyhwm; /* High-water mark of the stack */ #endif +#ifndef YYNOERRORRECOVERY int yyerrcnt; /* Shifts left before out of the error */ +#endif ParseARG_SDECL /* A place to hold %extra_argument */ #if YYSTACKDEPTH<=0 int yystksz; /* Current side of the stack */ yyStackEntry *yystack; /* The parser's stack */ + yyStackEntry yystk0; /* First stack entry */ #else yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */ + yyStackEntry *yystackEnd; /* Last entry in the stack */ #endif }; typedef struct yyParser yyParser; @@ -686,78 +708,279 @@ void ParseTrace(FILE *TraceFILE, char *zTracePrompt){ } #endif /* NDEBUG */ -#ifndef NDEBUG +#if defined(YYCOVERAGE) || !defined(NDEBUG) /* For tracing shifts, the names of all terminals and nonterminals ** are required. The following table supplies these names */ static const char *const yyTokenName[] = { - "$", "ID", "BOOL", "TINYINT", - "SMALLINT", "INTEGER", "BIGINT", "FLOAT", - "DOUBLE", "STRING", "TIMESTAMP", "BINARY", - "NCHAR", "OR", "AND", "NOT", - "EQ", "NE", "ISNULL", "NOTNULL", - "IS", "LIKE", "GLOB", "BETWEEN", - "IN", "GT", "GE", "LT", - "LE", "BITAND", "BITOR", "LSHIFT", - "RSHIFT", "PLUS", "MINUS", "DIVIDE", - "TIMES", "STAR", "SLASH", "REM", - "CONCAT", "UMINUS", "UPLUS", "BITNOT", - "SHOW", "DATABASES", "MNODES", "DNODES", - "ACCOUNTS", "USERS", "MODULES", "QUERIES", - "CONNECTIONS", "STREAMS", "CONFIGS", "SCORES", - "GRANTS", "VNODES", "IPTOKEN", "DOT", - "TABLES", "STABLES", "VGROUPS", "DROP", - "TABLE", "DATABASE", "DNODE", "USER", - "ACCOUNT", "USE", "DESCRIBE", "ALTER", - "PASS", "PRIVILEGE", "LOCAL", "IF", - "EXISTS", "CREATE", "PPS", "TSERIES", - "DBS", "STORAGE", "QTIME", "CONNS", - "STATE", "KEEP", "CACHE", "REPLICA", - "DAYS", "ROWS", "ABLOCKS", "TBLOCKS", - "CTIME", "CLOG", "COMP", "PRECISION", - "LP", "RP", "TAGS", "USING", - "AS", "COMMA", "NULL", "SELECT", - "FROM", "VARIABLE", "INTERVAL", "FILL", - "SLIDING", "ORDER", "BY", "ASC", - "DESC", "GROUP", "HAVING", "LIMIT", - "OFFSET", "SLIMIT", "SOFFSET", "WHERE", - "NOW", "INSERT", "INTO", "VALUES", - "RESET", "QUERY", "ADD", "COLUMN", - "TAG", "CHANGE", "SET", "KILL", - "CONNECTION", "COLON", "STREAM", "ABORT", - "AFTER", "ATTACH", "BEFORE", "BEGIN", - "CASCADE", "CLUSTER", "CONFLICT", "COPY", - "DEFERRED", "DELIMITERS", "DETACH", "EACH", - "END", "EXPLAIN", "FAIL", "FOR", - "IGNORE", "IMMEDIATE", "INITIALLY", "INSTEAD", - "MATCH", "KEY", "OF", "RAISE", - "REPLACE", "RESTRICT", "ROW", "STATEMENT", - "TRIGGER", "VIEW", "ALL", "COUNT", - "SUM", "AVG", "MIN", "MAX", - "FIRST", "LAST", "TOP", "BOTTOM", - "STDDEV", "PERCENTILE", "APERCENTILE", "LEASTSQUARES", - "HISTOGRAM", "DIFF", "SPREAD", "TWA", - "INTERP", "LAST_ROW", "SEMI", "NONE", - "PREV", "LINEAR", "IMPORT", "METRIC", - "TBNAME", "JOIN", "METRICS", "STABLE", - "error", "program", "cmd", "dbPrefix", - "ids", "cpxName", "ifexists", "alter_db_optr", - "acct_optr", "ifnotexists", "db_optr", "pps", - "tseries", "dbs", "streams", "storage", - "qtime", "users", "conns", "state", - "keep", "tagitemlist", "tables", "cache", - "replica", "days", "rows", "ablocks", - "tblocks", "ctime", "clog", "comp", - "prec", "typename", "signed", "create_table_args", - "columnlist", "select", "column", "tagitem", - "selcollist", "from", "where_opt", "interval_opt", - "fill_opt", "sliding_opt", "groupby_opt", "orderby_opt", - "having_opt", "slimit_opt", "limit_opt", "sclp", - "expr", "as", "tablelist", "tmvar", - "sortlist", "sortitem", "item", "sortorder", - "grouplist", "exprlist", "expritem", 
"insert_value_list", - "itemlist", + /* 0 */ "$", + /* 1 */ "ID", + /* 2 */ "BOOL", + /* 3 */ "TINYINT", + /* 4 */ "SMALLINT", + /* 5 */ "INTEGER", + /* 6 */ "BIGINT", + /* 7 */ "FLOAT", + /* 8 */ "DOUBLE", + /* 9 */ "STRING", + /* 10 */ "TIMESTAMP", + /* 11 */ "BINARY", + /* 12 */ "NCHAR", + /* 13 */ "OR", + /* 14 */ "AND", + /* 15 */ "NOT", + /* 16 */ "EQ", + /* 17 */ "NE", + /* 18 */ "ISNULL", + /* 19 */ "NOTNULL", + /* 20 */ "IS", + /* 21 */ "LIKE", + /* 22 */ "GLOB", + /* 23 */ "BETWEEN", + /* 24 */ "IN", + /* 25 */ "GT", + /* 26 */ "GE", + /* 27 */ "LT", + /* 28 */ "LE", + /* 29 */ "BITAND", + /* 30 */ "BITOR", + /* 31 */ "LSHIFT", + /* 32 */ "RSHIFT", + /* 33 */ "PLUS", + /* 34 */ "MINUS", + /* 35 */ "DIVIDE", + /* 36 */ "TIMES", + /* 37 */ "STAR", + /* 38 */ "SLASH", + /* 39 */ "REM", + /* 40 */ "CONCAT", + /* 41 */ "UMINUS", + /* 42 */ "UPLUS", + /* 43 */ "BITNOT", + /* 44 */ "SHOW", + /* 45 */ "DATABASES", + /* 46 */ "MNODES", + /* 47 */ "DNODES", + /* 48 */ "ACCOUNTS", + /* 49 */ "USERS", + /* 50 */ "MODULES", + /* 51 */ "QUERIES", + /* 52 */ "CONNECTIONS", + /* 53 */ "STREAMS", + /* 54 */ "CONFIGS", + /* 55 */ "SCORES", + /* 56 */ "GRANTS", + /* 57 */ "VNODES", + /* 58 */ "IPTOKEN", + /* 59 */ "DOT", + /* 60 */ "TABLES", + /* 61 */ "STABLES", + /* 62 */ "VGROUPS", + /* 63 */ "DROP", + /* 64 */ "TABLE", + /* 65 */ "DATABASE", + /* 66 */ "DNODE", + /* 67 */ "USER", + /* 68 */ "ACCOUNT", + /* 69 */ "USE", + /* 70 */ "DESCRIBE", + /* 71 */ "ALTER", + /* 72 */ "PASS", + /* 73 */ "PRIVILEGE", + /* 74 */ "LOCAL", + /* 75 */ "IF", + /* 76 */ "EXISTS", + /* 77 */ "CREATE", + /* 78 */ "PPS", + /* 79 */ "TSERIES", + /* 80 */ "DBS", + /* 81 */ "STORAGE", + /* 82 */ "QTIME", + /* 83 */ "CONNS", + /* 84 */ "STATE", + /* 85 */ "KEEP", + /* 86 */ "CACHE", + /* 87 */ "REPLICA", + /* 88 */ "DAYS", + /* 89 */ "ROWS", + /* 90 */ "ABLOCKS", + /* 91 */ "TBLOCKS", + /* 92 */ "CTIME", + /* 93 */ "CLOG", + /* 94 */ "COMP", + /* 95 */ "PRECISION", + /* 96 */ "LP", + /* 97 */ "RP", + /* 98 */ "TAGS", + /* 99 */ "USING", + /* 100 */ "AS", + /* 101 */ "COMMA", + /* 102 */ "NULL", + /* 103 */ "SELECT", + /* 104 */ "UNION", + /* 105 */ "ALL", + /* 106 */ "FROM", + /* 107 */ "VARIABLE", + /* 108 */ "INTERVAL", + /* 109 */ "FILL", + /* 110 */ "SLIDING", + /* 111 */ "ORDER", + /* 112 */ "BY", + /* 113 */ "ASC", + /* 114 */ "DESC", + /* 115 */ "GROUP", + /* 116 */ "HAVING", + /* 117 */ "LIMIT", + /* 118 */ "OFFSET", + /* 119 */ "SLIMIT", + /* 120 */ "SOFFSET", + /* 121 */ "WHERE", + /* 122 */ "NOW", + /* 123 */ "RESET", + /* 124 */ "QUERY", + /* 125 */ "ADD", + /* 126 */ "COLUMN", + /* 127 */ "TAG", + /* 128 */ "CHANGE", + /* 129 */ "SET", + /* 130 */ "KILL", + /* 131 */ "CONNECTION", + /* 132 */ "COLON", + /* 133 */ "STREAM", + /* 134 */ "ABORT", + /* 135 */ "AFTER", + /* 136 */ "ATTACH", + /* 137 */ "BEFORE", + /* 138 */ "BEGIN", + /* 139 */ "CASCADE", + /* 140 */ "CLUSTER", + /* 141 */ "CONFLICT", + /* 142 */ "COPY", + /* 143 */ "DEFERRED", + /* 144 */ "DELIMITERS", + /* 145 */ "DETACH", + /* 146 */ "EACH", + /* 147 */ "END", + /* 148 */ "EXPLAIN", + /* 149 */ "FAIL", + /* 150 */ "FOR", + /* 151 */ "IGNORE", + /* 152 */ "IMMEDIATE", + /* 153 */ "INITIALLY", + /* 154 */ "INSTEAD", + /* 155 */ "MATCH", + /* 156 */ "KEY", + /* 157 */ "OF", + /* 158 */ "RAISE", + /* 159 */ "REPLACE", + /* 160 */ "RESTRICT", + /* 161 */ "ROW", + /* 162 */ "STATEMENT", + /* 163 */ "TRIGGER", + /* 164 */ "VIEW", + /* 165 */ "COUNT", + /* 166 */ "SUM", + /* 167 */ "AVG", + /* 168 */ "MIN", + /* 169 */ "MAX", + /* 170 */ "FIRST", + /* 
171 */ "LAST", + /* 172 */ "TOP", + /* 173 */ "BOTTOM", + /* 174 */ "STDDEV", + /* 175 */ "PERCENTILE", + /* 176 */ "APERCENTILE", + /* 177 */ "LEASTSQUARES", + /* 178 */ "HISTOGRAM", + /* 179 */ "DIFF", + /* 180 */ "SPREAD", + /* 181 */ "TWA", + /* 182 */ "INTERP", + /* 183 */ "LAST_ROW", + /* 184 */ "RATE", + /* 185 */ "IRATE", + /* 186 */ "SUM_RATE", + /* 187 */ "SUM_IRATE", + /* 188 */ "AVG_RATE", + /* 189 */ "AVG_IRATE", + /* 190 */ "SEMI", + /* 191 */ "NONE", + /* 192 */ "PREV", + /* 193 */ "LINEAR", + /* 194 */ "IMPORT", + /* 195 */ "METRIC", + /* 196 */ "TBNAME", + /* 197 */ "JOIN", + /* 198 */ "METRICS", + /* 199 */ "STABLE", + /* 200 */ "INSERT", + /* 201 */ "INTO", + /* 202 */ "VALUES", + /* 203 */ "error", + /* 204 */ "program", + /* 205 */ "cmd", + /* 206 */ "dbPrefix", + /* 207 */ "ids", + /* 208 */ "cpxName", + /* 209 */ "ifexists", + /* 210 */ "alter_db_optr", + /* 211 */ "acct_optr", + /* 212 */ "ifnotexists", + /* 213 */ "db_optr", + /* 214 */ "pps", + /* 215 */ "tseries", + /* 216 */ "dbs", + /* 217 */ "streams", + /* 218 */ "storage", + /* 219 */ "qtime", + /* 220 */ "users", + /* 221 */ "conns", + /* 222 */ "state", + /* 223 */ "keep", + /* 224 */ "tagitemlist", + /* 225 */ "tables", + /* 226 */ "cache", + /* 227 */ "replica", + /* 228 */ "days", + /* 229 */ "rows", + /* 230 */ "ablocks", + /* 231 */ "tblocks", + /* 232 */ "ctime", + /* 233 */ "clog", + /* 234 */ "comp", + /* 235 */ "prec", + /* 236 */ "typename", + /* 237 */ "signed", + /* 238 */ "create_table_args", + /* 239 */ "columnlist", + /* 240 */ "select", + /* 241 */ "column", + /* 242 */ "tagitem", + /* 243 */ "selcollist", + /* 244 */ "from", + /* 245 */ "where_opt", + /* 246 */ "interval_opt", + /* 247 */ "fill_opt", + /* 248 */ "sliding_opt", + /* 249 */ "groupby_opt", + /* 250 */ "orderby_opt", + /* 251 */ "having_opt", + /* 252 */ "slimit_opt", + /* 253 */ "limit_opt", + /* 254 */ "union", + /* 255 */ "sclp", + /* 256 */ "expr", + /* 257 */ "as", + /* 258 */ "tablelist", + /* 259 */ "tmvar", + /* 260 */ "sortlist", + /* 261 */ "sortitem", + /* 262 */ "item", + /* 263 */ "sortorder", + /* 264 */ "grouplist", + /* 265 */ "exprlist", + /* 266 */ "expritem", }; -#endif /* NDEBUG */ +#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ #ifndef NDEBUG /* For tracing reduce actions, the names of all rules are required. 
@@ -885,125 +1108,134 @@ static const char *const yyRuleName[] = { /* 119 */ "tagitem ::= MINUS FLOAT", /* 120 */ "tagitem ::= PLUS INTEGER", /* 121 */ "tagitem ::= PLUS FLOAT", - /* 122 */ "cmd ::= select", - /* 123 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", - /* 124 */ "select ::= SELECT selcollist", - /* 125 */ "sclp ::= selcollist COMMA", - /* 126 */ "sclp ::=", - /* 127 */ "selcollist ::= sclp expr as", - /* 128 */ "selcollist ::= sclp STAR", - /* 129 */ "as ::= AS ids", - /* 130 */ "as ::= ids", - /* 131 */ "as ::=", - /* 132 */ "from ::= FROM tablelist", - /* 133 */ "tablelist ::= ids cpxName", - /* 134 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 135 */ "tmvar ::= VARIABLE", - /* 136 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 137 */ "interval_opt ::=", - /* 138 */ "fill_opt ::=", - /* 139 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 140 */ "fill_opt ::= FILL LP ID RP", - /* 141 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 142 */ "sliding_opt ::=", - /* 143 */ "orderby_opt ::=", - /* 144 */ "orderby_opt ::= ORDER BY sortlist", - /* 145 */ "sortlist ::= sortlist COMMA item sortorder", - /* 146 */ "sortlist ::= item sortorder", - /* 147 */ "item ::= ids cpxName", - /* 148 */ "sortorder ::= ASC", - /* 149 */ "sortorder ::= DESC", - /* 150 */ "sortorder ::=", - /* 151 */ "groupby_opt ::=", - /* 152 */ "groupby_opt ::= GROUP BY grouplist", - /* 153 */ "grouplist ::= grouplist COMMA item", - /* 154 */ "grouplist ::= item", - /* 155 */ "having_opt ::=", - /* 156 */ "having_opt ::= HAVING expr", - /* 157 */ "limit_opt ::=", - /* 158 */ "limit_opt ::= LIMIT signed", - /* 159 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 160 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 161 */ "slimit_opt ::=", - /* 162 */ "slimit_opt ::= SLIMIT signed", - /* 163 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 164 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 165 */ "where_opt ::=", - /* 166 */ "where_opt ::= WHERE expr", - /* 167 */ "expr ::= LP expr RP", - /* 168 */ "expr ::= ID", - /* 169 */ "expr ::= ID DOT ID", - /* 170 */ "expr ::= ID DOT STAR", - /* 171 */ "expr ::= INTEGER", - /* 172 */ "expr ::= MINUS INTEGER", - /* 173 */ "expr ::= PLUS INTEGER", - /* 174 */ "expr ::= FLOAT", - /* 175 */ "expr ::= MINUS FLOAT", - /* 176 */ "expr ::= PLUS FLOAT", - /* 177 */ "expr ::= STRING", - /* 178 */ "expr ::= NOW", - /* 179 */ "expr ::= VARIABLE", - /* 180 */ "expr ::= BOOL", - /* 181 */ "expr ::= ID LP exprlist RP", - /* 182 */ "expr ::= ID LP STAR RP", - /* 183 */ "expr ::= expr AND expr", - /* 184 */ "expr ::= expr OR expr", - /* 185 */ "expr ::= expr LT expr", - /* 186 */ "expr ::= expr GT expr", - /* 187 */ "expr ::= expr LE expr", - /* 188 */ "expr ::= expr GE expr", - /* 189 */ "expr ::= expr NE expr", - /* 190 */ "expr ::= expr EQ expr", - /* 191 */ "expr ::= expr PLUS expr", - /* 192 */ "expr ::= expr MINUS expr", - /* 193 */ "expr ::= expr STAR expr", - /* 194 */ "expr ::= expr SLASH expr", - /* 195 */ "expr ::= expr REM expr", - /* 196 */ "expr ::= expr LIKE expr", - /* 197 */ "expr ::= expr IN LP exprlist RP", - /* 198 */ "exprlist ::= exprlist COMMA expritem", - /* 199 */ "exprlist ::= expritem", - /* 200 */ "expritem ::= expr", - /* 201 */ "expritem ::=", - /* 202 */ "cmd ::= INSERT INTO cpxName insert_value_list", - /* 203 */ "insert_value_list ::= VALUES LP itemlist RP", - /* 204 */ "insert_value_list ::= insert_value_list VALUES LP itemlist RP", - 
/* 205 */ "itemlist ::= itemlist COMMA expr", - /* 206 */ "itemlist ::= expr", - /* 207 */ "cmd ::= RESET QUERY CACHE", - /* 208 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 209 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 210 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 211 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 212 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 213 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 214 */ "cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER", - /* 215 */ "cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER", - /* 216 */ "cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER", + /* 122 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", + /* 123 */ "union ::= select", + /* 124 */ "union ::= LP union RP", + /* 125 */ "union ::= union UNION ALL select", + /* 126 */ "union ::= union UNION ALL LP select RP", + /* 127 */ "cmd ::= union", + /* 128 */ "select ::= SELECT selcollist", + /* 129 */ "sclp ::= selcollist COMMA", + /* 130 */ "sclp ::=", + /* 131 */ "selcollist ::= sclp expr as", + /* 132 */ "selcollist ::= sclp STAR", + /* 133 */ "as ::= AS ids", + /* 134 */ "as ::= ids", + /* 135 */ "as ::=", + /* 136 */ "from ::= FROM tablelist", + /* 137 */ "tablelist ::= ids cpxName", + /* 138 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 139 */ "tmvar ::= VARIABLE", + /* 140 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 141 */ "interval_opt ::=", + /* 142 */ "fill_opt ::=", + /* 143 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 144 */ "fill_opt ::= FILL LP ID RP", + /* 145 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 146 */ "sliding_opt ::=", + /* 147 */ "orderby_opt ::=", + /* 148 */ "orderby_opt ::= ORDER BY sortlist", + /* 149 */ "sortlist ::= sortlist COMMA item sortorder", + /* 150 */ "sortlist ::= item sortorder", + /* 151 */ "item ::= ids cpxName", + /* 152 */ "sortorder ::= ASC", + /* 153 */ "sortorder ::= DESC", + /* 154 */ "sortorder ::=", + /* 155 */ "groupby_opt ::=", + /* 156 */ "groupby_opt ::= GROUP BY grouplist", + /* 157 */ "grouplist ::= grouplist COMMA item", + /* 158 */ "grouplist ::= item", + /* 159 */ "having_opt ::=", + /* 160 */ "having_opt ::= HAVING expr", + /* 161 */ "limit_opt ::=", + /* 162 */ "limit_opt ::= LIMIT signed", + /* 163 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 164 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 165 */ "slimit_opt ::=", + /* 166 */ "slimit_opt ::= SLIMIT signed", + /* 167 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 168 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 169 */ "where_opt ::=", + /* 170 */ "where_opt ::= WHERE expr", + /* 171 */ "expr ::= LP expr RP", + /* 172 */ "expr ::= ID", + /* 173 */ "expr ::= ID DOT ID", + /* 174 */ "expr ::= ID DOT STAR", + /* 175 */ "expr ::= INTEGER", + /* 176 */ "expr ::= MINUS INTEGER", + /* 177 */ "expr ::= PLUS INTEGER", + /* 178 */ "expr ::= FLOAT", + /* 179 */ "expr ::= MINUS FLOAT", + /* 180 */ "expr ::= PLUS FLOAT", + /* 181 */ "expr ::= STRING", + /* 182 */ "expr ::= NOW", + /* 183 */ "expr ::= VARIABLE", + /* 184 */ "expr ::= BOOL", + /* 185 */ "expr ::= ID LP exprlist RP", + /* 186 */ "expr ::= ID LP STAR RP", + /* 187 */ "expr ::= expr AND expr", + /* 188 */ "expr ::= expr OR expr", + /* 189 */ "expr ::= expr LT expr", + /* 190 */ "expr ::= expr GT expr", + /* 191 */ "expr ::= expr LE expr", + /* 192 */ "expr ::= expr GE 
expr", + /* 193 */ "expr ::= expr NE expr", + /* 194 */ "expr ::= expr EQ expr", + /* 195 */ "expr ::= expr PLUS expr", + /* 196 */ "expr ::= expr MINUS expr", + /* 197 */ "expr ::= expr STAR expr", + /* 198 */ "expr ::= expr SLASH expr", + /* 199 */ "expr ::= expr REM expr", + /* 200 */ "expr ::= expr LIKE expr", + /* 201 */ "expr ::= expr IN LP exprlist RP", + /* 202 */ "exprlist ::= exprlist COMMA expritem", + /* 203 */ "exprlist ::= expritem", + /* 204 */ "expritem ::= expr", + /* 205 */ "expritem ::=", + /* 206 */ "cmd ::= RESET QUERY CACHE", + /* 207 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 208 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 209 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 210 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 211 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 212 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 213 */ "cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER", + /* 214 */ "cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER", + /* 215 */ "cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER", }; #endif /* NDEBUG */ #if YYSTACKDEPTH<=0 /* -** Try to increase the size of the parser stack. +** Try to increase the size of the parser stack. Return the number +** of errors. Return 0 on success. */ -static void yyGrowStack(yyParser *p){ +static int yyGrowStack(yyParser *p){ int newSize; + int idx; yyStackEntry *pNew; newSize = p->yystksz*2 + 100; - pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + idx = p->yytos ? (int)(p->yytos - p->yystack) : 0; + if( p->yystack==&p->yystk0 ){ + pNew = malloc(newSize*sizeof(pNew[0])); + if( pNew ) pNew[0] = p->yystk0; + }else{ + pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + } if( pNew ){ p->yystack = pNew; - p->yystksz = newSize; + p->yytos = &p->yystack[idx]; #ifndef NDEBUG if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sStack grows to %d entries!\n", - yyTracePrompt, p->yystksz); + fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", + yyTracePrompt, p->yystksz, newSize); } #endif + p->yystksz = newSize; } + return pNew==0; } #endif @@ -1016,6 +1248,34 @@ static void yyGrowStack(yyParser *p){ # define YYMALLOCARGTYPE size_t #endif +/* Initialize a new parser that has already been allocated. +*/ +void ParseInit(void *yypParser){ + yyParser *pParser = (yyParser*)yypParser; +#ifdef YYTRACKMAXSTACKDEPTH + pParser->yyhwm = 0; +#endif +#if YYSTACKDEPTH<=0 + pParser->yytos = NULL; + pParser->yystack = NULL; + pParser->yystksz = 0; + if( yyGrowStack(pParser) ){ + pParser->yystack = &pParser->yystk0; + pParser->yystksz = 1; + } +#endif +#ifndef YYNOERRORRECOVERY + pParser->yyerrcnt = -1; +#endif + pParser->yytos = pParser->yystack; + pParser->yystack[0].stateno = 0; + pParser->yystack[0].major = 0; +#if YYSTACKDEPTH>0 + pParser->yystackEnd = &pParser->yystack[YYSTACKDEPTH-1]; +#endif +} + +#ifndef Parse_ENGINEALWAYSONSTACK /* ** This function allocates a new parser. 
** The only argument is a pointer to a function which works like @@ -1031,19 +1291,11 @@ static void yyGrowStack(yyParser *p){ void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){ yyParser *pParser; pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); - if( pParser ){ - pParser->yyidx = -1; -#ifdef YYTRACKMAXSTACKDEPTH - pParser->yyidxMax = 0; -#endif -#if YYSTACKDEPTH<=0 - pParser->yystack = NULL; - pParser->yystksz = 0; - yyGrowStack(pParser); -#endif - } + if( pParser ) ParseInit(pParser); return pParser; } +#endif /* Parse_ENGINEALWAYSONSTACK */ + /* The following function deletes the "minor type" or semantic value ** associated with a symbol. The symbol can be either a terminal @@ -1070,46 +1322,50 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 216: /* keep */ - case 217: /* tagitemlist */ - case 240: /* fill_opt */ - case 242: /* groupby_opt */ - case 243: /* orderby_opt */ - case 252: /* sortlist */ - case 256: /* grouplist */ + case 223: /* keep */ + case 224: /* tagitemlist */ + case 247: /* fill_opt */ + case 249: /* groupby_opt */ + case 250: /* orderby_opt */ + case 260: /* sortlist */ + case 264: /* grouplist */ { -tVariantListDestroy((yypminor->yy480)); +tVariantListDestroy((yypminor->yy30)); } break; - case 232: /* columnlist */ + case 239: /* columnlist */ { -tFieldListDestroy((yypminor->yy421)); +tFieldListDestroy((yypminor->yy325)); } break; - case 233: /* select */ + case 240: /* select */ { -destroyQuerySql((yypminor->yy138)); +doDestroyQuerySql((yypminor->yy444)); } break; - case 236: /* selcollist */ - case 247: /* sclp */ - case 257: /* exprlist */ - case 260: /* itemlist */ + case 243: /* selcollist */ + case 255: /* sclp */ + case 265: /* exprlist */ { -tSQLExprListDestroy((yypminor->yy284)); +tSQLExprListDestroy((yypminor->yy506)); } break; - case 238: /* where_opt */ - case 244: /* having_opt */ - case 248: /* expr */ - case 258: /* expritem */ + case 245: /* where_opt */ + case 251: /* having_opt */ + case 256: /* expr */ + case 266: /* expritem */ { -tSQLExprDestroy((yypminor->yy244)); +tSQLExprDestroy((yypminor->yy388)); } break; - case 253: /* sortitem */ + case 254: /* union */ { -tVariantDestroy(&(yypminor->yy236)); +destroyAllSelectClause((yypminor->yy309)); +} + break; + case 261: /* sortitem */ +{ +tVariantDestroy(&(yypminor->yy380)); } break; /********* End destructor definitions *****************************************/ @@ -1125,8 +1381,9 @@ tVariantDestroy(&(yypminor->yy236)); */ static void yy_pop_parser_stack(yyParser *pParser){ yyStackEntry *yytos; - assert( pParser->yyidx>=0 ); - yytos = &pParser->yystack[pParser->yyidx--]; + assert( pParser->yytos!=0 ); + assert( pParser->yytos > pParser->yystack ); + yytos = pParser->yytos--; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sPopping %s\n", @@ -1137,6 +1394,18 @@ static void yy_pop_parser_stack(yyParser *pParser){ yy_destructor(pParser, yytos->major, &yytos->minor); } +/* +** Clear all secondary memory allocations from the parser +*/ +void ParseFinalize(void *p){ + yyParser *pParser = (yyParser*)p; + while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser); +#if YYSTACKDEPTH<=0 + if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack); +#endif +} + +#ifndef Parse_ENGINEALWAYSONSTACK /* ** Deallocate and destroy a parser. Destructors are called for ** all stack elements before shutting the parser down. 
@@ -1149,16 +1418,13 @@ void ParseFree(
   void *p,                    /* The parser to be deleted */
   void (*freeProc)(void*)     /* Function used to reclaim memory */
 ){
-  yyParser *pParser = (yyParser*)p;
 #ifndef YYPARSEFREENEVERNULL
-  if( pParser==0 ) return;
-#endif
-  while( pParser->yyidx>=0 ) yy_pop_parser_stack(pParser);
-#if YYSTACKDEPTH<=0
-  free(pParser->yystack);
+  if( p==0 ) return;
 #endif
-  (*freeProc)((void*)pParser);
+  ParseFinalize(p);
+  (*freeProc)(p);
 }
+#endif /* Parse_ENGINEALWAYSONSTACK */
 
 /*
 ** Return the peak depth of the stack for a parser.
@@ -1166,7 +1432,44 @@ void ParseFree(
 #ifdef YYTRACKMAXSTACKDEPTH
 int ParseStackPeak(void *p){
   yyParser *pParser = (yyParser*)p;
-  return pParser->yyidxMax;
+  return pParser->yyhwm;
+}
+#endif
+
+/* This array of booleans keeps track of the parser statement
+** coverage.  The element yycoverage[X][Y] is set when the parser
+** is in state X and has a lookahead token Y.  In a well-tested
+** systems, every element of this matrix should end up being set.
+*/
+#if defined(YYCOVERAGE)
+static unsigned char yycoverage[YYNSTATE][YYNTOKEN];
+#endif
+
+/*
+** Write into out a description of every state/lookahead combination that
+**
+**   (1)  has not been used by the parser, and
+**   (2)  is not a syntax error.
+**
+** Return the number of missed state/lookahead combinations.
+*/
+#if defined(YYCOVERAGE)
+int ParseCoverage(FILE *out){
+  int stateno, iLookAhead, i;
+  int nMissed = 0;
+  for(stateno=0; stateno<YYNSTATE; stateno++){
+    i = yy_shift_ofst[stateno];
+    for(iLookAhead=0; iLookAhead<YYNTOKEN; iLookAhead++){
+      if( yy_lookahead[i+iLookAhead]!=iLookAhead ) continue;
+      if( yycoverage[stateno][iLookAhead]==0 ) nMissed++;
+      if( out ){
+        fprintf(out,"State %d lookahead %s %s\n", stateno,
+                yyTokenName[iLookAhead],
+                yycoverage[stateno][iLookAhead] ? "ok" : "missed");
+      }
+    }
+  }
+  return nMissed;
 }
 #endif
 
 /*
 ** Find the appropriate action for a parser given the terminal
 ** look-ahead token iLookAhead.
 */
-static int yy_find_shift_action(
+static unsigned int yy_find_shift_action(
   yyParser *pParser,        /* The parser */
   YYCODETYPE iLookAhead     /* The look-ahead token */
 ){
   int i;
-  int stateno = pParser->yystack[pParser->yyidx].stateno;
+  int stateno = pParser->yytos->stateno;
 
-  if( stateno>=YY_MIN_REDUCE ) return stateno;
+  if( stateno>YY_MAX_SHIFT ) return stateno;
   assert( stateno <= YY_SHIFT_COUNT );
+#if defined(YYCOVERAGE)
+  yycoverage[stateno][iLookAhead] = 1;
+#endif
   do{
     i = yy_shift_ofst[stateno];
-    if( i==YY_SHIFT_USE_DFLT ) return yy_default[stateno];
+    assert( i>=0 && i+YYNTOKEN<=sizeof(yy_lookahead)/sizeof(yy_lookahead[0]) );
     assert( iLookAhead!=YYNOCODE );
+    assert( iLookAhead < YYNTOKEN );
     i += iLookAhead;
-    if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
-      if( iLookAhead>0 ){
+    if( yy_lookahead[i]!=iLookAhead ){
 #ifdef YYFALLBACK
-      YYCODETYPE iFallback;            /* Fallback token */
-      if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
-             && (iFallback = yyFallback[iLookAhead])!=0 ){
-#ifndef NDEBUG
-        if( yyTraceFILE ){
-          fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
-             yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
-        }
-#endif
-        assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
-        iLookAhead = iFallback;
-        continue;
+        if( yyTraceFILE ){
+          fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
+                  yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
         }
+#endif
+        assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
+        iLookAhead = iFallback;
+        continue;
+      }
 #endif
 #ifdef YYWILDCARD
-      {
-        int j = i - iLookAhead + YYWILDCARD;
-        if(
 #if YY_SHIFT_MIN+YYWILDCARD<0
-          j>=0 &&
 #endif
 #if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT
-          j<YY_ACTTAB_COUNT &&
 #endif
-          yy_lookahead[j]==YYWILDCARD
+          yy_lookahead[j]==YYWILDCARD && iLookAhead>0
+        ){
 #ifndef NDEBUG
-        if( yyTraceFILE ){
-          fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
-             yyTracePrompt, yyTokenName[iLookAhead],
-             yyTokenName[YYWILDCARD]);
-        }
-#endif /* NDEBUG */
-        return yy_action[j];
+          if( yyTraceFILE ){
+            fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
+               yyTracePrompt, yyTokenName[iLookAhead],
+               yyTokenName[YYWILDCARD]);
           }
+#endif /* NDEBUG */
+          return yy_action[j];
         }
-#endif /* YYWILDCARD */
       }
+#endif /* YYWILDCARD */
       return yy_default[stateno];
     }else{
       return yy_action[i];
@@ -1253,7 +1558,6 @@ static int yy_find_reduce_action(
   assert( stateno<=YY_REDUCE_COUNT );
 #endif
   i = yy_reduce_ofst[stateno];
-  assert( i!=YY_REDUCE_USE_DFLT );
   assert( iLookAhead!=YYNOCODE );
   i += iLookAhead;
 #ifdef YYERRORSYMBOL
@@ -1270,15 +1574,14 @@ static int yy_find_reduce_action(
 /*
 ** The following routine is called if the stack overflows.
 */
-static void yyStackOverflow(yyParser *yypParser, YYMINORTYPE *yypMinor){
+static void yyStackOverflow(yyParser *yypParser){
   ParseARG_FETCH;
-  yypParser->yyidx--;
 #ifndef NDEBUG
   if( yyTraceFILE ){
     fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
   }
 #endif
-  while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
+  while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser);
   /* Here code is inserted which will execute if the parser
   ** stack every overflows */
 /******** Begin %stack_overflow code ******************************************/
@@ -1290,20 +1593,21 @@ static void yyStackOverflow(yyParser *yypParser, YYMINORTYPE *yypMinor){
 ** Print tracing information for a SHIFT action
 */
 #ifndef NDEBUG
-static void yyTraceShift(yyParser *yypParser, int yyNewState){
+static void yyTraceShift(yyParser *yypParser, int yyNewState, const char *zTag){
   if( yyTraceFILE ){
     if( yyNewState<YYNSTATE ){
-      fprintf(yyTraceFILE,"%sShift '%s', go to state %d\n",
-         yyTracePrompt,yyTokenName[yypParser->yystack[yypParser->yyidx].major],
+      fprintf(yyTraceFILE,"%s%s '%s', go to state %d\n",
+         yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major],
          yyNewState);
     }else{
-      fprintf(yyTraceFILE,"%sShift '%s'\n",
-         yyTracePrompt,yyTokenName[yypParser->yystack[yypParser->yyidx].major]);
+      fprintf(yyTraceFILE,"%s%s '%s', pending reduce %d\n",
+         yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major],
+         yyNewState - YY_MIN_REDUCE);
    }
  }
 }
 #else
-# define yyTraceShift(X,Y)
+# define yyTraceShift(X,Y,Z)
 #endif
 
 /*
@@ -1313,260 +1617,264 @@ static void yy_shift(
   yyParser *yypParser,          /* The parser to be shifted */
   int yyNewState,               /* The new state to shift in */
   int yyMajor,                  /* The major token to shift in */
-  YYMINORTYPE *yypMinor         /* Pointer to the minor token to shift in */
+  ParseTOKENTYPE yyMinor        /* The minor token to shift in */
 ){
   yyStackEntry *yytos;
-  yypParser->yyidx++;
+  yypParser->yytos++;
 #ifdef YYTRACKMAXSTACKDEPTH
-  if( yypParser->yyidx>yypParser->yyidxMax ){
-    yypParser->yyidxMax = yypParser->yyidx;
+  if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
+    yypParser->yyhwm++;
+    assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) );
   }
 #endif
 #if YYSTACKDEPTH>0
-  if( yypParser->yyidx>=YYSTACKDEPTH ){
-    yyStackOverflow(yypParser, yypMinor);
+  if( yypParser->yytos>yypParser->yystackEnd ){
+    yypParser->yytos--;
+    yyStackOverflow(yypParser);
     return;
   }
 #else
-  if( yypParser->yyidx>=yypParser->yystksz ){
-    yyGrowStack(yypParser);
-    if( yypParser->yyidx>=yypParser->yystksz ){
-      yyStackOverflow(yypParser, yypMinor);
+  if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){
+    if( yyGrowStack(yypParser) ){
+      yypParser->yytos--;
+      yyStackOverflow(yypParser);
       return;
    }
  }
 #endif
-  yytos = &yypParser->yystack[yypParser->yyidx];
+  if( yyNewState > YY_MAX_SHIFT ){
+    yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
+  }
+  yytos = yypParser->yytos;
   yytos->stateno = (YYACTIONTYPE)yyNewState;
   yytos->major = (YYCODETYPE)yyMajor;
-  yytos->minor = *yypMinor;
-  yyTraceShift(yypParser, yyNewState);
+  yytos->minor.yy0 = yyMinor;
+  yyTraceShift(yypParser, yyNewState, "Shift");
 }
 
 /* The following table contains information about every rule that
 ** is used during the reduce.
*/ static const struct { - YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ - unsigned char nrhs; /* Number of right-hand side symbols in the rule */ + YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ + signed char nrhs; /* Negative of the number of RHS symbols in the rule */ } yyRuleInfo[] = { - { 197, 1 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 3 }, - { 199, 0 }, - { 199, 2 }, - { 201, 0 }, - { 201, 2 }, - { 198, 3 }, - { 198, 5 }, - { 198, 3 }, - { 198, 5 }, - { 198, 3 }, - { 198, 4 }, - { 198, 5 }, - { 198, 4 }, - { 198, 3 }, - { 198, 3 }, - { 198, 3 }, - { 198, 2 }, - { 198, 3 }, - { 198, 5 }, - { 198, 5 }, - { 198, 4 }, - { 198, 5 }, - { 198, 3 }, - { 198, 4 }, - { 198, 4 }, - { 198, 4 }, - { 198, 6 }, - { 200, 1 }, - { 200, 1 }, - { 202, 2 }, - { 202, 0 }, - { 205, 3 }, - { 205, 0 }, - { 198, 3 }, - { 198, 6 }, - { 198, 5 }, - { 198, 5 }, - { 207, 0 }, - { 207, 2 }, - { 208, 0 }, - { 208, 2 }, - { 209, 0 }, - { 209, 2 }, - { 210, 0 }, - { 210, 2 }, - { 211, 0 }, - { 211, 2 }, - { 212, 0 }, - { 212, 2 }, - { 213, 0 }, - { 213, 2 }, - { 214, 0 }, - { 214, 2 }, - { 215, 0 }, - { 215, 2 }, - { 204, 9 }, - { 216, 2 }, - { 218, 2 }, - { 219, 2 }, - { 220, 2 }, - { 221, 2 }, - { 222, 2 }, - { 223, 2 }, - { 224, 2 }, - { 225, 2 }, - { 226, 2 }, - { 227, 2 }, - { 228, 2 }, - { 206, 0 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 203, 0 }, - { 203, 2 }, - { 203, 2 }, - { 229, 1 }, - { 229, 4 }, - { 230, 1 }, - { 230, 2 }, - { 230, 2 }, - { 198, 6 }, - { 231, 3 }, - { 231, 7 }, - { 231, 7 }, - { 231, 2 }, - { 232, 3 }, - { 232, 1 }, - { 234, 2 }, - { 217, 3 }, - { 217, 1 }, - { 235, 1 }, - { 235, 1 }, - { 235, 1 }, - { 235, 1 }, - { 235, 1 }, - { 235, 2 }, - { 235, 2 }, - { 235, 2 }, - { 235, 2 }, - { 198, 1 }, - { 233, 12 }, - { 233, 2 }, - { 247, 2 }, - { 247, 0 }, - { 236, 3 }, - { 236, 2 }, - { 249, 2 }, - { 249, 1 }, - { 249, 0 }, - { 237, 2 }, - { 250, 2 }, - { 250, 4 }, - { 251, 1 }, - { 239, 4 }, - { 239, 0 }, - { 240, 0 }, - { 240, 6 }, - { 240, 4 }, - { 241, 4 }, - { 241, 0 }, - { 243, 0 }, - { 243, 3 }, - { 252, 4 }, - { 252, 2 }, - { 254, 2 }, - { 255, 1 }, - { 255, 1 }, - { 255, 0 }, - { 242, 0 }, - { 242, 3 }, - { 256, 3 }, - { 256, 1 }, - { 244, 0 }, - { 244, 2 }, - { 246, 0 }, - { 246, 2 }, - { 246, 4 }, - { 246, 4 }, - { 245, 0 }, - { 245, 2 }, - { 245, 4 }, - { 245, 4 }, - { 238, 0 }, - { 238, 2 }, - { 248, 3 }, - { 248, 1 }, - { 248, 3 }, - { 248, 3 }, - { 248, 1 }, - { 248, 2 }, - { 248, 2 }, - { 248, 1 }, - { 248, 2 }, - { 248, 2 }, - { 248, 1 }, - { 248, 1 }, - { 248, 1 }, - { 248, 1 }, - { 248, 4 }, - { 248, 4 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 5 }, - { 257, 3 }, - { 257, 1 }, - { 258, 1 }, - { 258, 0 }, - { 198, 4 }, - { 259, 4 }, - { 259, 5 }, - { 260, 3 }, - { 260, 1 }, - { 198, 3 }, - { 198, 7 }, - { 198, 7 }, - { 198, 7 }, - { 198, 7 }, - { 198, 8 }, - { 198, 9 }, - { 198, 5 }, - { 198, 7 }, - { 198, 7 }, + { 204, -1 }, /* (0) program ::= cmd */ + { 205, -2 }, /* (1) cmd ::= SHOW DATABASES */ + { 205, -2 }, /* (2) cmd ::= SHOW MNODES */ + { 205, -2 }, /* (3) cmd ::= SHOW DNODES */ + { 205, -2 
}, /* (4) cmd ::= SHOW ACCOUNTS */ + { 205, -2 }, /* (5) cmd ::= SHOW USERS */ + { 205, -2 }, /* (6) cmd ::= SHOW MODULES */ + { 205, -2 }, /* (7) cmd ::= SHOW QUERIES */ + { 205, -2 }, /* (8) cmd ::= SHOW CONNECTIONS */ + { 205, -2 }, /* (9) cmd ::= SHOW STREAMS */ + { 205, -2 }, /* (10) cmd ::= SHOW CONFIGS */ + { 205, -2 }, /* (11) cmd ::= SHOW SCORES */ + { 205, -2 }, /* (12) cmd ::= SHOW GRANTS */ + { 205, -2 }, /* (13) cmd ::= SHOW VNODES */ + { 205, -3 }, /* (14) cmd ::= SHOW VNODES IPTOKEN */ + { 206, 0 }, /* (15) dbPrefix ::= */ + { 206, -2 }, /* (16) dbPrefix ::= ids DOT */ + { 208, 0 }, /* (17) cpxName ::= */ + { 208, -2 }, /* (18) cpxName ::= DOT ids */ + { 205, -3 }, /* (19) cmd ::= SHOW dbPrefix TABLES */ + { 205, -5 }, /* (20) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + { 205, -3 }, /* (21) cmd ::= SHOW dbPrefix STABLES */ + { 205, -5 }, /* (22) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + { 205, -3 }, /* (23) cmd ::= SHOW dbPrefix VGROUPS */ + { 205, -4 }, /* (24) cmd ::= SHOW dbPrefix VGROUPS ids */ + { 205, -5 }, /* (25) cmd ::= DROP TABLE ifexists ids cpxName */ + { 205, -4 }, /* (26) cmd ::= DROP DATABASE ifexists ids */ + { 205, -3 }, /* (27) cmd ::= DROP DNODE IPTOKEN */ + { 205, -3 }, /* (28) cmd ::= DROP USER ids */ + { 205, -3 }, /* (29) cmd ::= DROP ACCOUNT ids */ + { 205, -2 }, /* (30) cmd ::= USE ids */ + { 205, -3 }, /* (31) cmd ::= DESCRIBE ids cpxName */ + { 205, -5 }, /* (32) cmd ::= ALTER USER ids PASS ids */ + { 205, -5 }, /* (33) cmd ::= ALTER USER ids PRIVILEGE ids */ + { 205, -4 }, /* (34) cmd ::= ALTER DNODE IPTOKEN ids */ + { 205, -5 }, /* (35) cmd ::= ALTER DNODE IPTOKEN ids ids */ + { 205, -3 }, /* (36) cmd ::= ALTER LOCAL ids */ + { 205, -4 }, /* (37) cmd ::= ALTER LOCAL ids ids */ + { 205, -4 }, /* (38) cmd ::= ALTER DATABASE ids alter_db_optr */ + { 205, -4 }, /* (39) cmd ::= ALTER ACCOUNT ids acct_optr */ + { 205, -6 }, /* (40) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + { 207, -1 }, /* (41) ids ::= ID */ + { 207, -1 }, /* (42) ids ::= STRING */ + { 209, -2 }, /* (43) ifexists ::= IF EXISTS */ + { 209, 0 }, /* (44) ifexists ::= */ + { 212, -3 }, /* (45) ifnotexists ::= IF NOT EXISTS */ + { 212, 0 }, /* (46) ifnotexists ::= */ + { 205, -3 }, /* (47) cmd ::= CREATE DNODE IPTOKEN */ + { 205, -6 }, /* (48) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + { 205, -5 }, /* (49) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + { 205, -5 }, /* (50) cmd ::= CREATE USER ids PASS ids */ + { 214, 0 }, /* (51) pps ::= */ + { 214, -2 }, /* (52) pps ::= PPS INTEGER */ + { 215, 0 }, /* (53) tseries ::= */ + { 215, -2 }, /* (54) tseries ::= TSERIES INTEGER */ + { 216, 0 }, /* (55) dbs ::= */ + { 216, -2 }, /* (56) dbs ::= DBS INTEGER */ + { 217, 0 }, /* (57) streams ::= */ + { 217, -2 }, /* (58) streams ::= STREAMS INTEGER */ + { 218, 0 }, /* (59) storage ::= */ + { 218, -2 }, /* (60) storage ::= STORAGE INTEGER */ + { 219, 0 }, /* (61) qtime ::= */ + { 219, -2 }, /* (62) qtime ::= QTIME INTEGER */ + { 220, 0 }, /* (63) users ::= */ + { 220, -2 }, /* (64) users ::= USERS INTEGER */ + { 221, 0 }, /* (65) conns ::= */ + { 221, -2 }, /* (66) conns ::= CONNS INTEGER */ + { 222, 0 }, /* (67) state ::= */ + { 222, -2 }, /* (68) state ::= STATE ids */ + { 211, -9 }, /* (69) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + { 223, -2 }, /* (70) keep ::= KEEP tagitemlist */ + { 225, -2 }, /* (71) tables ::= TABLES INTEGER */ + { 226, -2 }, /* (72) cache ::= CACHE INTEGER */ + { 227, -2 }, /* (73) replica ::= REPLICA INTEGER */ + { 228, 
-2 }, /* (74) days ::= DAYS INTEGER */ + { 229, -2 }, /* (75) rows ::= ROWS INTEGER */ + { 230, -2 }, /* (76) ablocks ::= ABLOCKS ID */ + { 231, -2 }, /* (77) tblocks ::= TBLOCKS INTEGER */ + { 232, -2 }, /* (78) ctime ::= CTIME INTEGER */ + { 233, -2 }, /* (79) clog ::= CLOG INTEGER */ + { 234, -2 }, /* (80) comp ::= COMP INTEGER */ + { 235, -2 }, /* (81) prec ::= PRECISION STRING */ + { 213, 0 }, /* (82) db_optr ::= */ + { 213, -2 }, /* (83) db_optr ::= db_optr tables */ + { 213, -2 }, /* (84) db_optr ::= db_optr cache */ + { 213, -2 }, /* (85) db_optr ::= db_optr replica */ + { 213, -2 }, /* (86) db_optr ::= db_optr days */ + { 213, -2 }, /* (87) db_optr ::= db_optr rows */ + { 213, -2 }, /* (88) db_optr ::= db_optr ablocks */ + { 213, -2 }, /* (89) db_optr ::= db_optr tblocks */ + { 213, -2 }, /* (90) db_optr ::= db_optr ctime */ + { 213, -2 }, /* (91) db_optr ::= db_optr clog */ + { 213, -2 }, /* (92) db_optr ::= db_optr comp */ + { 213, -2 }, /* (93) db_optr ::= db_optr prec */ + { 213, -2 }, /* (94) db_optr ::= db_optr keep */ + { 210, 0 }, /* (95) alter_db_optr ::= */ + { 210, -2 }, /* (96) alter_db_optr ::= alter_db_optr replica */ + { 210, -2 }, /* (97) alter_db_optr ::= alter_db_optr tables */ + { 236, -1 }, /* (98) typename ::= ids */ + { 236, -4 }, /* (99) typename ::= ids LP signed RP */ + { 237, -1 }, /* (100) signed ::= INTEGER */ + { 237, -2 }, /* (101) signed ::= PLUS INTEGER */ + { 237, -2 }, /* (102) signed ::= MINUS INTEGER */ + { 205, -6 }, /* (103) cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ + { 238, -3 }, /* (104) create_table_args ::= LP columnlist RP */ + { 238, -7 }, /* (105) create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ + { 238, -7 }, /* (106) create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ + { 238, -2 }, /* (107) create_table_args ::= AS select */ + { 239, -3 }, /* (108) columnlist ::= columnlist COMMA column */ + { 239, -1 }, /* (109) columnlist ::= column */ + { 241, -2 }, /* (110) column ::= ids typename */ + { 224, -3 }, /* (111) tagitemlist ::= tagitemlist COMMA tagitem */ + { 224, -1 }, /* (112) tagitemlist ::= tagitem */ + { 242, -1 }, /* (113) tagitem ::= INTEGER */ + { 242, -1 }, /* (114) tagitem ::= FLOAT */ + { 242, -1 }, /* (115) tagitem ::= STRING */ + { 242, -1 }, /* (116) tagitem ::= BOOL */ + { 242, -1 }, /* (117) tagitem ::= NULL */ + { 242, -2 }, /* (118) tagitem ::= MINUS INTEGER */ + { 242, -2 }, /* (119) tagitem ::= MINUS FLOAT */ + { 242, -2 }, /* (120) tagitem ::= PLUS INTEGER */ + { 242, -2 }, /* (121) tagitem ::= PLUS FLOAT */ + { 240, -12 }, /* (122) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + { 254, -1 }, /* (123) union ::= select */ + { 254, -3 }, /* (124) union ::= LP union RP */ + { 254, -4 }, /* (125) union ::= union UNION ALL select */ + { 254, -6 }, /* (126) union ::= union UNION ALL LP select RP */ + { 205, -1 }, /* (127) cmd ::= union */ + { 240, -2 }, /* (128) select ::= SELECT selcollist */ + { 255, -2 }, /* (129) sclp ::= selcollist COMMA */ + { 255, 0 }, /* (130) sclp ::= */ + { 243, -3 }, /* (131) selcollist ::= sclp expr as */ + { 243, -2 }, /* (132) selcollist ::= sclp STAR */ + { 257, -2 }, /* (133) as ::= AS ids */ + { 257, -1 }, /* (134) as ::= ids */ + { 257, 0 }, /* (135) as ::= */ + { 244, -2 }, /* (136) from ::= FROM tablelist */ + { 258, -2 }, /* (137) tablelist ::= ids cpxName */ + { 258, -4 }, /* (138) tablelist ::= tablelist COMMA ids cpxName */ + { 259, -1 
}, /* (139) tmvar ::= VARIABLE */ + { 246, -4 }, /* (140) interval_opt ::= INTERVAL LP tmvar RP */ + { 246, 0 }, /* (141) interval_opt ::= */ + { 247, 0 }, /* (142) fill_opt ::= */ + { 247, -6 }, /* (143) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + { 247, -4 }, /* (144) fill_opt ::= FILL LP ID RP */ + { 248, -4 }, /* (145) sliding_opt ::= SLIDING LP tmvar RP */ + { 248, 0 }, /* (146) sliding_opt ::= */ + { 250, 0 }, /* (147) orderby_opt ::= */ + { 250, -3 }, /* (148) orderby_opt ::= ORDER BY sortlist */ + { 260, -4 }, /* (149) sortlist ::= sortlist COMMA item sortorder */ + { 260, -2 }, /* (150) sortlist ::= item sortorder */ + { 262, -2 }, /* (151) item ::= ids cpxName */ + { 263, -1 }, /* (152) sortorder ::= ASC */ + { 263, -1 }, /* (153) sortorder ::= DESC */ + { 263, 0 }, /* (154) sortorder ::= */ + { 249, 0 }, /* (155) groupby_opt ::= */ + { 249, -3 }, /* (156) groupby_opt ::= GROUP BY grouplist */ + { 264, -3 }, /* (157) grouplist ::= grouplist COMMA item */ + { 264, -1 }, /* (158) grouplist ::= item */ + { 251, 0 }, /* (159) having_opt ::= */ + { 251, -2 }, /* (160) having_opt ::= HAVING expr */ + { 253, 0 }, /* (161) limit_opt ::= */ + { 253, -2 }, /* (162) limit_opt ::= LIMIT signed */ + { 253, -4 }, /* (163) limit_opt ::= LIMIT signed OFFSET signed */ + { 253, -4 }, /* (164) limit_opt ::= LIMIT signed COMMA signed */ + { 252, 0 }, /* (165) slimit_opt ::= */ + { 252, -2 }, /* (166) slimit_opt ::= SLIMIT signed */ + { 252, -4 }, /* (167) slimit_opt ::= SLIMIT signed SOFFSET signed */ + { 252, -4 }, /* (168) slimit_opt ::= SLIMIT signed COMMA signed */ + { 245, 0 }, /* (169) where_opt ::= */ + { 245, -2 }, /* (170) where_opt ::= WHERE expr */ + { 256, -3 }, /* (171) expr ::= LP expr RP */ + { 256, -1 }, /* (172) expr ::= ID */ + { 256, -3 }, /* (173) expr ::= ID DOT ID */ + { 256, -3 }, /* (174) expr ::= ID DOT STAR */ + { 256, -1 }, /* (175) expr ::= INTEGER */ + { 256, -2 }, /* (176) expr ::= MINUS INTEGER */ + { 256, -2 }, /* (177) expr ::= PLUS INTEGER */ + { 256, -1 }, /* (178) expr ::= FLOAT */ + { 256, -2 }, /* (179) expr ::= MINUS FLOAT */ + { 256, -2 }, /* (180) expr ::= PLUS FLOAT */ + { 256, -1 }, /* (181) expr ::= STRING */ + { 256, -1 }, /* (182) expr ::= NOW */ + { 256, -1 }, /* (183) expr ::= VARIABLE */ + { 256, -1 }, /* (184) expr ::= BOOL */ + { 256, -4 }, /* (185) expr ::= ID LP exprlist RP */ + { 256, -4 }, /* (186) expr ::= ID LP STAR RP */ + { 256, -3 }, /* (187) expr ::= expr AND expr */ + { 256, -3 }, /* (188) expr ::= expr OR expr */ + { 256, -3 }, /* (189) expr ::= expr LT expr */ + { 256, -3 }, /* (190) expr ::= expr GT expr */ + { 256, -3 }, /* (191) expr ::= expr LE expr */ + { 256, -3 }, /* (192) expr ::= expr GE expr */ + { 256, -3 }, /* (193) expr ::= expr NE expr */ + { 256, -3 }, /* (194) expr ::= expr EQ expr */ + { 256, -3 }, /* (195) expr ::= expr PLUS expr */ + { 256, -3 }, /* (196) expr ::= expr MINUS expr */ + { 256, -3 }, /* (197) expr ::= expr STAR expr */ + { 256, -3 }, /* (198) expr ::= expr SLASH expr */ + { 256, -3 }, /* (199) expr ::= expr REM expr */ + { 256, -3 }, /* (200) expr ::= expr LIKE expr */ + { 256, -5 }, /* (201) expr ::= expr IN LP exprlist RP */ + { 265, -3 }, /* (202) exprlist ::= exprlist COMMA expritem */ + { 265, -1 }, /* (203) exprlist ::= expritem */ + { 266, -1 }, /* (204) expritem ::= expr */ + { 266, 0 }, /* (205) expritem ::= */ + { 205, -3 }, /* (206) cmd ::= RESET QUERY CACHE */ + { 205, -7 }, /* (207) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + { 205, -7 }, /* (208) cmd ::= ALTER TABLE ids 
cpxName DROP COLUMN ids */ + { 205, -7 }, /* (209) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + { 205, -7 }, /* (210) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + { 205, -8 }, /* (211) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + { 205, -9 }, /* (212) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + { 205, -5 }, /* (213) cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER */ + { 205, -7 }, /* (214) cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER */ + { 205, -7 }, /* (215) cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -1574,27 +1882,66 @@ static void yy_accept(yyParser*); /* Forward Declaration */ /* ** Perform a reduce action and the shift that must immediately ** follow the reduce. +** +** The yyLookahead and yyLookaheadToken parameters provide reduce actions +** access to the lookahead token (if any). The yyLookahead will be YYNOCODE +** if the lookahead token has already been consumed. As this procedure is +** only called from one place, optimizing compilers will in-line it, which +** means that the extra parameters have no performance impact. */ static void yy_reduce( yyParser *yypParser, /* The parser */ - int yyruleno /* Number of the rule by which to reduce */ + unsigned int yyruleno, /* Number of the rule by which to reduce */ + int yyLookahead, /* Lookahead token, or YYNOCODE if none */ + ParseTOKENTYPE yyLookaheadToken /* Value of the lookahead token */ ){ int yygoto; /* The next state */ int yyact; /* The next action */ - YYMINORTYPE yygotominor; /* The LHS of the rule reduced */ yyStackEntry *yymsp; /* The top of the parser's stack */ int yysize; /* Amount to pop the stack */ ParseARG_FETCH; - yymsp = &yypParser->yystack[yypParser->yyidx]; + (void)yyLookahead; + (void)yyLookaheadToken; + yymsp = yypParser->yytos; #ifndef NDEBUG - if( yyTraceFILE && yyruleno>=0 - && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ + if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ yysize = yyRuleInfo[yyruleno].nrhs; - fprintf(yyTraceFILE, "%sReduce [%s], go to state %d.\n", yyTracePrompt, - yyRuleName[yyruleno], yymsp[-yysize].stateno); + if( yysize ){ + fprintf(yyTraceFILE, "%sReduce %d [%s], go to state %d.\n", + yyTracePrompt, + yyruleno, yyRuleName[yyruleno], yymsp[yysize].stateno); + }else{ + fprintf(yyTraceFILE, "%sReduce %d [%s].\n", + yyTracePrompt, yyruleno, yyRuleName[yyruleno]); + } } #endif /* NDEBUG */ - yygotominor = yyzerominor; + + /* Check that the stack is large enough to grow by a single entry + ** if the RHS of the rule is empty. This ensures that there is room + ** enough on the stack to push the LHS value */ + if( yyRuleInfo[yyruleno].nrhs==0 ){ +#ifdef YYTRACKMAXSTACKDEPTH + if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack)); + } +#endif +#if YYSTACKDEPTH>0 + if( yypParser->yytos>=yypParser->yystackEnd ){ + yyStackOverflow(yypParser); + return; + } +#else + if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ + if( yyGrowStack(yypParser) ){ + yyStackOverflow(yypParser); + return; + } + yymsp = yypParser->yytos; + } +#endif + } switch( yyruleno ){ /* Beginning here are the reduction cases. 
A typical example @@ -1606,174 +1953,181 @@ static void yy_reduce( ** break; */ /********** Begin reduce actions **********************************************/ + YYMINORTYPE yylhsminor; case 0: /* program ::= cmd */ {} break; case 1: /* cmd ::= SHOW DATABASES */ -{ setDCLSQLElems(pInfo, SHOW_DATABASES, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_DB, 0, 0);} break; case 2: /* cmd ::= SHOW MNODES */ -{ setDCLSQLElems(pInfo, SHOW_MNODES, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);} break; case 3: /* cmd ::= SHOW DNODES */ -{ setDCLSQLElems(pInfo, SHOW_DNODES, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);} break; case 4: /* cmd ::= SHOW ACCOUNTS */ -{ setDCLSQLElems(pInfo, SHOW_ACCOUNTS, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);} break; case 5: /* cmd ::= SHOW USERS */ -{ setDCLSQLElems(pInfo, SHOW_USERS, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_USER, 0, 0);} break; case 6: /* cmd ::= SHOW MODULES */ -{ setDCLSQLElems(pInfo, SHOW_MODULES, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_MODULE, 0, 0); } break; case 7: /* cmd ::= SHOW QUERIES */ -{ setDCLSQLElems(pInfo, SHOW_QUERIES, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_QUERIES, 0, 0); } break; case 8: /* cmd ::= SHOW CONNECTIONS */ -{ setDCLSQLElems(pInfo, SHOW_CONNECTIONS, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_CONNS, 0, 0);} break; case 9: /* cmd ::= SHOW STREAMS */ -{ setDCLSQLElems(pInfo, SHOW_STREAMS, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_STREAMS, 0, 0); } break; case 10: /* cmd ::= SHOW CONFIGS */ -{ setDCLSQLElems(pInfo, SHOW_CONFIGS, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_CONFIGS, 0, 0); } break; case 11: /* cmd ::= SHOW SCORES */ -{ setDCLSQLElems(pInfo, SHOW_SCORES, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_SCORES, 0, 0); } break; case 12: /* cmd ::= SHOW GRANTS */ -{ setDCLSQLElems(pInfo, SHOW_GRANTS, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0); } break; case 13: /* cmd ::= SHOW VNODES */ -{ setDCLSQLElems(pInfo, SHOW_VNODES, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); } break; case 14: /* cmd ::= SHOW VNODES IPTOKEN */ -{ setDCLSQLElems(pInfo, SHOW_VNODES, 1, &yymsp[0].minor.yy0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &yymsp[0].minor.yy0, 0); } break; case 15: /* dbPrefix ::= */ - case 44: /* ifexists ::= */ yytestcase(yyruleno==44); - case 46: /* ifnotexists ::= */ yytestcase(yyruleno==46); -{yygotominor.yy0.n = 0;} +{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.type = 0;} break; case 16: /* dbPrefix ::= ids DOT */ -{yygotominor.yy0 = yymsp[-1].minor.yy0; } +{yylhsminor.yy0 = yymsp[-1].minor.yy0; } + yymsp[-1].minor.yy0 = yylhsminor.yy0; break; case 17: /* cpxName ::= */ -{yygotominor.yy0.n = 0; } +{yymsp[1].minor.yy0.n = 0; } break; case 18: /* cpxName ::= DOT ids */ -{yygotominor.yy0 = yymsp[0].minor.yy0; yygotominor.yy0.n += 1; } +{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n += 1; } break; case 19: /* cmd ::= SHOW dbPrefix TABLES */ { - setDCLSQLElems(pInfo, SHOW_TABLES, 1, &yymsp[-1].minor.yy0); + setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-1].minor.yy0, 0); } break; case 20: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ { - setDCLSQLElems(pInfo, SHOW_TABLES, 2, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0); + setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0); } break; case 21: /* cmd ::= SHOW dbPrefix STABLES */ { - setDCLSQLElems(pInfo, SHOW_STABLES, 1, &yymsp[-1].minor.yy0); + setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, 
&yymsp[-1].minor.yy0, 0); } break; case 22: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ { SSQLToken token; setDBName(&token, &yymsp[-3].minor.yy0); - setDCLSQLElems(pInfo, SHOW_STABLES, 2, &token, &yymsp[0].minor.yy0); + setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &yymsp[0].minor.yy0); } break; case 23: /* cmd ::= SHOW dbPrefix VGROUPS */ { SSQLToken token; setDBName(&token, &yymsp[-1].minor.yy0); - setDCLSQLElems(pInfo, SHOW_VGROUPS, 1, &token); + setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); } break; case 24: /* cmd ::= SHOW dbPrefix VGROUPS ids */ { SSQLToken token; setDBName(&token, &yymsp[-2].minor.yy0); - setDCLSQLElems(pInfo, SHOW_VGROUPS, 2, &token, &yymsp[0].minor.yy0); + setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0); } break; case 25: /* cmd ::= DROP TABLE ifexists ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - setDCLSQLElems(pInfo, DROP_TABLE, 2, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0); + setDropDBTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0); } break; case 26: /* cmd ::= DROP DATABASE ifexists ids */ -{ setDCLSQLElems(pInfo, DROP_DATABASE, 2, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0); } +{ setDropDBTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0); } break; case 27: /* cmd ::= DROP DNODE IPTOKEN */ -{ setDCLSQLElems(pInfo, DROP_DNODE, 1, &yymsp[0].minor.yy0); } +{ setDCLSQLElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); } break; case 28: /* cmd ::= DROP USER ids */ -{ setDCLSQLElems(pInfo, DROP_USER, 1, &yymsp[0].minor.yy0); } +{ setDCLSQLElems(pInfo, TSDB_SQL_DROP_USER, 1, &yymsp[0].minor.yy0); } break; case 29: /* cmd ::= DROP ACCOUNT ids */ -{ setDCLSQLElems(pInfo, DROP_ACCOUNT, 1, &yymsp[0].minor.yy0); } +{ setDCLSQLElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &yymsp[0].minor.yy0); } break; case 30: /* cmd ::= USE ids */ -{ setDCLSQLElems(pInfo, USE_DATABASE, 1, &yymsp[0].minor.yy0);} +{ setDCLSQLElems(pInfo, TSDB_SQL_USE_DB, 1, &yymsp[0].minor.yy0);} break; case 31: /* cmd ::= DESCRIBE ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - setDCLSQLElems(pInfo, DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); + setDCLSQLElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); } break; case 32: /* cmd ::= ALTER USER ids PASS ids */ -{ setDCLSQLElems(pInfo, ALTER_USER_PASSWD, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } +{ setAlterUserSQL(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); } break; case 33: /* cmd ::= ALTER USER ids PRIVILEGE ids */ -{ setDCLSQLElems(pInfo, ALTER_USER_PRIVILEGES, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} +{ setAlterUserSQL(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);} break; case 34: /* cmd ::= ALTER DNODE IPTOKEN ids */ -{ setDCLSQLElems(pInfo, ALTER_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; case 35: /* cmd ::= ALTER DNODE IPTOKEN ids ids */ -{ setDCLSQLElems(pInfo, ALTER_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; case 36: /* cmd ::= ALTER LOCAL ids */ -{ setDCLSQLElems(pInfo, ALTER_LOCAL, 1, &yymsp[0].minor.yy0); } +{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &yymsp[0].minor.yy0); } break; case 37: /* cmd ::= ALTER LOCAL ids ids */ -{ 
setDCLSQLElems(pInfo, ALTER_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; case 38: /* cmd ::= ALTER DATABASE ids alter_db_optr */ -{ SSQLToken t = {0}; setCreateDBSQL(pInfo, ALTER_DATABASE, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy262, &t);} +{ SSQLToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy532, &t);} break; case 39: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -{ SSQLToken t = {0}; setCreateAcctSQL(pInfo, ALTER_ACCT, &yymsp[-1].minor.yy0, &t, &yymsp[0].minor.yy155);} +{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy239);} break; case 40: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSQL(pInfo, ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy155);} +{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy239);} break; case 41: /* ids ::= ID */ case 42: /* ids ::= STRING */ yytestcase(yyruleno==42); -{yygotominor.yy0 = yymsp[0].minor.yy0; } +{yylhsminor.yy0 = yymsp[0].minor.yy0; } + yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 43: /* ifexists ::= IF EXISTS */ - case 45: /* ifnotexists ::= IF NOT EXISTS */ yytestcase(yyruleno==45); -{yygotominor.yy0.n = 1;} +{yymsp[-1].minor.yy0.n = 1;} + break; + case 44: /* ifexists ::= */ + case 46: /* ifnotexists ::= */ yytestcase(yyruleno==46); +{yymsp[1].minor.yy0.n = 0;} + break; + case 45: /* ifnotexists ::= IF NOT EXISTS */ +{yymsp[-2].minor.yy0.n = 1;} break; case 47: /* cmd ::= CREATE DNODE IPTOKEN */ -{ setDCLSQLElems(pInfo, CREATE_DNODE, 1, &yymsp[0].minor.yy0);} +{ setDCLSQLElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; case 48: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSQL(pInfo, CREATE_ACCOUNT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy155);} +{ setCreateAcctSQL(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy239);} break; case 49: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ -{ setCreateDBSQL(pInfo, CREATE_DATABASE, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy262, &yymsp[-2].minor.yy0);} +{ setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy532, &yymsp[-2].minor.yy0);} break; case 50: /* cmd ::= CREATE USER ids PASS ids */ -{ setDCLSQLElems(pInfo, CREATE_USER, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} +{ setCreateUserSQL(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} break; case 51: /* pps ::= */ case 53: /* tseries ::= */ yytestcase(yyruleno==53); @@ -1784,7 +2138,7 @@ static void yy_reduce( case 63: /* users ::= */ yytestcase(yyruleno==63); case 65: /* conns ::= */ yytestcase(yyruleno==65); case 67: /* state ::= */ yytestcase(yyruleno==67); -{yygotominor.yy0.n = 0; } +{yymsp[1].minor.yy0.n = 0; } break; case 52: /* pps ::= PPS INTEGER */ case 54: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==54); @@ -1795,23 +2149,24 @@ static void yy_reduce( case 64: /* users ::= USERS INTEGER */ yytestcase(yyruleno==64); case 66: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==66); case 68: /* state ::= STATE ids */ yytestcase(yyruleno==68); -{yygotominor.yy0 = yymsp[0].minor.yy0; } +{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; case 69: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { - yygotominor.yy155.users = 
(yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yygotominor.yy155.dbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yygotominor.yy155.tseries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yygotominor.yy155.streams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yygotominor.yy155.pps = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - yygotominor.yy155.storage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yygotominor.yy155.qtime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yygotominor.yy155.conns = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yygotominor.yy155.stat = yymsp[0].minor.yy0; + yylhsminor.yy239.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yylhsminor.yy239.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yylhsminor.yy239.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yylhsminor.yy239.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yylhsminor.yy239.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + yylhsminor.yy239.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy239.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy239.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + yylhsminor.yy239.stat = yymsp[0].minor.yy0; } + yymsp[-8].minor.yy239 = yylhsminor.yy239; break; case 70: /* keep ::= KEEP tagitemlist */ -{ yygotominor.yy480 = yymsp[0].minor.yy480; } +{ yymsp[-1].minor.yy30 = yymsp[0].minor.yy30; } break; case 71: /* tables ::= TABLES INTEGER */ case 72: /* cache ::= CACHE INTEGER */ yytestcase(yyruleno==72); @@ -1824,67 +2179,84 @@ static void yy_reduce( case 79: /* clog ::= CLOG INTEGER */ yytestcase(yyruleno==79); case 80: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==80); case 81: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==81); -{ yygotominor.yy0 = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; case 82: /* db_optr ::= */ -{setDefaultCreateDbOption(&yygotominor.yy262);} +{setDefaultCreateDbOption(&yymsp[1].minor.yy532);} break; case 83: /* db_optr ::= db_optr tables */ case 97: /* alter_db_optr ::= alter_db_optr tables */ yytestcase(yyruleno==97); -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.tablesPerVnode = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.tablesPerVnode = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 84: /* db_optr ::= db_optr cache */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 85: /* db_optr ::= db_optr replica */ case 96: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==96); -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 86: /* db_optr ::= db_optr days */ -{ yygotominor.yy262 = 
yymsp[-1].minor.yy262; yygotominor.yy262.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 87: /* db_optr ::= db_optr rows */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.rowPerFileBlock = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.rowPerFileBlock = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 88: /* db_optr ::= db_optr ablocks */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.numOfAvgCacheBlocks = strtod(yymsp[0].minor.yy0.z, NULL); } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.numOfAvgCacheBlocks = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 89: /* db_optr ::= db_optr tblocks */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.numOfBlocksPerTable = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.numOfBlocksPerTable = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 90: /* db_optr ::= db_optr ctime */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 91: /* db_optr ::= db_optr clog */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.commitLog = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.commitLog = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 92: /* db_optr ::= db_optr comp */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 93: /* db_optr ::= db_optr prec */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.precision = yymsp[0].minor.yy0; } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.precision = yymsp[0].minor.yy0; } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 94: /* db_optr ::= db_optr keep */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.keep = yymsp[0].minor.yy480; } +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.keep = yymsp[0].minor.yy30; } + yymsp[-1].minor.yy532 = yylhsminor.yy532; break; case 95: /* alter_db_optr ::= */ -{ setDefaultCreateDbOption(&yygotominor.yy262);} +{ setDefaultCreateDbOption(&yymsp[1].minor.yy532);} break; case 98: /* typename ::= ids */ -{ tSQLSetColumnType (&yygotominor.yy397, &yymsp[0].minor.yy0); } +{ tSQLSetColumnType (&yylhsminor.yy505, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy505 = yylhsminor.yy505; break; case 99: /* typename ::= ids LP signed RP */ { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy369; // negative value of name length - tSQLSetColumnType(&yygotominor.yy397, &yymsp[-3].minor.yy0); + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy489; // negative value of name length + tSQLSetColumnType(&yylhsminor.yy505, &yymsp[-3].minor.yy0); } + 
yymsp[-3].minor.yy505 = yylhsminor.yy505; break; case 100: /* signed ::= INTEGER */ - case 101: /* signed ::= PLUS INTEGER */ yytestcase(yyruleno==101); -{ yygotominor.yy369 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yylhsminor.yy489 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[0].minor.yy489 = yylhsminor.yy489; + break; + case 101: /* signed ::= PLUS INTEGER */ +{ yymsp[-1].minor.yy489 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; case 102: /* signed ::= MINUS INTEGER */ -{ yygotominor.yy369 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} +{ yymsp[-1].minor.yy489 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; case 103: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ { @@ -1894,54 +2266,61 @@ static void yy_reduce( break; case 104: /* create_table_args ::= LP columnlist RP */ { - yygotominor.yy344 = tSetCreateSQLElems(yymsp[-1].minor.yy421, NULL, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METER); - setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_NORMAL_METER); + yymsp[-2].minor.yy212 = tSetCreateSQLElems(yymsp[-1].minor.yy325, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE); + setSQLInfo(pInfo, yymsp[-2].minor.yy212, NULL, TSDB_SQL_CREATE_TABLE); } break; case 105: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ { - yygotominor.yy344 = tSetCreateSQLElems(yymsp[-5].minor.yy421, yymsp[-1].minor.yy421, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METRIC); - setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_NORMAL_METRIC); + yymsp[-6].minor.yy212 = tSetCreateSQLElems(yymsp[-5].minor.yy325, yymsp[-1].minor.yy325, NULL, NULL, NULL, TSQL_CREATE_STABLE); + setSQLInfo(pInfo, yymsp[-6].minor.yy212, NULL, TSDB_SQL_CREATE_TABLE); } break; case 106: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; - yygotominor.yy344 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy480, NULL, TSQL_CREATE_METER_FROM_METRIC); - setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_METER_FROM_METRIC); + yymsp[-6].minor.yy212 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy30, NULL, TSQL_CREATE_TABLE_FROM_STABLE); + setSQLInfo(pInfo, yymsp[-6].minor.yy212, NULL, TSDB_SQL_CREATE_TABLE); } break; case 107: /* create_table_args ::= AS select */ { - yygotominor.yy344 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy138, TSQL_CREATE_STREAM); - setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_STREAM); + yymsp[-1].minor.yy212 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy444, TSQL_CREATE_STREAM); + setSQLInfo(pInfo, yymsp[-1].minor.yy212, NULL, TSDB_SQL_CREATE_TABLE); } break; case 108: /* columnlist ::= columnlist COMMA column */ -{yygotominor.yy421 = tFieldListAppend(yymsp[-2].minor.yy421, &yymsp[0].minor.yy397); } +{yylhsminor.yy325 = tFieldListAppend(yymsp[-2].minor.yy325, &yymsp[0].minor.yy505); } + yymsp[-2].minor.yy325 = yylhsminor.yy325; break; case 109: /* columnlist ::= column */ -{yygotominor.yy421 = tFieldListAppend(NULL, &yymsp[0].minor.yy397);} +{yylhsminor.yy325 = tFieldListAppend(NULL, &yymsp[0].minor.yy505);} + yymsp[0].minor.yy325 = yylhsminor.yy325; break; case 110: /* column ::= ids typename */ { - tSQLSetColumnInfo(&yygotominor.yy397, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy397); + tSQLSetColumnInfo(&yylhsminor.yy505, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy505); } + yymsp[-1].minor.yy505 = yylhsminor.yy505; break; case 111: /* tagitemlist ::= tagitemlist COMMA tagitem */ -{ yygotominor.yy480 = 
tVariantListAppend(yymsp[-2].minor.yy480, &yymsp[0].minor.yy236, -1); } +{ yylhsminor.yy30 = tVariantListAppend(yymsp[-2].minor.yy30, &yymsp[0].minor.yy380, -1); } + yymsp[-2].minor.yy30 = yylhsminor.yy30; break; case 112: /* tagitemlist ::= tagitem */ -{ yygotominor.yy480 = tVariantListAppend(NULL, &yymsp[0].minor.yy236, -1); } +{ yylhsminor.yy30 = tVariantListAppend(NULL, &yymsp[0].minor.yy380, -1); } + yymsp[0].minor.yy30 = yylhsminor.yy30; break; case 113: /* tagitem ::= INTEGER */ case 114: /* tagitem ::= FLOAT */ yytestcase(yyruleno==114); case 115: /* tagitem ::= STRING */ yytestcase(yyruleno==115); case 116: /* tagitem ::= BOOL */ yytestcase(yyruleno==116); -{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yygotominor.yy236, &yymsp[0].minor.yy0); } +{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy380, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy380 = yylhsminor.yy380; break; case 117: /* tagitem ::= NULL */ -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yygotominor.yy236, &yymsp[0].minor.yy0); } +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy380, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy380 = yylhsminor.yy380; break; case 118: /* tagitem ::= MINUS INTEGER */ case 119: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==119); @@ -1951,308 +2330,360 @@ static void yy_reduce( yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yygotominor.yy236, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy380, &yymsp[-1].minor.yy0); } + yymsp[-1].minor.yy380 = yylhsminor.yy380; break; - case 122: /* cmd ::= select */ + case 122: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - setSQLInfo(pInfo, yymsp[0].minor.yy138, NULL, TSQL_QUERY_METER); + yylhsminor.yy444 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy506, yymsp[-9].minor.yy30, yymsp[-8].minor.yy388, yymsp[-4].minor.yy30, yymsp[-3].minor.yy30, &yymsp[-7].minor.yy0, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy30, &yymsp[0].minor.yy150, &yymsp[-1].minor.yy150); } + yymsp[-11].minor.yy444 = yylhsminor.yy444; break; - case 123: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ -{ - yygotominor.yy138 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy284, yymsp[-9].minor.yy480, yymsp[-8].minor.yy244, yymsp[-4].minor.yy480, yymsp[-3].minor.yy480, &yymsp[-7].minor.yy0, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy480, &yymsp[0].minor.yy162, &yymsp[-1].minor.yy162); -} + case 123: /* union ::= select */ +{ yylhsminor.yy309 = setSubclause(NULL, yymsp[0].minor.yy444); } + yymsp[0].minor.yy309 = yylhsminor.yy309; + break; + case 124: /* union ::= LP union RP */ +{ yymsp[-2].minor.yy309 = yymsp[-1].minor.yy309; } break; - case 124: /* select ::= SELECT selcollist */ + case 125: /* union ::= union UNION ALL select */ +{ yylhsminor.yy309 = appendSelectClause(yymsp[-3].minor.yy309, yymsp[0].minor.yy444); } + yymsp[-3].minor.yy309 = yylhsminor.yy309; + break; + case 126: /* union ::= union UNION ALL LP select RP */ +{ yylhsminor.yy309 = appendSelectClause(yymsp[-5].minor.yy309, yymsp[-1].minor.yy444); } + yymsp[-5].minor.yy309 = yylhsminor.yy309; + break; + case 127: /* cmd ::= union */ +{ setSQLInfo(pInfo, yymsp[0].minor.yy309, NULL, TSDB_SQL_SELECT); } + break; + case 128: /* select ::= SELECT selcollist */ { - 
yygotominor.yy138 = tSetQuerySQLElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy284, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy444 = tSetQuerySQLElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy506, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } + yymsp[-1].minor.yy444 = yylhsminor.yy444; break; - case 125: /* sclp ::= selcollist COMMA */ -{yygotominor.yy284 = yymsp[-1].minor.yy284;} + case 129: /* sclp ::= selcollist COMMA */ +{yylhsminor.yy506 = yymsp[-1].minor.yy506;} + yymsp[-1].minor.yy506 = yylhsminor.yy506; break; - case 126: /* sclp ::= */ -{yygotominor.yy284 = 0;} + case 130: /* sclp ::= */ +{yymsp[1].minor.yy506 = 0;} break; - case 127: /* selcollist ::= sclp expr as */ + case 131: /* selcollist ::= sclp expr as */ { - yygotominor.yy284 = tSQLExprListAppend(yymsp[-2].minor.yy284, yymsp[-1].minor.yy244, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yylhsminor.yy506 = tSQLExprListAppend(yymsp[-2].minor.yy506, yymsp[-1].minor.yy388, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } + yymsp[-2].minor.yy506 = yylhsminor.yy506; break; - case 128: /* selcollist ::= sclp STAR */ + case 132: /* selcollist ::= sclp STAR */ { tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL); - yygotominor.yy284 = tSQLExprListAppend(yymsp[-1].minor.yy284, pNode, 0); + yylhsminor.yy506 = tSQLExprListAppend(yymsp[-1].minor.yy506, pNode, 0); } + yymsp[-1].minor.yy506 = yylhsminor.yy506; + break; + case 133: /* as ::= AS ids */ +{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 129: /* as ::= AS ids */ - case 130: /* as ::= ids */ yytestcase(yyruleno==130); -{ yygotominor.yy0 = yymsp[0].minor.yy0; } + case 134: /* as ::= ids */ +{ yylhsminor.yy0 = yymsp[0].minor.yy0; } + yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 131: /* as ::= */ -{ yygotominor.yy0.n = 0; } + case 135: /* as ::= */ +{ yymsp[1].minor.yy0.n = 0; } break; - case 132: /* from ::= FROM tablelist */ - case 144: /* orderby_opt ::= ORDER BY sortlist */ yytestcase(yyruleno==144); - case 152: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==152); -{yygotominor.yy480 = yymsp[0].minor.yy480;} + case 136: /* from ::= FROM tablelist */ +{yymsp[-1].minor.yy30 = yymsp[0].minor.yy30;} break; - case 133: /* tablelist ::= ids cpxName */ -{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yygotominor.yy480 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);} + case 137: /* tablelist ::= ids cpxName */ +{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy30 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);} + yymsp[-1].minor.yy30 = yylhsminor.yy30; break; - case 134: /* tablelist ::= tablelist COMMA ids cpxName */ -{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yygotominor.yy480 = tVariantListAppendToken(yymsp[-3].minor.yy480, &yymsp[-1].minor.yy0, -1); } + case 138: /* tablelist ::= tablelist COMMA ids cpxName */ +{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy30 = tVariantListAppendToken(yymsp[-3].minor.yy30, &yymsp[-1].minor.yy0, -1); } + yymsp[-3].minor.yy30 = yylhsminor.yy30; break; - case 135: /* tmvar ::= VARIABLE */ -{yygotominor.yy0 = yymsp[0].minor.yy0;} + case 139: /* tmvar ::= VARIABLE */ +{yylhsminor.yy0 = yymsp[0].minor.yy0;} + yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 136: /* interval_opt ::= INTERVAL LP tmvar RP */ - case 141: /* sliding_opt ::= SLIDING LP tmvar RP */ 
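The mechanical change running through all of these reduce actions is the retirement of the shared `yygotominor` variable: each action now computes its value in `yylhsminor` (or writes it in place) and then stores it into the stack slot that becomes the rule's left-hand side. The toy model below is illustrative only; the struct, the helper names, and the two-symbol right-hand side are invented for the sketch and are not taken from the generated parser.

```c
#include <stdio.h>

typedef struct { int value; } StackEntry;

/* Old convention: the action computes into a detached temporary and the
 * framework copies it onto the stack after popping the right-hand side. */
static void reduceOld(StackEntry *yymsp, StackEntry *yygotominor) {
  yygotominor->value = yymsp[-1].value + yymsp[0].value;
}

/* New convention: the action writes the left-hand-side value straight into
 * yymsp[-(nrhs-1)], the slot that becomes the new stack top, so no
 * post-reduce copy is needed. */
static void reduceNew(StackEntry *yymsp) {
  StackEntry yylhsminor;
  yylhsminor.value = yymsp[-1].value + yymsp[0].value;
  yymsp[-1] = yylhsminor;  /* the "yymsp[-1].minor.yyNNN = yylhsminor.yyNNN" write-back */
}

int main(void) {
  StackEntry st[4] = { {0}, {0}, {2}, {5} };
  StackEntry out = {0};
  reduceOld(&st[3], &out);
  printf("old: %d\n", out.value);   /* 7, still has to be copied onto the stack */
  reduceNew(&st[3]);
  printf("new: %d\n", st[2].value); /* 7, already in place */
  return 0;
}
```

This is why almost every action above gained a trailing `yymsp[-k].minor.yyNNN = yylhsminor.yyNNN;` line: the computed value must land in the slot the parser keeps after the pop.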
yytestcase(yyruleno==141); -{yygotominor.yy0 = yymsp[-1].minor.yy0; } + case 140: /* interval_opt ::= INTERVAL LP tmvar RP */ + case 145: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==145); +{yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 137: /* interval_opt ::= */ - case 142: /* sliding_opt ::= */ yytestcase(yyruleno==142); -{yygotominor.yy0.n = 0; yygotominor.yy0.z = NULL; yygotominor.yy0.type = 0; } + case 141: /* interval_opt ::= */ + case 146: /* sliding_opt ::= */ yytestcase(yyruleno==146); +{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 138: /* fill_opt ::= */ -{yygotominor.yy480 = 0; } + case 142: /* fill_opt ::= */ +{yymsp[1].minor.yy30 = 0; } break; - case 139: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 143: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy480, &A, -1, 0); - yygotominor.yy480 = yymsp[-1].minor.yy480; + tVariantListInsert(yymsp[-1].minor.yy30, &A, -1, 0); + yymsp[-5].minor.yy30 = yymsp[-1].minor.yy30; } break; - case 140: /* fill_opt ::= FILL LP ID RP */ + case 144: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yygotominor.yy480 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy30 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 143: /* orderby_opt ::= */ - case 151: /* groupby_opt ::= */ yytestcase(yyruleno==151); -{yygotominor.yy480 = 0;} + case 147: /* orderby_opt ::= */ + case 155: /* groupby_opt ::= */ yytestcase(yyruleno==155); +{yymsp[1].minor.yy30 = 0;} break; - case 145: /* sortlist ::= sortlist COMMA item sortorder */ + case 148: /* orderby_opt ::= ORDER BY sortlist */ + case 156: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==156); +{yymsp[-2].minor.yy30 = yymsp[0].minor.yy30;} + break; + case 149: /* sortlist ::= sortlist COMMA item sortorder */ { - yygotominor.yy480 = tVariantListAppend(yymsp[-3].minor.yy480, &yymsp[-1].minor.yy236, yymsp[0].minor.yy220); + yylhsminor.yy30 = tVariantListAppend(yymsp[-3].minor.yy30, &yymsp[-1].minor.yy380, yymsp[0].minor.yy250); } + yymsp[-3].minor.yy30 = yylhsminor.yy30; break; - case 146: /* sortlist ::= item sortorder */ + case 150: /* sortlist ::= item sortorder */ { - yygotominor.yy480 = tVariantListAppend(NULL, &yymsp[-1].minor.yy236, yymsp[0].minor.yy220); + yylhsminor.yy30 = tVariantListAppend(NULL, &yymsp[-1].minor.yy380, yymsp[0].minor.yy250); } + yymsp[-1].minor.yy30 = yylhsminor.yy30; break; - case 147: /* item ::= ids cpxName */ + case 151: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - tVariantCreate(&yygotominor.yy236, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy380, &yymsp[-1].minor.yy0); } + yymsp[-1].minor.yy380 = yylhsminor.yy380; break; - case 148: /* sortorder ::= ASC */ -{yygotominor.yy220 = TSQL_SO_ASC; } + case 152: /* sortorder ::= ASC */ +{yymsp[0].minor.yy250 = TSQL_SO_ASC; } break; - case 149: /* sortorder ::= DESC */ -{yygotominor.yy220 = TSQL_SO_DESC;} + case 153: /* sortorder ::= DESC */ +{yymsp[0].minor.yy250 = TSQL_SO_DESC;} break; - case 150: /* sortorder ::= */ -{yygotominor.yy220 = TSQL_SO_ASC;} + case 154: /* sortorder ::= */ +{yymsp[1].minor.yy250 = TSQL_SO_ASC;} break; - case 153: /* grouplist ::= grouplist COMMA item */ + case 157: /* grouplist ::= grouplist COMMA item 
*/ { - yygotominor.yy480 = tVariantListAppend(yymsp[-2].minor.yy480, &yymsp[0].minor.yy236, -1); + yylhsminor.yy30 = tVariantListAppend(yymsp[-2].minor.yy30, &yymsp[0].minor.yy380, -1); } + yymsp[-2].minor.yy30 = yylhsminor.yy30; break; - case 154: /* grouplist ::= item */ + case 158: /* grouplist ::= item */ { - yygotominor.yy480 = tVariantListAppend(NULL, &yymsp[0].minor.yy236, -1); + yylhsminor.yy30 = tVariantListAppend(NULL, &yymsp[0].minor.yy380, -1); } + yymsp[0].minor.yy30 = yylhsminor.yy30; break; - case 155: /* having_opt ::= */ - case 165: /* where_opt ::= */ yytestcase(yyruleno==165); - case 201: /* expritem ::= */ yytestcase(yyruleno==201); -{yygotominor.yy244 = 0;} + case 159: /* having_opt ::= */ + case 169: /* where_opt ::= */ yytestcase(yyruleno==169); + case 205: /* expritem ::= */ yytestcase(yyruleno==205); +{yymsp[1].minor.yy388 = 0;} break; - case 156: /* having_opt ::= HAVING expr */ - case 166: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==166); - case 200: /* expritem ::= expr */ yytestcase(yyruleno==200); -{yygotominor.yy244 = yymsp[0].minor.yy244;} + case 160: /* having_opt ::= HAVING expr */ + case 170: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==170); +{yymsp[-1].minor.yy388 = yymsp[0].minor.yy388;} break; - case 157: /* limit_opt ::= */ - case 161: /* slimit_opt ::= */ yytestcase(yyruleno==161); -{yygotominor.yy162.limit = -1; yygotominor.yy162.offset = 0;} + case 161: /* limit_opt ::= */ + case 165: /* slimit_opt ::= */ yytestcase(yyruleno==165); +{yymsp[1].minor.yy150.limit = -1; yymsp[1].minor.yy150.offset = 0;} break; - case 158: /* limit_opt ::= LIMIT signed */ - case 162: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==162); -{yygotominor.yy162.limit = yymsp[0].minor.yy369; yygotominor.yy162.offset = 0;} + case 162: /* limit_opt ::= LIMIT signed */ + case 166: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==166); +{yymsp[-1].minor.yy150.limit = yymsp[0].minor.yy489; yymsp[-1].minor.yy150.offset = 0;} break; - case 159: /* limit_opt ::= LIMIT signed OFFSET signed */ - case 163: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==163); -{yygotominor.yy162.limit = yymsp[-2].minor.yy369; yygotominor.yy162.offset = yymsp[0].minor.yy369;} + case 163: /* limit_opt ::= LIMIT signed OFFSET signed */ + case 167: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==167); +{yymsp[-3].minor.yy150.limit = yymsp[-2].minor.yy489; yymsp[-3].minor.yy150.offset = yymsp[0].minor.yy489;} break; - case 160: /* limit_opt ::= LIMIT signed COMMA signed */ - case 164: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==164); -{yygotominor.yy162.limit = yymsp[0].minor.yy369; yygotominor.yy162.offset = yymsp[-2].minor.yy369;} + case 164: /* limit_opt ::= LIMIT signed COMMA signed */ + case 168: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==168); +{yymsp[-3].minor.yy150.limit = yymsp[0].minor.yy489; yymsp[-3].minor.yy150.offset = yymsp[-2].minor.yy489;} break; - case 167: /* expr ::= LP expr RP */ -{yygotominor.yy244 = yymsp[-1].minor.yy244; } + case 171: /* expr ::= LP expr RP */ +{yymsp[-2].minor.yy388 = yymsp[-1].minor.yy388; } break; - case 168: /* expr ::= ID */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} + case 172: /* expr ::= ID */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 169: /* expr ::= ID DOT ID */ -{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); 
yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} + case 173: /* expr ::= ID DOT ID */ +{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 170: /* expr ::= ID DOT STAR */ -{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} + case 174: /* expr ::= ID DOT STAR */ +{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 171: /* expr ::= INTEGER */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} + case 175: /* expr ::= INTEGER */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 172: /* expr ::= MINUS INTEGER */ - case 173: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==173); -{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} + case 176: /* expr ::= MINUS INTEGER */ + case 177: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==177); +{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} + yymsp[-1].minor.yy388 = yylhsminor.yy388; break; - case 174: /* expr ::= FLOAT */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} + case 178: /* expr ::= FLOAT */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 175: /* expr ::= MINUS FLOAT */ - case 176: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==176); -{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} + case 179: /* expr ::= MINUS FLOAT */ + case 180: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==180); +{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} + yymsp[-1].minor.yy388 = yylhsminor.yy388; break; - case 177: /* expr ::= STRING */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} + case 181: /* expr ::= STRING */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 178: /* expr ::= NOW */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } + case 182: /* expr ::= NOW */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 179: /* expr ::= VARIABLE */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} + case 183: /* expr ::= VARIABLE */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 180: /* expr ::= BOOL */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} + case 184: /* expr ::= BOOL */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 181: /* expr ::= ID LP exprlist RP */ + case 185: /* expr ::= ID LP 
exprlist RP */ { - yygotominor.yy244 = tSQLExprCreateFunction(yymsp[-1].minor.yy284, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); + yylhsminor.yy388 = tSQLExprCreateFunction(yymsp[-1].minor.yy506, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy388 = yylhsminor.yy388; break; - case 182: /* expr ::= ID LP STAR RP */ + case 186: /* expr ::= ID LP STAR RP */ { - yygotominor.yy244 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); + yylhsminor.yy388 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy388 = yylhsminor.yy388; break; - case 183: /* expr ::= expr AND expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_AND);} - break; - case 184: /* expr ::= expr OR expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_OR); } - break; - case 185: /* expr ::= expr LT expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_LT);} + case 187: /* expr ::= expr AND expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_AND);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 186: /* expr ::= expr GT expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_GT);} + case 188: /* expr ::= expr OR expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_OR); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 187: /* expr ::= expr LE expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_LE);} + case 189: /* expr ::= expr LT expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_LT);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 188: /* expr ::= expr GE expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_GE);} + case 190: /* expr ::= expr GT expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_GT);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 189: /* expr ::= expr NE expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_NE);} + case 191: /* expr ::= expr LE expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_LE);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 190: /* expr ::= expr EQ expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_EQ);} + case 192: /* expr ::= expr GE expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_GE);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 191: /* expr ::= expr PLUS expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_PLUS); } + case 193: /* expr ::= expr NE expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_NE);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 192: /* expr ::= expr MINUS expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_MINUS); } + case 194: /* expr ::= expr EQ expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_EQ);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 193: /* expr ::= expr STAR 
expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_STAR); } + case 195: /* expr ::= expr PLUS expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_PLUS); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 194: /* expr ::= expr SLASH expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_DIVIDE);} + case 196: /* expr ::= expr MINUS expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_MINUS); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 195: /* expr ::= expr REM expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_REM); } + case 197: /* expr ::= expr STAR expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_STAR); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 196: /* expr ::= expr LIKE expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_LIKE); } + case 198: /* expr ::= expr SLASH expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_DIVIDE);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 197: /* expr ::= expr IN LP exprlist RP */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-4].minor.yy244, (tSQLExpr*)yymsp[-1].minor.yy284, TK_IN); } + case 199: /* expr ::= expr REM expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_REM); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 198: /* exprlist ::= exprlist COMMA expritem */ - case 205: /* itemlist ::= itemlist COMMA expr */ yytestcase(yyruleno==205); -{yygotominor.yy284 = tSQLExprListAppend(yymsp[-2].minor.yy284,yymsp[0].minor.yy244,0);} + case 200: /* expr ::= expr LIKE expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_LIKE); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 199: /* exprlist ::= expritem */ - case 206: /* itemlist ::= expr */ yytestcase(yyruleno==206); -{yygotominor.yy284 = tSQLExprListAppend(0,yymsp[0].minor.yy244,0);} + case 201: /* expr ::= expr IN LP exprlist RP */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-4].minor.yy388, (tSQLExpr*)yymsp[-1].minor.yy506, TK_IN); } + yymsp[-4].minor.yy388 = yylhsminor.yy388; break; - case 202: /* cmd ::= INSERT INTO cpxName insert_value_list */ -{ - tSetInsertSQLElems(pInfo, &yymsp[-1].minor.yy0, yymsp[0].minor.yy237); -} + case 202: /* exprlist ::= exprlist COMMA expritem */ +{yylhsminor.yy506 = tSQLExprListAppend(yymsp[-2].minor.yy506,yymsp[0].minor.yy388,0);} + yymsp[-2].minor.yy506 = yylhsminor.yy506; break; - case 203: /* insert_value_list ::= VALUES LP itemlist RP */ -{yygotominor.yy237 = tSQLListListAppend(NULL, yymsp[-1].minor.yy284);} + case 203: /* exprlist ::= expritem */ +{yylhsminor.yy506 = tSQLExprListAppend(0,yymsp[0].minor.yy388,0);} + yymsp[0].minor.yy506 = yylhsminor.yy506; break; - case 204: /* insert_value_list ::= insert_value_list VALUES LP itemlist RP */ -{yygotominor.yy237 = tSQLListListAppend(yymsp[-4].minor.yy237, yymsp[-1].minor.yy284);} + case 204: /* expritem ::= expr */ +{yylhsminor.yy388 = yymsp[0].minor.yy388;} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 207: /* cmd ::= RESET QUERY CACHE */ -{ setDCLSQLElems(pInfo, RESET_QUERY_CACHE, 0);} + case 206: /* cmd ::= RESET QUERY CACHE */ +{ setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 208: /* cmd ::= ALTER TABLE ids cpxName ADD 
COLUMN columnlist */ + case 207: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, ALTER_TABLE_ADD_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_ADD_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy325, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 209: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 208: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); tVariantList* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, K, ALTER_TABLE_DROP_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_DROP_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 210: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 209: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, ALTER_TABLE_TAGS_ADD); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_ADD); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy325, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 211: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 210: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); tVariantList* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, A, ALTER_TABLE_TAGS_DROP); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_DROP); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 212: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 211: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -2262,60 +2693,52 @@ static void yy_reduce( toTSDBType(yymsp[0].minor.yy0.type); A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-5].minor.yy0, NULL, A, ALTER_TABLE_TAGS_CHG); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_CHG); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 213: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 212: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); tVariantList* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - A = tVariantListAppend(A, &yymsp[0].minor.yy236, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy380, -1); - SAlterTableSQL* 
pAlterTable = tAlterTableSQLElems(&yymsp[-6].minor.yy0, NULL, A, ALTER_TABLE_TAGS_SET); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_SET); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 214: /* cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER */ -{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_CONNECTION, 1, &yymsp[-2].minor.yy0);} + case 213: /* cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER */ +{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[-2].minor.yy0);} break; - case 215: /* cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER */ -{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_STREAM, 1, &yymsp[-4].minor.yy0);} + case 214: /* cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER */ +{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-4].minor.yy0);} break; - case 216: /* cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER */ -{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_QUERY, 1, &yymsp[-4].minor.yy0);} + case 215: /* cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER */ +{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-4].minor.yy0);} break; default: break; /********** End reduce actions ************************************************/ }; - assert( yyruleno>=0 && yyruleno<(int)(sizeof(yyRuleInfo)/sizeof(yyRuleInfo[0])) ); yygoto = yyRuleInfo[yyruleno].lhs; yysize = yyRuleInfo[yyruleno].nrhs; - yypParser->yyidx -= yysize; - yyact = yy_find_reduce_action(yymsp[-yysize].stateno,(YYCODETYPE)yygoto); - if( yyact <= YY_MAX_SHIFTREDUCE ){ - if( yyact>YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; - /* If the reduce action popped at least - ** one element off the stack, then we can push the new element back - ** onto the stack here, and skip the stack overflow test in yy_shift(). - ** That gives a significant speed improvement. */ - if( yysize ){ - yypParser->yyidx++; - yymsp -= yysize-1; - yymsp->stateno = (YYACTIONTYPE)yyact; - yymsp->major = (YYCODETYPE)yygoto; - yymsp->minor = yygotominor; - yyTraceShift(yypParser, yyact); - }else{ - yy_shift(yypParser,yyact,yygoto,&yygotominor); - } - }else{ - assert( yyact == YY_ACCEPT_ACTION ); - yy_accept(yypParser); - } + yyact = yy_find_reduce_action(yymsp[yysize].stateno,(YYCODETYPE)yygoto); + + /* There are no SHIFTREDUCE actions on nonterminals because the table + ** generator has simplified them to pure REDUCE actions. */ + assert( !(yyact>YY_MAX_SHIFT && yyact<=YY_MAX_SHIFTREDUCE) ); + + /* It is not possible for a REDUCE to be followed by an error */ + assert( yyact!=YY_ERROR_ACTION ); + + yymsp += yysize+1; + yypParser->yytos = yymsp; + yymsp->stateno = (YYACTIONTYPE)yyact; + yymsp->major = (YYCODETYPE)yygoto; + yyTraceShift(yypParser, yyact, "... 
then shift"); } /* @@ -2331,7 +2754,7 @@ static void yy_parse_failed( fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); } #endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); + while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser); /* Here code is inserted which will be executed whenever the ** parser fails */ /************ Begin %parse_failure code ***************************************/ @@ -2346,13 +2769,13 @@ static void yy_parse_failed( static void yy_syntax_error( yyParser *yypParser, /* The parser */ int yymajor, /* The major type of the error token */ - YYMINORTYPE yyminor /* The minor type of the error token */ + ParseTOKENTYPE yyminor /* The minor type of the error token */ ){ ParseARG_FETCH; -#define TOKEN (yyminor.yy0) +#define TOKEN yyminor /************ Begin %syntax_error code ****************************************/ - pInfo->validSql = false; + pInfo->valid = false; int32_t outputBufLen = tListLen(pInfo->pzErrMsg); int32_t len = 0; @@ -2389,7 +2812,10 @@ static void yy_accept( fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); } #endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + assert( yypParser->yytos==yypParser->yystack ); /* Here code is inserted which will be executed whenever the ** parser accepts */ /*********** Begin %parse_accept code *****************************************/ @@ -2424,7 +2850,7 @@ void Parse( ParseARG_PDECL /* Optional %extra_argument parameter */ ){ YYMINORTYPE yyminorunion; - int yyact; /* The parser action. */ + unsigned int yyact; /* The parser action. */ #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) int yyendofinput; /* True if we are at the end of input */ #endif @@ -2433,29 +2859,8 @@ void Parse( #endif yyParser *yypParser; /* The parser */ - /* (re)initialize the parser, if necessary */ yypParser = (yyParser*)yyp; - if( yypParser->yyidx<0 ){ -#if YYSTACKDEPTH<=0 - if( yypParser->yystksz <=0 ){ - /*memset(&yyminorunion, 0, sizeof(yyminorunion));*/ - yyminorunion = yyzerominor; - yyStackOverflow(yypParser, &yyminorunion); - return; - } -#endif - yypParser->yyidx = 0; - yypParser->yyerrcnt = -1; - yypParser->yystack[0].stateno = 0; - yypParser->yystack[0].major = 0; -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sInitialize. Empty stack. 
State 0\n", - yyTracePrompt); - } -#endif - } - yyminorunion.yy0 = yyminor; + assert( yypParser->yytos!=0 ); #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) yyendofinput = (yymajor==0); #endif @@ -2463,21 +2868,34 @@ void Parse( #ifndef NDEBUG if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sInput '%s'\n",yyTracePrompt,yyTokenName[yymajor]); + int stateno = yypParser->yytos->stateno; + if( stateno < YY_MIN_REDUCE ){ + fprintf(yyTraceFILE,"%sInput '%s' in state %d\n", + yyTracePrompt,yyTokenName[yymajor],stateno); + }else{ + fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n", + yyTracePrompt,yyTokenName[yymajor],stateno-YY_MIN_REDUCE); + } } #endif do{ yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); - if( yyact <= YY_MAX_SHIFTREDUCE ){ - if( yyact > YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; - yy_shift(yypParser,yyact,yymajor,&yyminorunion); + if( yyact >= YY_MIN_REDUCE ){ + yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,yyminor); + }else if( yyact <= YY_MAX_SHIFTREDUCE ){ + yy_shift(yypParser,yyact,yymajor,yyminor); +#ifndef YYNOERRORRECOVERY yypParser->yyerrcnt--; +#endif yymajor = YYNOCODE; - }else if( yyact <= YY_MAX_REDUCE ){ - yy_reduce(yypParser,yyact-YY_MIN_REDUCE); + }else if( yyact==YY_ACCEPT_ACTION ){ + yypParser->yytos--; + yy_accept(yypParser); + return; }else{ assert( yyact == YY_ERROR_ACTION ); + yyminorunion.yy0 = yyminor; #ifdef YYERRORSYMBOL int yymx; #endif @@ -2507,9 +2925,9 @@ void Parse( ** */ if( yypParser->yyerrcnt<0 ){ - yy_syntax_error(yypParser,yymajor,yyminorunion); + yy_syntax_error(yypParser,yymajor,yyminor); } - yymx = yypParser->yystack[yypParser->yyidx].major; + yymx = yypParser->yytos->major; if( yymx==YYERRORSYMBOL || yyerrorhit ){ #ifndef NDEBUG if( yyTraceFILE ){ @@ -2517,26 +2935,26 @@ void Parse( yyTracePrompt,yyTokenName[yymajor]); } #endif - yy_destructor(yypParser, (YYCODETYPE)yymajor,&yyminorunion); + yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion); yymajor = YYNOCODE; }else{ - while( - yypParser->yyidx >= 0 && - yymx != YYERRORSYMBOL && - (yyact = yy_find_reduce_action( - yypParser->yystack[yypParser->yyidx].stateno, + while( yypParser->yytos >= yypParser->yystack + && yymx != YYERRORSYMBOL + && (yyact = yy_find_reduce_action( + yypParser->yytos->stateno, YYERRORSYMBOL)) >= YY_MIN_REDUCE ){ yy_pop_parser_stack(yypParser); } - if( yypParser->yyidx < 0 || yymajor==0 ){ + if( yypParser->yytos < yypParser->yystack || yymajor==0 ){ yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif yymajor = YYNOCODE; }else if( yymx!=YYERRORSYMBOL ){ - YYMINORTYPE u2; - u2.YYERRSYMDT = 0; - yy_shift(yypParser,yyact,YYERRORSYMBOL,&u2); + yy_shift(yypParser,yyact,YYERRORSYMBOL,yyminor); } } yypParser->yyerrcnt = 3; @@ -2549,7 +2967,7 @@ void Parse( ** Applications can set this macro (for example inside %include) if ** they intend to abandon the parse upon the first syntax error seen. */ - yy_syntax_error(yypParser,yymajor,yyminorunion); + yy_syntax_error(yypParser,yymajor, yyminor); yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); yymajor = YYNOCODE; @@ -2564,24 +2982,29 @@ void Parse( ** three input tokens have been successfully shifted. 
*/ if( yypParser->yyerrcnt<=0 ){ - yy_syntax_error(yypParser,yymajor,yyminorunion); + yy_syntax_error(yypParser,yymajor, yyminor); } yypParser->yyerrcnt = 3; yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); if( yyendofinput ){ yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif } yymajor = YYNOCODE; #endif } - }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 ); + }while( yymajor!=YYNOCODE && yypParser->yytos>yypParser->yystack ); #ifndef NDEBUG if( yyTraceFILE ){ - int i; + yyStackEntry *i; + char cDiv = '['; fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt); - for(i=1; i<=yypParser->yyidx; i++) - fprintf(yyTraceFILE,"%c%s", i==1 ? '[' : ' ', - yyTokenName[yypParser->yystack[i].major]); + for(i=&yypParser->yystack[1]; i<=yypParser->yytos; i++){ + fprintf(yyTraceFILE,"%c%s", cDiv, yyTokenName[i->major]); + cDiv = ' '; + } fprintf(yyTraceFILE,"]\n"); } #endif diff --git a/src/client/src/taos.def b/src/client/src/taos.def index f6de4e866587ce79d224311241510ad0170efa66..39906c7486dc242513f31028c367607fa0197dc9 100644 --- a/src/client/src/taos.def +++ b/src/client/src/taos.def @@ -24,8 +24,6 @@ taos_fetch_row_a taos_subscribe taos_consume taos_unsubscribe -taos_subfields_count -taos_fetch_subfields taos_open_stream taos_close_stream taos_fetch_block diff --git a/src/client/src/tscAst.c b/src/client/src/tscAst.c index d071358dbf0b9611eabac34d5f4a87eef0b6a646..cf0873b5b620ddc118863e3c2cae86080f1d481f 100644 --- a/src/client/src/tscAst.c +++ b/src/client/src/tscAst.c @@ -17,6 +17,7 @@ #include "taosmsg.h" #include "tast.h" #include "tlog.h" +#include "tscSQLParser.h" #include "tscSyntaxtreefunction.h" #include "tschemautil.h" #include "tsdb.h" @@ -26,7 +27,6 @@ #include "tstoken.h" #include "ttypes.h" #include "tutil.h" -#include "tscSQLParser.h" /* * @@ -115,6 +115,9 @@ static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols, int32_t i = 0; if (pToken->type == TK_ID) { do { + SSQLToken tableToken = {0}; + extractTableNameFromToken(pToken, &tableToken); + size_t len = strlen(pSchema[i].name); if (strncmp(pToken->z, pSchema[i].name, pToken->n) == 0 && pToken->n == len) break; } while (++i < numOfCols); @@ -268,7 +271,7 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha } // get the operator of expr - uint8_t optr = getBinaryExprOptr(&t0); + uint8_t optr = getBinaryExprOptr(&t0); if (optr == 0) { pError("not support binary operator:%d", t0.type); tSQLSyntaxNodeDestroy(pLeft, NULL); @@ -323,7 +326,7 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha pn->colId = -1; return pn; } else { - uint8_t localOptr = getBinaryExprOptr(&t0); + uint8_t localOptr = getBinaryExprOptr(&t0); if (localOptr == 0) { pError("not support binary operator:%d", t0.type); free(pBinExpr); @@ -419,17 +422,17 @@ void tSQLBinaryExprToString(tSQLBinaryExpr *pExpr, char *dst, int32_t *len) { if (pExpr == NULL) { *dst = 0; *len = 0; - return; + return; } - int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType); + int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType); dst += lhs; *len = lhs; - char *start = tSQLOptrToString(pExpr->nSQLBinaryOptr, dst); + char *start = tSQLOptrToString(pExpr->nSQLBinaryOptr, dst); *len += (start - dst); - *len += tSQLBinaryExprToStringImpl(pExpr->pRight, start, pExpr->pRight->nodeType); + *len += tSQLBinaryExprToStringImpl(pExpr->pRight, start, pExpr->pRight->nodeType); } static void UNUSED_FUNC 
destroySyntaxTree(tSQLSyntaxNode *pNode) { tSQLSyntaxNodeDestroy(pNode, NULL); } @@ -492,12 +495,12 @@ static void setInitialValueForRangeQueryCondition(tSKipListQueryCond *q, int8_t case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_BINARY: { q->upperBnd.nType = type; - q->upperBnd.pz = "\0"; + q->upperBnd.pz = NULL; q->upperBnd.nLen = -1; q->lowerBnd.nType = type; - q->lowerBnd.pz = "\0"; - q->lowerBnd.nLen = 0; + q->lowerBnd.pz = NULL; + q->lowerBnd.nLen = -1; } } } @@ -645,7 +648,7 @@ int32_t intersect(tQueryResultset *pLeft, tQueryResultset *pRight, tQueryResults /* * traverse the result and apply the function to each item to check if the item is qualified or not */ -static void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, __result_filter_fn_t fp, tQueryResultset *pResult) { +static void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, __result_filter_fn_t fp, tQueryResultset *pResult) { assert(pExpr->pLeft->nodeType == TSQL_NODE_COL && pExpr->pRight->nodeType == TSQL_NODE_VALUE); // brutal force scan the result list and check for each item in the list @@ -833,7 +836,7 @@ void tSQLBinaryExprCalcTraverse(tSQLBinaryExpr *pExprs, int32_t numOfRows, char tSQLSyntaxNode *pRight = pExprs->pRight; /* the left output has result from the left child syntax tree */ - char *pLeftOutput = malloc(sizeof(int64_t) * numOfRows); + char *pLeftOutput = (char*)malloc(sizeof(int64_t) * numOfRows); if (pLeft->nodeType == TSQL_NODE_EXPR) { tSQLBinaryExprCalcTraverse(pLeft->pExpr, numOfRows, pLeftOutput, param, order, getSourceDataBlock); } diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 99b9b571d7ffe513e87206c5cd0c5d380318ca95..94ebaefd369975c6873e6dce7ff36c3b513ad91e 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -26,19 +26,18 @@ #include "tutil.h" #include "tnote.h" -void tscProcessFetchRow(SSchedMsg *pMsg); -void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows); -static void tscProcessAsyncRetrieveNextVnode(void *param, TAOS_RES *tres, int numOfRows); -static void tscProcessAsyncContinueRetrieve(void *param, TAOS_RES *tres, int numOfRows); +static void tscProcessFetchRow(SSchedMsg *pMsg); +static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows); static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)()); /* - * proxy function to perform sequentially query&retrieve operation. - * If sql queries upon metric and two-stage merge procedure is not needed, - * it will sequentially query&retrieve data for all vnodes in pCmd->pMetricMeta + * Proxy function to perform sequentially query&retrieve operation. 
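+ * (Concretely: this proxy is installed as the internal callback, and each time
+ * the current vnode's result set is drained it re-issues the query against the
+ * next vnode, so the caller's own callback sees one continuous stream of rows.)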
+ * If sql queries upon a super table and two-stage merge procedure is not involved (when employ the projection + * query), it will sequentially query&retrieve data for all vnodes */ -static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows); +static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows); +static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows); // TODO return the correct error code to client in tscQueueAsyncError void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) { @@ -81,7 +80,6 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, return; } - pSql->sqlstr = malloc(sqlLen + 1); if (pSql->sqlstr == NULL) { tscError("%p failed to malloc sql string buffer", pSql); @@ -95,9 +93,9 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, pRes->numOfRows = 1; strtolower(pSql->sqlstr, sqlstr); - tscTrace("%p Async SQL: %s, pObj:%p", pSql, pSql->sqlstr, pObj); + tscDump("%p pObj:%p, Async SQL: %s", pSql, pObj, pSql->sqlstr); - int32_t code = tsParseSql(pSql, pObj->acctId, pObj->db, true); + int32_t code = tsParseSql(pSql, true); if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; if (code != TSDB_CODE_SUCCESS) { @@ -109,7 +107,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, tscDoQuery(pSql); } -static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) { +static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) { if (tres == NULL) { return; } @@ -118,36 +116,32 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; - // sequentially retrieve data from remain vnodes first, query vnode specified by vnodeIdx - if (numOfRows == 0 && tscProjectionQueryOnMetric(pCmd)) { - // vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - assert(pMeterMetaInfo->vnodeIndex >= 0); - - /* reach the maximum number of output rows, abort */ - if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { - (*pSql->fetchFp)(param, tres, 0); - return; - } - - /* update the limit value according to current retrieval results */ - pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; - pCmd->limit.offset = pRes->offset; - - if ((++(pMeterMetaInfo->vnodeIndex)) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { - tscTrace("%p retrieve data from next vnode:%d", pSql, pMeterMetaInfo->vnodeIndex); - - pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first. + if (numOfRows == 0) { + if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes. + tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode); + } else { + /* + * all available virtual node has been checked already, now we need to check + * for the next subclause queries + */ + if (pCmd->clauseIndex < pCmd->numOfClause - 1) { + tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode); + return; + } - tscResetForNextRetrieve(pRes); - pSql->fp = tscProcessAsyncRetrieveNextVnode; - tscProcessSql(pSql); - return; - } - } else { // localreducer has handle this situation - if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) { - pRes->numOfTotal += pRes->numOfRows; + /* + * 1. has reach the limitation + * 2. 
no remaining virtual nodes to retrieve + */ + (*pSql->fetchFp)(param, pSql, 0); } + + return; + } + + // the local reducer has already handled this situation during a super table non-projection query. + if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) { + pRes->numOfTotalInCurrentClause += pRes->numOfRows; } (*pSql->fetchFp)(param, tres, numOfRows); @@ -164,7 +158,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - if (pRes->qhandle == 0 || numOfRows != 0) { + if ((pRes->qhandle == 0 || numOfRows != 0) && pCmd->command < TSDB_SQL_LOCAL) { if (pRes->qhandle == 0) { tscError("qhandle is NULL"); } else { @@ -183,14 +177,18 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo } /* - * retrieve callback for fetch rows proxy. It serves as the callback function of querying vnode + * retrieve callback for fetch rows proxy. + * The two functions below both serve as the callback of a virtual node query: + * the query callback fires first, followed by the retrieve callback */ -static void tscProcessAsyncRetrieveNextVnode(void *param, TAOS_RES *tres, int numOfRows) { - tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscProcessAsyncFetchRowsProxy); +static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows) { + // query completed, continue to retrieve + tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchRowsProxy); } -static void tscProcessAsyncContinueRetrieve(void *param, TAOS_RES *tres, int numOfRows) { - tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscProcessAsyncRetrieve); +void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows) { + // query completed, continue to retrieve + tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchSingleRowProxy); } void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), void *param) { @@ -213,7 +211,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi // user-defined callback function is stored in fetchFp pSql->fetchFp = fp; - pSql->fp = tscProcessAsyncFetchRowsProxy; + pSql->fp = tscAsyncFetchRowsProxy; pSql->param = param; tscResetForNextRetrieve(pRes); @@ -245,11 +243,15 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW), pSql->fetchFp = fp; pSql->param = param; - + if (pRes->row >= pRes->numOfRows) { tscResetForNextRetrieve(pRes); - pSql->fp = tscProcessAsyncRetrieve; - pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + pSql->fp = tscAsyncFetchSingleRowProxy; + + if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC && pCmd->command < TSDB_SQL_LOCAL) { + pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + } + tscProcessSql(pSql); } else { SSchedMsg schedMsg; @@ -261,58 +263,45 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW), } } -void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) { +void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows) { SSqlObj *pSql = (SSqlObj *)tres; SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (numOfRows == 0) { - // sequentially retrieve data from remain vnodes. - if (tscProjectionQueryOnMetric(pCmd)) { + if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remaining vnodes.
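For context, the user-facing async pattern that these proxies serve looks roughly like the sketch below. It assumes an already-opened connection handle and uses illustrative callback names; only `taos_query_a` and `taos_fetch_rows_a` come from the patch itself.

```c
// Minimal sketch of the async query/fetch chain these proxies drive.
// taos_query_a() invokes the query callback once; the retrieve callback is
// then re-armed via taos_fetch_rows_a() until numOfRows reaches 0 (the
// client walks remaining vnodes/subclauses internally before reporting 0).
#include <stdio.h>
#include "taos.h"

static void on_rows(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows <= 0) return;              // 0: exhausted, <0: error code
  // process the current batch here, then pull the next one
  taos_fetch_rows_a(res, on_rows, param);
}

static void on_query(void *param, TAOS_RES *res, int code) {
  if (code != 0) { fprintf(stderr, "query failed:%d\n", code); return; }
  taos_fetch_rows_a(res, on_rows, param);  // query done, start retrieving
}

// usage: taos_query_a(taos, "select * from super_table", on_query, NULL);
```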
+ tscTryQueryNextVnode(pSql, tscAsyncQuerySingleRowForNextVnode); + } else { /* - * vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx till all vnode have been retrieved + * 1. the limit has been reached + * 2. no remaining virtual nodes to retrieve */ - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - assert(pMeterMetaInfo->vnodeIndex >= 0); - - /* reach the maximum number of output rows, abort */ - if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { - (*pSql->fetchFp)(pSql->param, pSql, NULL); - return; - } - - /* update the limit value according to current retrieval results */ - pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; - - if ((++pMeterMetaInfo->vnodeIndex) <= pMeterMetaInfo->pMetricMeta->numOfVnodes) { - pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first. - - tscResetForNextRetrieve(pRes); - pSql->fp = tscProcessAsyncContinueRetrieve; - tscProcessSql(pSql); - return; - } - } else { (*pSql->fetchFp)(pSql->param, pSql, NULL); } - } else { - for (int i = 0; i < pCmd->numOfCols; ++i) - pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row; - pRes->row++; - - (*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow); + return; } + + for (int i = 0; i < pCmd->numOfCols; ++i) + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row; + pRes->row++; + + (*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow); } void tscProcessFetchRow(SSchedMsg *pMsg) { SSqlObj *pSql = (SSqlObj *)pMsg->ahandle; SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - for (int i = 0; i < pCmd->numOfCols; ++i) - pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row; + for (int i = 0; i < pCmd->numOfCols; ++i) { + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row; + } + pRes->row++; - (*pSql->fetchFp)(pSql->param, pSql, pRes->tsrow); } @@ -371,7 +360,7 @@ void tscQueueAsyncRes(SSqlObj *pSql) { tscTrace("%p SqlObj is freed, not add into queue async res", pSql); return; } else { - tscTrace("%p add into queued async res, code:%d", pSql, pSql->res.code); + tscError("%p add into queued async res, code:%d", pSql, pSql->res.code); } SSchedMsg schedMsg; @@ -404,10 +393,13 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows) SSqlCmd *pCmd = &pSql->cmd; int32_t code = TSDB_CODE_SUCCESS; - assert(!pCmd->isInsertFromFile && pSql->signature == pSql); + assert(pCmd->dataSourceType != 0 && pSql->signature == pSql); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - assert(pCmd->numOfTables == 1); + int32_t index = 0; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, index); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + assert(pQueryInfo->numOfTables == 1 || pQueryInfo->numOfTables == 2); SDataBlockList *pDataBlocks = pCmd->pDataBlocks; if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) { @@ -444,7 +436,6 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { SSqlObj *pSql = (SSqlObj *)param; if (pSql == NULL || pSql->signature != pSql) return; - STscObj *pObj = pSql->pTscObj; SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; @@ -464,10 +455,11 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { } else { tscTrace("%p
renew meterMeta successfully, command:%d, code:%d, thandle:%p, retry:%d", pSql, pSql->cmd.command, pSql->res.code, pSql->thandle, pSql->retry); - - assert(tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta == NULL); - tscGetMeterMeta(pSql, tscGetMeterMetaInfo(&pSql->cmd, 0)->name, 0); - + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); + assert(pMeterMetaInfo->pMeterMeta == NULL); + + tscGetMeterMeta(pSql, pMeterMetaInfo); code = tscSendMsgToServer(pSql); if (code != 0) { pRes->code = code; @@ -485,49 +477,65 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { } if (pSql->pStream == NULL) { - // check if it is a sub-query of metric query first, if true, enter another routine - if ((pSql->cmd.type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + // check if it is a sub-query of a super table query first; if so, enter another routine + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pMeterMetaInfo->vnodeIndex >= 0 && pSql->param != NULL); SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; SSqlObj * pParObj = trs->pParentSqlObj; + assert(pParObj->signature == pParObj && trs->subqueryIndex == pMeterMetaInfo->vnodeIndex && pMeterMetaInfo->pMeterMeta->numOfTags != 0); - tscTrace("%p get metricMeta during metric query successfully", pSql); - - code = tscGetMeterMeta(pSql, tscGetMeterMetaInfo(&pSql->cmd, 0)->name, 0); + tscTrace("%p get metricMeta during super table query successfully", pSql); + + code = tscGetMeterMeta(pSql, pMeterMetaInfo); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; - code = tscGetMetricMeta(pSql); + code = tscGetMetricMeta(pSql, 0); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; } else { // normal async query continues - code = tsParseSql(pSql, pObj->acctId, pObj->db, false); - if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + if (pCmd->isParseFinish) { + tscTrace("%p resend data to vnode in metermeta callback since sql parsing has already completed", pSql); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + code = tscGetMeterMeta(pSql, pMeterMetaInfo); + assert(code == TSDB_CODE_SUCCESS); + + if (pMeterMetaInfo->pMeterMeta) { + code = tscSendMsgToServer(pSql); + if (code == TSDB_CODE_SUCCESS) return; + } + } else { + code = tsParseSql(pSql, false); + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + } } } else { // stream computing - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + code = tscGetMeterMeta(pSql, pMeterMetaInfo); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - if (code == TSDB_CODE_SUCCESS && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - code = tscGetMetricMeta(pSql); + if (code == TSDB_CODE_SUCCESS && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { + code = tscGetMetricMeta(pSql, pCmd->clauseIndex); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; } } - if (code != 0) { + if (code != TSDB_CODE_SUCCESS) { + pSql->res.code = code; tscQueueAsyncRes(pSql);
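The error path above hands delivery off to the scheduler instead of invoking the user callback inline. A simplified model of that hand-off, with illustrative types and names (the real `SSchedMsg` carries more fields):

```c
// Simplified model of the SSchedMsg hand-off used by tscQueueAsyncRes and
// tscProcessFetchRow: the error/row delivery is packaged as a message and
// executed on the scheduler thread rather than the network thread, so user
// callbacks never run in RPC context. Types here are illustrative.
typedef struct SchedMsg {
  void (*fp)(struct SchedMsg *pMsg);  // executed later by the scheduler loop
  void  *ahandle;                     // carries the SSqlObj across threads
} SchedMsg;

static void deliverAsyncRes(SchedMsg *pMsg) {
  // runs on the scheduler thread: safe to invoke the user's callback with
  // the error code stored in the SSqlObj referenced by pMsg->ahandle
}

static void queueAsyncRes(void *pSql, void (*schedule)(SchedMsg *)) {
  SchedMsg msg = { .fp = deliverAsyncRes, .ahandle = pSql };
  schedule(&msg);  // enqueue; dequeued and run by the scheduler thread
}
```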
return; } @@ -536,10 +544,12 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { tscTrace("%p stream:%p meta is updated, start new query, command:%d", pSql, pSql->pStream, pSql->cmd.command); /* * NOTE: - * transfer the sql function for metric query before get meter/metric meta, + * transform the sql functions for the super table query before getting the meter/metric meta, * since in callback functions, only tscProcessSql(pStream->pSql) is executed! */ - tscTansformSQLFunctionForMetricQuery(&pSql->cmd); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + tscTansformSQLFunctionForSTableQuery(pQueryInfo); tscIncStreamExecutionCount(pSql->pStream); } else { tscTrace("%p get meterMeta/metricMeta successfully", pSql); diff --git a/src/client/src/tscCache.c b/src/client/src/tscCache.c index 1ac32d7502ee99c38f84445cfeb767ad316b06ed..666d069a58c936e9028b46f9e6244923ac4be993 100644 --- a/src/client/src/tscCache.c +++ b/src/client/src/tscCache.c @@ -96,11 +96,7 @@ void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, uint16_t port, pObj = (SConnCache *)handle; if (pObj == NULL || pObj->maxSessions == 0) return NULL; -#ifdef CLUSTER - if (data == NULL || ip == 0) { -#else if (data == NULL) { -#endif tscTrace("data:%p ip:%p:%d not valid, not added in cache", data, ip, port); return NULL; } diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 5f18658675ce3a48b8d8bf448978f75542efd28b..248b197d5240e2ee0c680e46f4c542ccce21446e 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -72,6 +72,8 @@ for (int32_t i = 0; i < (ctx)->tagInfo.numOfTagCols; ++i) { \ void noop1(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {} void noop2(SQLFunctionCtx *UNUSED_PARAM(pCtx), int32_t UNUSED_PARAM(index)) {} +void doFinalizer(SQLFunctionCtx *pCtx) { resetResultInfo(GET_RES_INFO(pCtx)); } + typedef struct tValuePair { tVariant v; int64_t timestamp; @@ -355,8 +357,8 @@ static void function_finalizer(SQLFunctionCtx *pCtx) { pTrace("no result generated, result is set to NULL"); setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); } - - resetResultInfo(GET_RES_INFO(pCtx)); + + doFinalizer(pCtx); } /* @@ -889,6 +891,7 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) { // cannot set the numOfIteratedElems again since it is set during previous iteration GET_RES_INFO(pCtx)->numOfRes = 1; + doFinalizer(pCtx); } ///////////////////////////////////////////////////////////////////////////////////////////// @@ -909,7 +912,17 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, tval = &pCtx->preAggVals.max; index = pCtx->preAggVals.maxIndex; } - + + /** + * NOTE: works around the bug caused by invalid pre-calculated aggregation values. + * Here selectivity + ts will not return the correct value. + * + * The following three lines of code will be removed later.
+ */ + if (index < 0 || index >= pCtx->size + pCtx->startOffset) { + index = 0; + } + TSKEY key = pCtx->ptsList[index]; if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { @@ -1423,8 +1436,8 @@ static void stddev_finalizer(SQLFunctionCtx *pCtx) { *retValue = sqrt(pStd->res / pStd->num); SET_VAL(pCtx, 1, 1); } - - resetResultInfo(GET_RES_INFO(pCtx)); + + doFinalizer(pCtx); } ////////////////////////////////////////////////////////////////////////////////////// @@ -1456,7 +1469,9 @@ static void first_function(SQLFunctionCtx *pCtx) { } memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes); - DO_UPDATE_TAG_COLUMNS(pCtx, i); + + TSKEY k = pCtx->ptsList[i]; + DO_UPDATE_TAG_COLUMNS(pCtx, k); SResultInfo *pInfo = GET_RES_INFO(pCtx); pInfo->hasResult = DATA_SET_FLAG; @@ -1824,7 +1839,7 @@ static void last_row_finalizer(SQLFunctionCtx *pCtx) { } GET_RES_INFO(pCtx)->numOfRes = 1; - resetResultInfo(GET_RES_INFO(pCtx)); + doFinalizer(pCtx); } ////////////////////////////////////////////////////////////////////////////////// @@ -2005,15 +2020,8 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { STopBotInfo *pRes = pResInfo->interResultBuf; tValuePair **tvp = pRes->res; - int32_t step = 0; - - // in case of second stage merge, always use incremental output. - if (pCtx->currentStage == SECONDARY_STAGE_MERGE) { - step = QUERY_ASC_FORWARD_STEP; - } else { - step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); - } - + + int32_t step = QUERY_ASC_FORWARD_STEP; int32_t len = GET_RES_INFO(pCtx)->numOfRes; switch (type) { @@ -2392,8 +2400,8 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) { GET_TRUE_DATA_TYPE(); copyTopBotRes(pCtx, type); - - resetResultInfo(pResInfo); + + doFinalizer(pCtx); } /////////////////////////////////////////////////////////////////////////////////////////////// @@ -2469,8 +2477,8 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) { tOrderDescDestroy(pMemBucket->pOrderDesc); tMemBucketDestroy(pMemBucket); - - resetResultInfo(GET_RES_INFO(pCtx)); + + doFinalizer(pCtx); } ////////////////////////////////////////////////////////////////////////////////// @@ -2678,8 +2686,8 @@ static void apercentile_finalizer(SQLFunctionCtx *pCtx) { return; } } - - resetResultInfo(pResInfo); + + doFinalizer(pCtx); } ///////////////////////////////////////////////////////////////////////////////// @@ -2859,7 +2867,7 @@ static void leastsquares_finalizer(SQLFunctionCtx *pCtx) { param[1][2] /= param[1][1]; sprintf(pCtx->aOutputBuf, "(%lf, %lf)", param[0][2], param[1][2]); - resetResultInfo(GET_RES_INFO(pCtx)); + doFinalizer(pCtx); } static void date_col_output_function(SQLFunctionCtx *pCtx) { @@ -2878,17 +2886,17 @@ static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_ static void col_project_function(SQLFunctionCtx *pCtx) { INC_INIT_VAL(pCtx, pCtx->size); - char *pDest = 0; + char *pData = GET_INPUT_CHAR(pCtx); if (pCtx->order == TSQL_SO_ASC) { - pDest = pCtx->aOutputBuf; + memcpy(pCtx->aOutputBuf, pData, (size_t)pCtx->size * pCtx->inputBytes); } else { - pDest = pCtx->aOutputBuf - (pCtx->size - 1) * pCtx->inputBytes; + for(int32_t i = 0; i < pCtx->size; ++i) { + memcpy(pCtx->aOutputBuf + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes, + pCtx->inputBytes); + } } - char *pData = GET_INPUT_CHAR(pCtx); - memcpy(pDest, pData, (size_t)pCtx->size * pCtx->inputBytes); - - pCtx->aOutputBuf += pCtx->size * pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + pCtx->aOutputBuf += pCtx->size 
* pCtx->outputBytes; } static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { @@ -2903,7 +2911,7 @@ static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); - pCtx->aOutputBuf += pCtx->inputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + pCtx->aOutputBuf += pCtx->inputBytes/* * GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/; } /** @@ -2915,18 +2923,17 @@ static void tag_project_function(SQLFunctionCtx *pCtx) { INC_INIT_VAL(pCtx, pCtx->size); assert(pCtx->inputBytes == pCtx->outputBytes); - int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); for (int32_t i = 0; i < pCtx->size; ++i) { tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType); - pCtx->aOutputBuf += pCtx->outputBytes * factor; + pCtx->aOutputBuf += pCtx->outputBytes; } } static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { INC_INIT_VAL(pCtx, 1); tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->tag.nType); - pCtx->aOutputBuf += pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + pCtx->aOutputBuf += pCtx->outputBytes; } /** @@ -2975,8 +2982,8 @@ static void diff_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); - int32_t i = (pCtx->order == TSQL_SO_ASC) ? 0 : pCtx->size - 1; + TSKEY * pTimestamp = pCtx->ptsOutputBuf; switch (pCtx->inputType) { @@ -2996,14 +3003,14 @@ static void diff_function(SQLFunctionCtx *pCtx) { *pOutput = pData[i] - pCtx->param[1].i64Key; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } else { *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } pCtx->param[1].i64Key = pData[i]; @@ -3024,18 +3031,18 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet pCtx->param[1].i64Key = pData[i]; pCtx->param[1].nType = pCtx->inputType; - } else if (i == 0) { + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { *pOutput = pData[i] - pCtx->param[1].i64Key; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } else { - *pOutput = pData[i] - pData[i - 1]; + *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } pCtx->param[1].i64Key = pData[i]; @@ -3056,16 +3063,16 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet pCtx->param[1].dKey = pData[i]; pCtx->param[1].nType = pCtx->inputType; - } else if (i == 0) { + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { *pOutput = pData[i] - pCtx->param[1].dKey; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } else { - *pOutput = pData[i] - pData[i - 1]; + *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } pCtx->param[1].dKey = pData[i]; @@ -3086,16 +3093,18 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is 
not set yet pCtx->param[1].dKey = pData[i]; pCtx->param[1].nType = pCtx->inputType; - } else if (i == 0) { + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { *pOutput = pData[i] - pCtx->param[1].dKey; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + + pOutput += 1; + pTimestamp += 1; } else { - *pOutput = pData[i] - pData[i - 1]; + *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + + pOutput += 1; + pTimestamp += 1; } // keep the last value, the remain may be all null @@ -3117,16 +3126,17 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet pCtx->param[1].i64Key = pData[i]; pCtx->param[1].nType = pCtx->inputType; - } else if (i == 0) { + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { *pOutput = pData[i] - pCtx->param[1].i64Key; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } else { - *pOutput = pData[i] - pData[i - 1]; + *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + + pOutput += 1; + pTimestamp += 1; } pCtx->param[1].i64Key = pData[i]; @@ -3147,16 +3157,18 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet pCtx->param[1].i64Key = pData[i]; pCtx->param[1].nType = pCtx->inputType; - } else if (i == 0) { + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { *pOutput = pData[i] - pCtx->param[1].i64Key; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + + pOutput += 1; + pTimestamp += 1; } else { - *pOutput = pData[i] - pData[i - 1]; + *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + + pOutput += 1; + pTimestamp += 1; } pCtx->param[1].i64Key = pData[i]; @@ -3181,8 +3193,8 @@ static void diff_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->numOfRes += forwardStep; - pCtx->aOutputBuf = pCtx->aOutputBuf + forwardStep * pCtx->outputBytes * step; - pCtx->ptsOutputBuf = (char *)pCtx->ptsOutputBuf + forwardStep * TSDB_KEYSIZE * step; + pCtx->aOutputBuf += forwardStep * pCtx->outputBytes; + pCtx->ptsOutputBuf = (char*)pCtx->ptsOutputBuf + forwardStep * TSDB_KEYSIZE; } } @@ -3209,7 +3221,7 @@ static void diff_function_f(SQLFunctionCtx *pCtx, int32_t index) { GET_RES_INFO(pCtx)->numOfRes += 1; } - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + int32_t step = 1/*GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/; switch (pCtx->inputType) { case TSDB_DATA_TYPE_INT: { @@ -3272,23 +3284,24 @@ char *arithmetic_callback_function(void *param, char *name, int32_t colId) { static void arithmetic_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->numOfRes += pCtx->size; - SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[0].pz; + SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz; tSQLBinaryExprCalcTraverse(sas->pExpr->pBinExprInfo.pBinExpr, pCtx->size, pCtx->aOutputBuf, sas, pCtx->order, arithmetic_callback_function); - pCtx->aOutputBuf += pCtx->outputBytes * pCtx->size * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + pCtx->aOutputBuf += pCtx->outputBytes * pCtx->size/* * GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/; + 
pCtx->param[1].pz = NULL; } static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { INC_INIT_VAL(pCtx, 1); - SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[0].pz; + SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz; sas->offset = index; tSQLBinaryExprCalcTraverse(sas->pExpr->pBinExprInfo.pBinExpr, 1, pCtx->aOutputBuf, sas, pCtx->order, arithmetic_callback_function); - pCtx->aOutputBuf += pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + pCtx->aOutputBuf += pCtx->outputBytes/* * GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/; } #define LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \ @@ -3504,7 +3517,6 @@ void spread_func_sec_merge(SQLFunctionCtx *pCtx) { pCtx->param[3].dKey = pData->max; } - // pCtx->numOfIteratedElems += 1; GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; } @@ -3536,9 +3548,8 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) { *(double *)pCtx->aOutputBuf = pInfo->max - pInfo->min; } - - // SET_VAL(pCtx, pCtx->numOfIteratedElems, 1); - resetResultInfo(GET_RES_INFO(pCtx)); + + GET_RES_INFO(pCtx)->numOfRes = 1; // todo add test case } /* @@ -3836,12 +3847,12 @@ static void getStatics_f(int64_t *primaryKey, float *data, int32_t numOfRow, dou dsum += fv; if (fmin > fv) { fmin = fv; - minIndex = i; + *minIndex = i; } if (fmax < fv) { fmax = fv; - maxIndex = i; + *maxIndex = i; } // if (isNull(&lastVal, TSDB_DATA_TYPE_FLOAT)) { @@ -3889,12 +3900,12 @@ static void getStatics_d(int64_t *primaryKey, double *data, int32_t numOfRow, do dsum += dv; if (dmin > dv) { dmin = dv; - minIndex = i; + *minIndex = i; } if (dmax < dv) { dmax = dv; - maxIndex = i; + *maxIndex = i; } // if (isNull(&lastVal, TSDB_DATA_TYPE_DOUBLE)) { @@ -4171,7 +4182,7 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) { } GET_RES_INFO(pCtx)->numOfRes = 1; - resetResultInfo(GET_RES_INFO(pCtx)); + doFinalizer(pCtx); } /** @@ -4333,7 +4344,7 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) { strcpy(pCtx->aOutputBuf, pTSbuf->path); tsBufDestory(pTSbuf); - resetResultInfo(GET_RES_INFO(pCtx)); + doFinalizer(pCtx); } /* @@ -4373,7 +4384,7 @@ SQLAggFuncElem aAggs[28] = {{ count_function, count_function_f, no_next_step, - noop1, + doFinalizer, count_func_merge, count_func_merge, count_load_data_info, @@ -4616,7 +4627,7 @@ SQLAggFuncElem aAggs[28] = {{ date_col_output_function, date_col_output_function_f, no_next_step, - noop1, + doFinalizer, copy_function, copy_function, no_data_info, @@ -4631,7 +4642,7 @@ SQLAggFuncElem aAggs[28] = {{ noop1, noop2, no_next_step, - noop1, + doFinalizer, copy_function, copy_function, data_req_load_info, @@ -4646,7 +4657,7 @@ SQLAggFuncElem aAggs[28] = {{ tag_function, noop2, no_next_step, - noop1, + doFinalizer, copy_function, copy_function, no_data_info, @@ -4676,7 +4687,7 @@ SQLAggFuncElem aAggs[28] = {{ tag_function, tag_function_f, no_next_step, - noop1, + doFinalizer, copy_function, copy_function, no_data_info, @@ -4691,7 +4702,7 @@ SQLAggFuncElem aAggs[28] = {{ col_project_function, col_project_function_f, no_next_step, - noop1, + doFinalizer, copy_function, copy_function, data_req_load_info, @@ -4706,7 +4717,7 @@ SQLAggFuncElem aAggs[28] = {{ tag_project_function, tag_project_function_f, no_next_step, - noop1, + doFinalizer, copy_function, copy_function, no_data_info, @@ -4721,7 +4732,7 @@ SQLAggFuncElem aAggs[28] = {{ arithmetic_function, arithmetic_function_f, no_next_step, - noop1, + doFinalizer, copy_function, copy_function, data_req_load_info, @@ -4736,7 +4747,7 @@ 
SQLAggFuncElem aAggs[28] = {{ diff_function, diff_function_f, no_next_step, - noop1, + doFinalizer, noop1, noop1, data_req_load_info, @@ -4782,7 +4793,7 @@ SQLAggFuncElem aAggs[28] = {{ interp_function, do_sum_f, // todo filter handle no_next_step, - noop1, + doFinalizer, noop1, copy_function, no_data_info, diff --git a/src/client/src/tscJoinProcess.c b/src/client/src/tscJoinProcess.c index a94b308e87a784e55a01f2d19c0c7cba60b3beeb..1bafb60f1a0e487aeaa7b70e1c1817111f8a631d 100644 --- a/src/client/src/tscJoinProcess.c +++ b/src/client/src/tscJoinProcess.c @@ -22,20 +22,7 @@ #include "ttime.h" #include "tutil.h" -static UNUSED_FUNC bool isSubqueryCompleted(SSqlObj* pSql) { - bool hasData = true; - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlRes* pRes = &pSql->pSubs[i]->res; - - // in case inner join, if any subquery exhausted, query completed - if (pRes->numOfRows == 0) { - hasData = false; - break; - } - } - - return hasData; -} +static void freeSubqueryObj(SSqlObj* pSql); static bool doCompare(int32_t order, int64_t left, int64_t right) { if (order == TSQL_SO_ASC) { @@ -53,11 +40,16 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor *st = INT64_MAX; *et = INT64_MIN; - SLimitVal* pLimit = &pSql->cmd.limit; - int32_t order = pSql->cmd.order.order; - - pSql->pSubs[0]->cmd.tsBuf = output1; - pSql->pSubs[1]->cmd.tsBuf = output2; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + + SLimitVal* pLimit = &pQueryInfo->limit; + int32_t order = pQueryInfo->order.order; + + SQueryInfo* pSubQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[0]->cmd, 0); + SQueryInfo* pSubQueryInfo2 = tscGetQueryInfoDetail(&pSql->pSubs[1]->cmd, 0); + + pSubQueryInfo1->tsBuf = output1; + pSubQueryInfo2->tsBuf = output2; tsBufResetPos(pSupporter1->pTSBuf); tsBufResetPos(pSupporter2->pTSBuf); @@ -104,16 +96,19 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor numOfInput2++; } else { - if (*st > elem1.ts) { - *st = elem1.ts; - } - - if (*et < elem1.ts) { - *et = elem1.ts; - } - - // in case of stable query, limit/offset is not applied here - if (pLimit->offset == 0 || pSql->cmd.nAggTimeInterval > 0 || QUERY_IS_STABLE_QUERY(pSql->cmd.type)) { + /* + * in case of stable query, limit/offset is not applied here. the limit/offset is applied to the + * final results which is acquired after the secondry merge of in the client. 
+ */ + if (pLimit->offset == 0 || pQueryInfo->nAggTimeInterval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) { + if (*st > elem1.ts) { + *st = elem1.ts; + } + + if (*et < elem1.ts) { + *et = elem1.ts; + } + tsBufAppend(output1, elem1.vnode, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts)); tsBufAppend(output2, elem2.vnode, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts)); } else { @@ -150,15 +145,15 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor tsBufDestory(pSupporter1->pTSBuf); tsBufDestory(pSupporter2->pTSBuf); - tscTrace("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks intersecting", pSql, - numOfInput1, numOfInput2, output1->numOfTotal); + tscTrace("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks " + "intersecting, skey:%" PRId64 ", ekey:%" PRId64, pSql, + numOfInput1, numOfInput2, output1->numOfTotal, *st, *et); return output1->numOfTotal; } // todo handle failed to create sub query -SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, - /*int32_t* numOfComplete, int32_t* gc,*/ int32_t index) { +SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, int32_t index) { SJoinSubquerySupporter* pSupporter = calloc(1, sizeof(SJoinSubquerySupporter)); if (pSupporter == NULL) { return NULL; @@ -168,11 +163,15 @@ SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pS pSupporter->pState = pState; pSupporter->subqueryIndex = index; - pSupporter->interval = pSql->cmd.nAggTimeInterval; - pSupporter->limit = pSql->cmd.limit; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + + pSupporter->interval = pQueryInfo->nAggTimeInterval; + pSupporter->limit = pQueryInfo->limit; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, index); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, pSql->cmd.clauseIndex, index); pSupporter->uid = pMeterMetaInfo->pMeterMeta->uid; + + assert (pSupporter->uid != 0); getTmpfilePath("join-", pSupporter->path); pSupporter->f = fopen(pSupporter->path, "w"); @@ -189,7 +188,7 @@ void tscDestroyJoinSupporter(SJoinSubquerySupporter* pSupporter) { return; } - tfree(pSupporter->exprsInfo.pExprs); + tscSqlExprInfoDestroy(&pSupporter->exprsInfo); tscColumnBaseInfoDestroy(&pSupporter->colList); tscClearFieldInfo(&pSupporter->fieldsInfo); @@ -209,10 +208,9 @@ void tscDestroyJoinSupporter(SJoinSubquerySupporter* pSupporter) { * primary timestamp column , the secondary query is not necessary * */ -bool needSecondaryQuery(SSqlObj* pSql) { - SSqlCmd* pCmd = &pSql->cmd; - for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) { - SColumnBase* pBase = tscColumnBaseInfoGet(&pCmd->colList, i); +bool needSecondaryQuery(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->colList.numOfCols; ++i) { + SColumnBase* pBase = tscColumnBaseInfoGet(&pQueryInfo->colList, i); if (pBase->colIndex.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return true; } @@ -224,110 +222,147 @@ bool needSecondaryQuery(SSqlObj* pSql) { /* * launch secondary stage query to fetch the result that contains timestamp in set */ -int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { +int32_t tscLaunchSecondPhaseSubqueries(SSqlObj* pSql) { int32_t numOfSub = 0; SJoinSubquerySupporter* pSupporter = NULL; - + + /* + * If the columns are not involved in the final select clause, the secondary query will not be launched + * for 
the subquery. + */ + SSubqueryState* pState = NULL; + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { pSupporter = pSql->pSubs[i]->param; - pSupporter->pState->numOfCompleted = 0; - - /* - * If the columns are not involved in the final select clause, the secondary query will not be launched - * for the subquery. - */ if (pSupporter->exprsInfo.numOfExprs > 0) { ++numOfSub; } } - + + assert(numOfSub > 0); + // scan all subquery, if one sub query has only ts, ignore it - tscTrace( - "%p start to launch secondary subqueries, total:%d, only:%d needs to query, others are not retrieve in " - "select clause", - pSql, pSql->numOfSubs, numOfSub); + tscTrace("%p start to launch secondary subqueries, total:%d, only:%d need to query, others are not retrieved in " "select clause", pSql, pSql->numOfSubs, numOfSub); - int32_t j = 0; + /* + * the subqueries that do not actually launch the secondary query to the virtual node are set to completed. + */ + pState = pSupporter->pState; + pState->numOfTotal = pSql->numOfSubs; + pState->numOfCompleted = (pSql->numOfSubs - numOfSub); + + bool success = true; + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlObj* pSub = pSql->pSubs[i]; - pSupporter = pSub->param; - pSupporter->pState->numOfTotal = numOfSub; - + SSqlObj *pPrevSub = pSql->pSubs[i]; + pSql->pSubs[i] = NULL; + + pSupporter = pPrevSub->param; + if (pSupporter->exprsInfo.numOfExprs == 0) { + tscTrace("%p subIndex: %d, no need to launch query, ignore it", pSql, i); + tscDestroyJoinSupporter(pSupporter); - taos_free_result(pSub); + tscFreeSqlObj(pPrevSub); + + pSql->pSubs[i] = NULL; continue; } - - SSqlObj* pNew = createSubqueryObj(pSql, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL); + + SQueryInfo *pSubQueryInfo = tscGetQueryInfoDetail(&pPrevSub->cmd, 0); + STSBuf *pTSBuf = pSubQueryInfo->tsBuf; + pSubQueryInfo->tsBuf = NULL; + + // freeing the result of an async object also frees the sqlObj + taos_free_result(pPrevSub); + + SSqlObj *pNew = createSubqueryObj(pSql, (int16_t) i, tscJoinQueryCallback, pSupporter, NULL); if (pNew == NULL) { - pSql->numOfSubs = i; // revise the number of subquery - pSupporter->pState->numOfTotal = i; - - pSupporter->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY; tscDestroyJoinSupporter(pSupporter); - return 0; + success = false; + break; } - - tscFreeSqlCmdData(&pNew->cmd); - - pSql->pSubs[j++] = pNew; - pNew->cmd.tsBuf = pSub->cmd.tsBuf; - pSub->cmd.tsBuf = NULL; - - taos_free_result(pSub); - + + tscClearSubqueryInfo(&pNew->cmd); + pSql->pSubs[i] = pNew; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + pQueryInfo->tsBuf = pTSBuf; // transfer the ownership of the timestamp comp-z data to the newly created object + // set the second stage sub query for join process - pNew->cmd.type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE; - - pNew->cmd.nAggTimeInterval = pSupporter->interval; - pNew->cmd.groupbyExpr = pSupporter->groupbyExpr; - - tscColumnBaseInfoCopy(&pNew->cmd.colList, &pSupporter->colList, 0); - tscTagCondCopy(&pNew->cmd.tagCond, &pSupporter->tagCond); - - tscSqlExprCopy(&pNew->cmd.exprsInfo, &pSupporter->exprsInfo, pSupporter->uid); - tscFieldInfoCopyAll(&pSupporter->fieldsInfo, &pNew->cmd.fieldsInfo); - - // add the ts function for interval query if it is missing - if (pSupporter->exprsInfo.pExprs[0].functionId != TSDB_FUNC_TS && pNew->cmd.nAggTimeInterval > 0) { - tscAddTimestampColumn(&pNew->cmd, TSDB_FUNC_TS, 0); + pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE; + + pQueryInfo->nAggTimeInterval = pSupporter->interval; + pQueryInfo->groupbyExpr =
pSupporter->groupbyExpr; + + tscColumnBaseInfoCopy(&pQueryInfo->colList, &pSupporter->colList, 0); + tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond); + + tscSqlExprCopy(&pQueryInfo->exprsInfo, &pSupporter->exprsInfo, pSupporter->uid); + tscFieldInfoCopyAll(&pQueryInfo->fieldsInfo, &pSupporter->fieldsInfo); + + /* + * if the first column of the secondary query is not the ts function, add it, + * because this column is required to filter the result by timestamp after intersecting. + */ + if (pSupporter->exprsInfo.pExprs[0].functionId != TSDB_FUNC_TS) { + tscAddTimestampColumn(pQueryInfo, TSDB_FUNC_TS, 0); } - + // todo refactor function name - tscAddTimestampColumn(&pNew->cmd, TSDB_FUNC_TS, 0); - tscFieldInfoCalOffset(&pNew->cmd); - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0); - + SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + assert(pNew->numOfSubs == 0 && pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1); + + tscFieldInfoCalOffset(pNewQueryInfo); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pNewQueryInfo, 0); + /* * When handling the projection query, the offset value will be modified for table-table join, which is changed * during the timestamp intersection. */ - pSupporter->limit = pSql->cmd.limit; - pNew->cmd.limit = pSupporter->limit; + pSupporter->limit = pQueryInfo->limit; + pNewQueryInfo->limit = pSupporter->limit; + // fetch the join tag column - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - SSqlExpr* pExpr = tscSqlExprGet(&pNew->cmd, 0); - assert(pNew->cmd.tagCond.joinInfo.hasJoin); - - int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd.tagCond, pMeterMetaInfo->pMeterMeta->uid); + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { + SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0); + assert(pQueryInfo->tagCond.joinInfo.hasJoin); + + int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pQueryInfo->tagCond, pMeterMetaInfo->pMeterMeta->uid); pExpr->param[0].i64Key = tagColIndex; pExpr->numOfParams = 1; } - -#ifdef _DEBUG_VIEW - tscPrintSelectClause(&pNew->cmd); -#endif - - tscProcessSql(pNew); + + tscPrintSelectClause(pNew, 0); + + tscTrace("%p subquery:%p tableIndex:%d, vnodeIdx:%d, type:%d, exprInfo:%d, colList:%d, fieldsInfo:%d, name:%s", + pSql, pNew, 0, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type, + pNewQueryInfo->exprsInfo.numOfExprs, pNewQueryInfo->colList.numOfCols, + pNewQueryInfo->fieldsInfo.numOfOutputCols, pNewQueryInfo->pMeterInfo[0]->name); + } + + // preparing the subquery objects failed, abort + if (!success) { + pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; + tscError("%p failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql, + pSql->numOfSubs, pSql->res.code); + freeSubqueryObj(pSql); + + return pSql->res.code; + } + + for(int32_t i = 0; i < pSql->numOfSubs; ++i) { SSqlObj* pSub = pSql->pSubs[i]; if (pSub == NULL) { continue; } + tscProcessSql(pSub); } - // revise the number of subs - pSql->numOfSubs = j; - - return 0; + return TSDB_CODE_SUCCESS; } static void freeSubqueryObj(SSqlObj* pSql) { @@ -360,7 +395,10 @@ static void doQuitSubquery(SSqlObj* pParentSql) { } static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSubquerySupporter* pSupporter) { - if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { + int32_t numOfTotal = pSupporter->pState->numOfTotal; + int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1); + + if (finished >= numOfTotal) {
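Several callbacks in this file now share the same completion-counting idiom: read the fixed total first, then atomically bump the completed counter. A self-contained sketch of that idiom, using C11 atomics in place of the tree's `atomic_add_fetch_32`:

```c
// Completion-barrier idiom used throughout this patch: the total is fixed
// before the subqueries launch, each finishing subquery bumps the counter
// once, and exactly one callback observes finished == numOfTotal; that
// caller is the one allowed to finalize (or tear down) the parent query.
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  int32_t    numOfTotal;      // fixed before the subqueries are launched
  atomic_int numOfCompleted;  // bumped once per finishing subquery
} SubqueryState;

// returns true for exactly one caller: the subquery that finished last
static bool lastSubqueryDone(SubqueryState *pState) {
  int32_t numOfTotal = pState->numOfTotal;
  int32_t finished = atomic_fetch_add(&pState->numOfCompleted, 1) + 1;
  assert(finished <= numOfTotal);
  return finished == numOfTotal;
}
```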
pSqlObj->res.code = abs(pSupporter->pState->code); tscError("%p all subquery return and query failed, global code:%d", pSqlObj, pSqlObj->res.code); @@ -369,11 +407,11 @@ static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSubquerySupporter* pSupporter } // update the query time range according to the join results on timestamp -static void updateQueryTimeRange(SSqlObj* pSql, int64_t st, int64_t et) { - assert(pSql->cmd.stime <= st && pSql->cmd.etime >= et); +static void updateQueryTimeRange(SQueryInfo* pQueryInfo, int64_t st, int64_t et) { + assert(pQueryInfo->stime <= st && pQueryInfo->etime >= et); - pSql->cmd.stime = st; - pSql->cmd.etime = et; + pQueryInfo->stime = st; + pQueryInfo->etime = et; } static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { @@ -381,8 +419,12 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { SSqlObj* pParentSql = pSupporter->pObj; SSqlObj* pSql = (SSqlObj*)tres; - - if ((pSql->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == 0) { + SSqlCmd* pCmd = &pSql->cmd; + SSqlRes* pRes = &pSql->res; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == 0) { if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pSupporter->pState->code); @@ -408,8 +450,8 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { tscTrace("%p create tmp file for ts block:%s", pSql, pBuf->path); pSupporter->pTSBuf = pBuf; } else { - assert(pSql->cmd.numOfTables == 1); // for subquery, only one metermetaInfo - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + assert(pQueryInfo->numOfTables == 1); // for subquery, only one metermetaInfo + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); tsBufMerge(pSupporter->pTSBuf, pBuf, pMeterMetaInfo->vnodeIndex); tsBufDestory(pBuf); @@ -422,12 +464,19 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { taos_fetch_rows_a(tres, joinRetrieveCallback, param); } else if (numOfRows == 0) { // no data from this vnode anymore - if (tscProjectionQueryOnMetric(&pParentSql->cmd)) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - assert(pSql->cmd.numOfTables == 1); + SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex); + + //todo refactor + if (tscNonOrderedProjectionQueryOnSTable(pParentQueryInfo, 0)) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + assert(pQueryInfo->numOfTables == 1); // for projection query, need to try next vnode - if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + int32_t totalVnode = pMeterMetaInfo->pMetricMeta->numOfVnodes; + if ((++pMeterMetaInfo->vnodeIndex) < totalVnode) { + tscTrace("%p current vnode:%d exhausted, try next:%d. total vnode:%d. 
current numOfRes:%d", pSql, + pMeterMetaInfo->vnodeIndex - 1, pMeterMetaInfo->vnodeIndex, totalVnode, pRes->numOfTotal); + pSql->cmd.command = TSDB_SQL_SELECT; pSql->fp = tscJoinQueryCallback; tscProcessSql(pSql); @@ -435,8 +484,13 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { return; } } - - if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { + + int32_t numOfTotal = pSupporter->pState->numOfTotal; + int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1); + + if (finished >= numOfTotal) { + assert(finished == numOfTotal); + if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { tscTrace("%p sub:%p, numOfSub:%d, quit from further procedure due to other queries failure", pParentSql, tres, pSupporter->subqueryIndex); @@ -456,8 +510,8 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { tscTrace("%p free all sub SqlObj and quit", pParentSql); doQuitSubquery(pParentSql); } else { - updateQueryTimeRange(pParentSql, st, et); - tscLaunchSecondSubquery(pParentSql); + updateQueryTimeRange(pParentQueryInfo, st, et); + tscLaunchSecondPhaseSubqueries(pParentSql); } } } else { // failure of sub query @@ -477,10 +531,10 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { if (numOfRows >= 0) { pSql->res.numOfTotal += pSql->res.numOfRows; } - - if (tscProjectionQueryOnMetric(&pSql->cmd) && numOfRows == 0) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - assert(pSql->cmd.numOfTables == 1); + + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && numOfRows == 0) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + assert(pQueryInfo->numOfTables == 1); // for projection query, need to try next vnode if current vnode is exhausted if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { @@ -494,11 +548,13 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { return; } } - - if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { - assert(pSupporter->pState->numOfCompleted == pSupporter->pState->numOfTotal); - - tscTrace("%p all %d secondary retrieves are completed, global code:%d", tres, pSupporter->pState->numOfTotal, + + int32_t numOfTotal = pSupporter->pState->numOfTotal; + int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1); + + if (finished >= numOfTotal) { + assert(finished == numOfTotal); + tscTrace("%p all %d secondary subquery retrieves completed, global code:%d", tres, numOfTotal, pParentSql->res.code); if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { @@ -507,57 +563,83 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { } tsem_post(&pParentSql->rspSem); + } else { + tscTrace("%p sub:%p completed, completed:%d, total:%d", pParentSql, tres, finished, numOfTotal); } } } +static SJoinSubquerySupporter* tscUpdateSubqueryStatus(SSqlObj* pSql, int32_t numOfFetch) { + int32_t notInvolved = 0; + SJoinSubquerySupporter* pSupporter = NULL; + SSubqueryState* pState = NULL; + + for(int32_t i = 0; i < pSql->numOfSubs; ++i) { + if (pSql->pSubs[i] == NULL) { + notInvolved++; + } else { + pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[i]->param; + pState = pSupporter->pState; + } + } + + pState->numOfTotal = pSql->numOfSubs; + pState->numOfCompleted = pSql->numOfSubs - numOfFetch; + + return pSupporter; +} + void 
tscFetchDatablockFromSubquery(SSqlObj* pSql) { int32_t numOfFetch = 0; - assert(pSql->numOfSubs >= 1); - + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + if (pSql->pSubs[i] == NULL) { // this subquery does not need to involve in secondary query + continue; + } + SSqlRes *pRes = &pSql->pSubs[i]->res; - SSqlCmd *pCmd = &pSql->pSubs[i]->cmd; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0); - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); - if (tscProjectionQueryOnMetric(pCmd)) { + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { if (pRes->row >= pRes->numOfRows && pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes && - (!tscHasReachLimitation(pSql->pSubs[i]))) { + (!tscHasReachLimitation(pQueryInfo, pRes))) { numOfFetch++; } } else { - if (pRes->row >= pRes->numOfRows && (!tscHasReachLimitation(pSql->pSubs[i]))) { + if (pRes->row >= pRes->numOfRows && (!tscHasReachLimitation(pQueryInfo, pRes))) { numOfFetch++; } } } if (numOfFetch <= 0) { - return ; + return; } // TODO multi-vnode retrieve for projection query with limitation has bugs, since the global limiation is not handled tscTrace("%p retrieve data from %d subqueries", pSql, numOfFetch); - SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[0]->param; - pSupporter->pState->numOfTotal = numOfFetch; // wait for all subqueries completed - pSupporter->pState->numOfCompleted = 0; - + SJoinSubquerySupporter* pSupporter = tscUpdateSubqueryStatus(pSql, numOfFetch); + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { SSqlObj* pSql1 = pSql->pSubs[i]; - + if (pSql1 == NULL) { + continue; + } + SSqlRes* pRes1 = &pSql1->res; SSqlCmd* pCmd1 = &pSql1->cmd; pSupporter = (SJoinSubquerySupporter*)pSql1->param; // wait for all subqueries completed - pSupporter->pState->numOfTotal = numOfFetch; - assert(pRes1->numOfRows >= 0 && pCmd1->numOfTables == 1); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd1, 0); + assert(pRes1->numOfRows >= 0 && pQueryInfo->numOfTables == 1); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd1, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); if (pRes1->row >= pRes1->numOfRows) { tscTrace("%p subquery:%p retrieve data from vnode, subquery:%d, vnodeIndex:%d", pSql, pSql1, @@ -576,6 +658,16 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) { // wait for all subquery completed tsem_wait(&pSql->rspSem); + + // update the records for each subquery + for(int32_t i = 0; i < pSql->numOfSubs; ++i) { + if (pSql->pSubs[i] == NULL) { + continue; + } + + SSqlRes* pRes1 = &pSql->pSubs[i]->res; + pRes1->numOfTotalInCurrentClause += pRes1->numOfRows; + } } // all subqueries return, set the result output index @@ -589,26 +681,28 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { return; // the column transfer support struct has been built } - pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pCmd->fieldsInfo.numOfOutputCols); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pQueryInfo->fieldsInfo.numOfOutputCols); - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); int32_t tableIndexOfSub = -1; - for (int32_t j = 0; j < pCmd->numOfTables; ++j) { - 
SSqlObj* pSub = pSql->pSubs[j]; - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSub->cmd, 0); + for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, j); if (pMeterMetaInfo->pMeterMeta->uid == pExpr->uid) { tableIndexOfSub = j; break; } } + assert(tableIndexOfSub >= 0 && tableIndexOfSub < pQueryInfo->numOfTables); + SSqlCmd* pSubCmd = &pSql->pSubs[tableIndexOfSub]->cmd; - - for (int32_t k = 0; k < pSubCmd->exprsInfo.numOfExprs; ++k) { - SSqlExpr* pSubExpr = tscSqlExprGet(pSubCmd, k); + SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0); + + for (int32_t k = 0; k < pSubQueryInfo->exprsInfo.numOfExprs; ++k) { + SSqlExpr* pSubExpr = tscSqlExprGet(pSubQueryInfo, k); if (pExpr->functionId == pSubExpr->functionId && pExpr->colInfo.colId == pSubExpr->colInfo.colId) { pRes->pColumnIndex[i] = (SColumnIndex){.tableIndex = tableIndexOfSub, .columnIndex = k}; break; @@ -619,7 +713,7 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { SSqlObj* pSql = (SSqlObj*)tres; - // SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + // SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); // int32_t idx = pSql->cmd.vnodeIdx; @@ -643,12 +737,13 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { // // no qualified result // } // - // tscLaunchSecondSubquery(pSql, ts, num); + // tscLaunchSecondPhaseSubqueries(pSql, ts, num); // } else { // } // } else { - if ((pSql->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { if (code != TSDB_CODE_SUCCESS) { // direct call joinRetrieveCallback and set the error code joinRetrieveCallback(param, pSql, code); } else { // first stage query, continue to retrieve data @@ -675,16 +770,20 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { quitAllSubquery(pParentSql, pSupporter); } else { - if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { + int32_t numOfTotal = pSupporter->pState->numOfTotal; + int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1); + + if (finished >= numOfTotal) { + assert(finished == numOfTotal); + tscSetupOutputColumnIndex(pParentSql); - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); /** * if the query is a continue query (vnodeIndex > 0 for projection query) for next vnode, do the retrieval of * data instead of returning to its invoker */ - if (pMeterMetaInfo->vnodeIndex > 0 && tscProjectionQueryOnMetric(&pSql->cmd)) { + if (pMeterMetaInfo->vnodeIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { assert(pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes); pSupporter->pState->numOfCompleted = 0; // reset the record value @@ -793,7 +892,9 @@ STSBuf* tsBufCreate(bool autoDelete) { return NULL; } - allocResForTSBuf(pTSBuf); + if (NULL == allocResForTSBuf(pTSBuf)) { + return NULL; + } // update the header info STSBufFileHeader header = {.magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = TSQL_SO_ASC}; @@ -818,7 +919,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { pTSBuf->f = 
fopen(pTSBuf->path, "r+"); if (pTSBuf->f == NULL) { - free(pTSBuf); + free(pTSBuf); return NULL; } @@ -891,9 +992,9 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { return pTSBuf; } -void tsBufDestory(STSBuf* pTSBuf) { +void* tsBufDestory(STSBuf* pTSBuf) { if (pTSBuf == NULL) { - return; + return NULL; } tfree(pTSBuf->assistBuf); @@ -912,6 +1013,7 @@ void tsBufDestory(STSBuf* pTSBuf) { } free(pTSBuf); + return NULL; } static STSVnodeBlockInfoEx* tsBufGetLastVnodeInfo(STSBuf* pTSBuf) { diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 813ba141184bfc747d6dc862aebdf6394751c110..402838bb680b7d4e61c3e17415eb4b7450944ccd 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -77,7 +77,7 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type * length((uint64_t) 123456789011) > 12, greater than sizsof(uint64_t) */ static int32_t tscMaxLengthOfTagsFields(SSqlObj *pSql) { - SMeterMeta *pMeta = tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta; + SMeterMeta *pMeta = tscGetMeterMetaInfo(&pSql->cmd, 0, 0)->pMeterMeta; if (pMeta->meterType == TSDB_METER_METRIC || pMeta->meterType == TSDB_METER_OTABLE || pMeta->meterType == TSDB_METER_STABLE) { @@ -106,8 +106,9 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { SSqlRes *pRes = &pSql->res; // one column for each row - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SMeterMeta * pMeta = pMeterMetaInfo->pMeterMeta; /* @@ -119,7 +120,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { int32_t numOfRows = pMeta->numOfColumns; int32_t totalNumOfRows = numOfRows + pMeta->numOfTags; - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { numOfRows = pMeta->numOfColumns + pMeta->numOfTags; } @@ -127,31 +128,31 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { SSchema *pSchema = tsGetSchema(pMeta); for (int32_t i = 0; i < numOfRows; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0); - strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name, + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0); + strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name, TSDB_COL_NAME_LEN); char *type = tDataTypeDesc[pSchema[i].type].aName; - pField = tscFieldInfoGetField(pCmd, 1); - strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes); + pField = tscFieldInfoGetField(pQueryInfo, 1); + strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes); int32_t bytes = pSchema[i].bytes; if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { bytes = bytes / TSDB_NCHAR_SIZE; } - pField = tscFieldInfoGetField(pCmd, 2); - *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pCmd, 2) * totalNumOfRows + pField->bytes * i) = bytes; + pField = tscFieldInfoGetField(pQueryInfo, 2); + *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes; - pField = tscFieldInfoGetField(pCmd, 3); + pField = tscFieldInfoGetField(pQueryInfo, 3); if (i >= pMeta->numOfColumns && pMeta->numOfTags != 0) { - strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 3) * 
totalNumOfRows + pField->bytes * i, "tag", + strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i, "tag", strlen("tag") + 1); } } - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { return 0; } @@ -159,27 +160,27 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { char *pTagValue = tsGetTagsValue(pMeta); for (int32_t i = numOfRows; i < totalNumOfRows; ++i) { // field name - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0); - strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name, + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0); + strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name, TSDB_COL_NAME_LEN); // type name - pField = tscFieldInfoGetField(pCmd, 1); + pField = tscFieldInfoGetField(pQueryInfo, 1); char *type = tDataTypeDesc[pSchema[i].type].aName; - strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes); + strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes); // type length int32_t bytes = pSchema[i].bytes; - pField = tscFieldInfoGetField(pCmd, 2); + pField = tscFieldInfoGetField(pQueryInfo, 2); if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { bytes = bytes / TSDB_NCHAR_SIZE; } - *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pCmd, 2) * totalNumOfRows + pField->bytes * i) = bytes; + *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes; // tag value - pField = tscFieldInfoGetField(pCmd, 3); - char *target = pRes->data + tscFieldInfoGetOffset(pCmd, 3) * totalNumOfRows + pField->bytes * i; + pField = tscFieldInfoGetField(pQueryInfo, 3); + char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i; if (isNull(pTagValue, pSchema[i].type)) { sprintf(target, "%s", TSDB_DATA_NULL_STR); @@ -236,25 +237,28 @@ static int32_t tscBuildMeterSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, SSqlCmd *pCmd = &pSql->cmd; pCmd->numOfCols = numOfCols; - pCmd->order.order = TSQL_SO_ASC; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + pQueryInfo->order.order = TSQL_SO_ASC; - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, "Field", TSDB_COL_NAME_LEN); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, "Field", TSDB_COL_NAME_LEN); rowLen += TSDB_COL_NAME_LEN; - tscFieldInfoSetValue(&pCmd->fieldsInfo, 1, TSDB_DATA_TYPE_BINARY, "Type", typeColLength); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 1, TSDB_DATA_TYPE_BINARY, "Type", typeColLength); rowLen += typeColLength; - tscFieldInfoSetValue(&pCmd->fieldsInfo, 2, TSDB_DATA_TYPE_INT, "Length", sizeof(int32_t)); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 2, TSDB_DATA_TYPE_INT, "Length", sizeof(int32_t)); rowLen += sizeof(int32_t); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 3, TSDB_DATA_TYPE_BINARY, "Note", noteColLength); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 3, TSDB_DATA_TYPE_BINARY, "Note", noteColLength); rowLen += noteColLength; return rowLen; } static int32_t tscProcessDescribeTable(SSqlObj *pSql) { - assert(tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta != NULL); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + assert(tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMeterMeta != NULL); const int32_t 
NUM_OF_DESCRIBE_TABLE_COLUMNS = 4; const int32_t TYPE_COLUMN_LENGTH = 16; @@ -267,7 +271,7 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) { int32_t rowLen = tscBuildMeterSchemaResultFields(pSql, NUM_OF_DESCRIBE_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, note_field_length); - tscFieldInfoCalOffset(&pSql->cmd); + tscFieldInfoCalOffset(pQueryInfo); return tscSetValueToResObj(pSql, rowLen); } @@ -277,7 +281,9 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) { // only need to reorganize the results in the column format SSqlCmd * pCmd = &pSql->cmd; SSqlRes * pRes = &pSql->res; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; SSchema * pSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); @@ -294,7 +300,7 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) { } int32_t totalNumOfResults = pMetricMeta->numOfMeters; - int32_t rowLen = tscGetResRowLength(pCmd); + int32_t rowLen = tscGetResRowLength(pQueryInfo); tscInitResObjForLocalQuery(pSql, totalNumOfResults, rowLen); @@ -304,17 +310,17 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) { for (int32_t j = 0; j < pSidList->numOfSids; ++j) { SMeterSidExtInfo *pSidExt = tscGetMeterSidInfo(pSidList, j); - - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SColIndexEx *pColIndex = &tscSqlExprGet(pCmd, k)->colInfo; + + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SColIndexEx *pColIndex = &tscSqlExprGet(pQueryInfo, k)->colInfo; int16_t offsetId = pColIndex->colIdx; assert((pColIndex->flag & TSDB_COL_TAG) != 0); char * val = pSidExt->tags + vOffset[offsetId]; - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, k); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, k); - memcpy(pRes->data + tscFieldInfoGetOffset(pCmd, k) * totalNumOfResults + pField->bytes * rowIdx, val, + memcpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, k) * totalNumOfResults + pField->bytes * rowIdx, val, (size_t)pField->bytes); } rowIdx++; @@ -328,21 +334,23 @@ static int tscBuildMetricTagSqlFunctionResult(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - SMetricMeta *pMetricMeta = tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + SMetricMeta *pMetricMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMetricMeta; int32_t totalNumOfResults = 1; // count function only produce one result - int32_t rowLen = tscGetResRowLength(pCmd); + int32_t rowLen = tscGetResRowLength(pQueryInfo); tscInitResObjForLocalQuery(pSql, totalNumOfResults, rowLen); int32_t rowIdx = 0; for (int32_t i = 0; i < totalNumOfResults; ++i) { - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->colInfo.colIdx == -1 && pExpr->functionId == TSDB_FUNC_COUNT) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, k); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, k); - memcpy(pRes->data + tscFieldInfoGetOffset(pCmd, i) * totalNumOfResults + pField->bytes * rowIdx, + memcpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, i) * totalNumOfResults + pField->bytes * rowIdx, &pMetricMeta->numOfMeters, sizeof(pMetricMeta->numOfMeters)); } else { 
tscError("not support operations"); @@ -357,15 +365,17 @@ static int tscBuildMetricTagSqlFunctionResult(SSqlObj *pSql) { static int tscProcessQueryTags(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; - - SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + SMeterMeta *pMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMeterMeta; if (pMeterMeta == NULL || pMeterMeta->numOfTags == 0 || pMeterMeta->numOfColumns == 0) { strcpy(pCmd->payload, "invalid table"); pSql->res.code = TSDB_CODE_INVALID_TABLE; return pSql->res.code; } - SSqlExpr *pExpr = tscSqlExprGet(pCmd, 0); + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, 0); if (pExpr->functionId == TSDB_FUNC_COUNT) { return tscBuildMetricTagSqlFunctionResult(pSql); } else { @@ -374,7 +384,9 @@ static int tscProcessQueryTags(SSqlObj *pSql) { } static void tscProcessCurrentUser(SSqlObj *pSql) { - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); tscSetLocalQueryResult(pSql, pSql->pTscObj->user, pExpr->aliasName, TSDB_USER_LEN); } @@ -387,19 +399,24 @@ static void tscProcessCurrentDB(SSqlObj *pSql) { setNull(db, TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN); } - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); tscSetLocalQueryResult(pSql, db, pExpr->aliasName, TSDB_DB_NAME_LEN); } static void tscProcessServerVer(SSqlObj *pSql) { const char* v = pSql->pTscObj->sversion; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); tscSetLocalQueryResult(pSql, v, pExpr->aliasName, tListLen(pSql->pTscObj->sversion)); } static void tscProcessClientVer(SSqlObj *pSql) { - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); tscSetLocalQueryResult(pSql, version, pExpr->aliasName, strlen(version)); } @@ -417,7 +434,9 @@ static void tscProcessServStatus(SSqlObj *pSql) { } } - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); tscSetLocalQueryResult(pSql, "1", pExpr->aliasName, 2); } @@ -426,12 +445,16 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa SSqlRes *pRes = &pSql->res; pCmd->numOfCols = 1; - pCmd->order.order = TSQL_SO_ASC; - - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, columnName, valueLength); + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + pQueryInfo->order.order = TSQL_SO_ASC; + + tscClearFieldInfo(&pQueryInfo->fieldsInfo); + + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, columnName, valueLength); tscInitResObjForLocalQuery(pSql, 1, valueLength); - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0); strncpy(pRes->data, val, pField->bytes); } diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 985fa588a488ae806a1752f046d2b16f209b14e0..6ef28a55f60fc847dc36de26856682d2fb6b65c8 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -19,7 +19,7 @@ #define _XOPEN_SOURCE #include "os.h" 
-#include "ihash.h" +#include "hash.h" #include "tscSecondaryMerge.h" #include "tscUtil.h" #include "tschemautil.h" @@ -36,7 +36,7 @@ enum { TSDB_USE_CLI_TS = 1, }; -static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize); +static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows); static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) { int32_t numType = isValidNumber(pToken); @@ -71,8 +71,6 @@ static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) { } int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) { - //char * token; //fang not used - //int tokenlen; //fang not used int32_t index = 0; SSQLToken sToken; int64_t interval; @@ -115,13 +113,12 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1 index = 0; sToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL); pTokenEnd += index; - + if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) { - index = 0; valueToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL); pTokenEnd += index; - + if (valueToken.n < 2) { return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z); } @@ -129,7 +126,7 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1 if (getTimestampInUsFromStr(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - + if (timePrec == TSDB_TIME_PRECISION_MILLI) { interval /= 1000; } @@ -152,8 +149,8 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, int64_t iv; int32_t numType; char * endptr = NULL; - errno = 0; // clear the previous existed error information - + errno = 0; // clear the previous existed error information + switch (pSchema->type) { case TSDB_DATA_TYPE_BOOL: { // bool if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { @@ -193,7 +190,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, return tscInvalidSQLErrMsg(msg, "tinyint data overflow", pToken->z); } - *((int8_t *)payload) = (int8_t) iv; + *((int8_t *)payload) = (int8_t)iv; } break; @@ -378,7 +375,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start } else { if (pDataBlocks->tsSource == TSDB_USE_SERVER_TS) { return -1; // client time/server time can not be mixed - + } else if (pDataBlocks->tsSource == -1) { pDataBlocks->tsSource = TSDB_USE_CLI_TS; } @@ -393,9 +390,9 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start } int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error, - int16_t timePrec, int32_t *code, char* tmpTokenBuf) { - int32_t index = 0; - //bool isPrevOptr; //fang, never used + int16_t timePrec, int32_t *code, char *tmpTokenBuf) { + int32_t index = 0; + // bool isPrevOptr; //fang, never used SSQLToken sToken = {0}; char * payload = pDataBlocks->pData + pDataBlocks->size; @@ -403,8 +400,8 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ int32_t rowSize = 0; for (int i = 0; i < spd->numOfAssignedCols; ++i) { // the start position in data block buffer of current value in sql - char * start = payload + spd->elems[i].offset; - int16_t colIndex = spd->elems[i].colIndex; + char * start = payload + spd->elems[i].offset; + int16_t colIndex = spd->elems[i].colIndex; SSchema *pSchema = schema + colIndex; rowSize += 
pSchema->bytes; @@ -417,7 +414,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) { continue; } - + strcpy(error, "client out of memory"); *code = TSDB_CODE_CLI_OUT_OF_MEMORY; return -1; @@ -434,10 +431,10 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ // Remove quotation marks if (TK_STRING == sToken.type) { // delete escape character: \\, \', \" - char delim = sToken.z[0]; + char delim = sToken.z[0]; int32_t cnt = 0; int32_t j = 0; - for (int32_t k = 1; k < sToken.n - 1; ++k) { + for (int32_t k = 1; k < sToken.n - 1; ++k) { if (sToken.z[k] == delim || sToken.z[k] == '\\') { if (sToken.z[k + 1] == delim) { cnt++; @@ -447,13 +444,13 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ continue; } } - + tmpTokenBuf[j] = sToken.z[k]; j++; } - tmpTokenBuf[j] = 0; + tmpTokenBuf[j] = 0; sToken.z = tmpTokenBuf; - sToken.n -= 2 + cnt; + sToken.n -= 2 + cnt; } bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); @@ -475,7 +472,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ char *ptr = payload; for (int32_t i = 0; i < spd->numOfCols; ++i) { - if (!spd->hasVal[i]) { // current column do not have any value to insert, set it to null + if (!spd->hasVal[i]) { // current column do not have any value to insert, set it to null setNull(ptr, schema[i].type, schema[i].bytes); } @@ -500,7 +497,7 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) { } int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMeta, int maxRows, - SParsedDataColInfo *spd, char *error, int32_t *code, char* tmpTokenBuf) { + SParsedDataColInfo *spd, char *error, int32_t *code, char *tmpTokenBuf) { int32_t index = 0; SSQLToken sToken; @@ -522,18 +519,19 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe *str += index; if (numOfRows >= maxRows || pDataBlock->size + pMeterMeta->rowSize >= pDataBlock->nAllocSize) { - int32_t tSize = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize); - if (0 == tSize) { //TODO pass the correct error code to client + int32_t tSize; + int32_t retcode = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize, &tSize); + if (retcode != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client strcpy(error, "client out of memory"); - *code = TSDB_CODE_CLI_OUT_OF_MEMORY; + *code = retcode; return -1; } - - maxRows += tSize; + ASSERT(tSize > maxRows); + maxRows = tSize; } int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf); - if (len <= 0) { // error message has been set in tsParseOneRowData + if (len <= 0) { // error message has been set in tsParseOneRowData return -1; } @@ -574,11 +572,12 @@ static void tscSetAssignedColumnInfo(SParsedDataColInfo *spd, SSchema *pSchema, } } -int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize) { +int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) { size_t remain = pDataBlock->nAllocSize - pDataBlock->size; const int factor = 5; uint32_t nAllocSizeOld = pDataBlock->nAllocSize; - + assert(pDataBlock->headerSize >= 0); + // expand the allocated size if (remain < rowSize * factor) { while (remain < rowSize * factor) { @@ -591,14 +590,15 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize) { pDataBlock->pData 
= tmp; memset(pDataBlock->pData + pDataBlock->size, 0, pDataBlock->nAllocSize - pDataBlock->size); } else { - //assert(false); - // do nothing + // do nothing, if allocate more memory failed pDataBlock->nAllocSize = nAllocSizeOld; - return 0; + *numOfRows = (int32_t)(pDataBlock->nAllocSize - pDataBlock->headerSize) / rowSize; + return TSDB_CODE_CLI_OUT_OF_MEMORY; } } - return (int32_t)(pDataBlock->nAllocSize - pDataBlock->size) / rowSize; + *numOfRows = (int32_t)(pDataBlock->nAllocSize - pDataBlock->headerSize) / rowSize; + return TSDB_CODE_SUCCESS; } static void tsSetBlockInfo(SShellSubmitBlock *pBlocks, const SMeterMeta *pMeterMeta, int32_t numOfRows) { @@ -654,23 +654,25 @@ void sortRemoveDuplicates(STableDataBlocks *dataBuf) { static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char **str, SParsedDataColInfo *spd, int32_t *totalNum) { SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; STableDataBlocks *dataBuf = NULL; int32_t ret = tscGetDataBlockFromList(pTableHashList, pCmd->pDataBlocks, pMeterMeta->uid, TSDB_DEFAULT_PAYLOAD_SIZE, - sizeof(SShellSubmitBlock), pMeterMeta->rowSize, pMeterMetaInfo->name, &dataBuf); + sizeof(SShellSubmitBlock), pMeterMeta->rowSize, pMeterMetaInfo->name, + pMeterMeta, &dataBuf); if (ret != TSDB_CODE_SUCCESS) { return ret; } - int32_t maxNumOfRows = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize); - if (0 == maxNumOfRows) { + int32_t maxNumOfRows; + ret = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize, &maxNumOfRows); + if (TSDB_CODE_SUCCESS != ret) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } - + int32_t code = TSDB_CODE_INVALID_SQL; - char* tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \" + char * tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \" if (NULL == tmpTokenBuf) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } @@ -682,7 +684,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char } for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) { - SParamInfo* param = dataBuf->params + i; + SParamInfo *param = dataBuf->params + i; if (param->idx == -1) { param->idx = pCmd->numOfParams++; param->offset -= sizeof(SShellSubmitBlock); @@ -703,16 +705,20 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char return TSDB_CODE_SUCCESS; } -static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { +static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) { int32_t index = 0; - SSQLToken sToken; - SSQLToken tableToken; + SSQLToken sToken = {0}; + SSQLToken tableToken = {0}; int32_t code = TSDB_CODE_SUCCESS; - - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + + const int32_t TABLE_INDEX = 0; + const int32_t STABLE_INDEX = 1; + + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); char *sql = *sqlstr; + // get the token of specified table index = 0; tableToken = tStrGetToken(sql, &index, false, 0, NULL); @@ -749,41 +755,54 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { if (numOfColList == 0 && cstart != NULL) { return TSDB_CODE_INVALID_SQL; } - - if (sToken.type == TK_USING) { // create table if not exists + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, TABLE_INDEX); + + if 
(sToken.type == TK_USING) { // create table if not exists according to the super table index = 0; sToken = tStrGetToken(sql, &index, false, 0, NULL); sql += index; STagData *pTag = (STagData *)pCmd->payload; memset(pTag, 0, sizeof(STagData)); - setMeterID(pSql, &sToken, 0); + + /* + * the source super table is moved to the secondary position of the pMeterMetaInfo list + */ + if (pQueryInfo->numOfTables < 2) { + tscAddEmptyMeterMetaInfo(pQueryInfo); + } + + SMeterMetaInfo *pSTableMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, STABLE_INDEX); + setMeterID(pSTableMeterMetaInfo, &sToken, pSql); - strncpy(pTag->name, pMeterMetaInfo->name, TSDB_METER_ID_LEN); - code = tscGetMeterMeta(pSql, pTag->name, 0); + strncpy(pTag->name, pSTableMeterMetaInfo->name, TSDB_METER_ID_LEN); + code = tscGetMeterMeta(pSql, pSTableMeterMetaInfo); if (code != TSDB_CODE_SUCCESS) { return code; } - if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (!UTIL_METER_IS_SUPERTABLE(pSTableMeterMetaInfo)) { return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z); } - SSchema *pTagSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); + SSchema *pTagSchema = tsGetTagSchema(pSTableMeterMetaInfo->pMeterMeta); index = 0; sToken = tStrGetToken(sql, &index, false, 0, NULL); sql += index; - SParsedDataColInfo spd = {0}; - uint8_t numOfTags = pMeterMetaInfo->pMeterMeta->numOfTags; + SParsedDataColInfo spd = {0}; + + uint8_t numOfTags = pSTableMeterMetaInfo->pMeterMeta->numOfTags; spd.numOfCols = numOfTags; // if specify some tags column if (sToken.type != TK_LP) { tscSetAssignedColumnInfo(&spd, pTagSchema, numOfTags); } else { - /* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen) tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn); */ + /* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen) + * tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... 
vn); */ int16_t offset[TSDB_MAX_COLUMNS] = {0}; for (int32_t t = 1; t < numOfTags; ++t) { offset[t] = offset[t - 1] + pTagSchema[t - 1].bytes; @@ -810,14 +829,14 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { for (int32_t t = 0; t < numOfTags; ++t) { if (strncmp(sToken.z, pTagSchema[t].name, sToken.n) == 0 && strlen(pTagSchema[t].name) == sToken.n) { SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++]; - pElem->offset = offset[t]; + pElem->offset = offset[t]; pElem->colIndex = t; if (spd.hasVal[t] == true) { return tscInvalidSQLErrMsg(pCmd->payload, "duplicated tag name", sToken.z); } - spd.hasVal[t] = true; + spd.hasVal[t] = true; findColumnIndex = true; break; } @@ -836,7 +855,7 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { sToken = tStrGetToken(sql, &index, false, 0, NULL); sql += index; } - + if (sToken.type != TK_TAGS) { return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z); } @@ -844,9 +863,9 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { uint32_t ignoreTokenTypes = TK_LP; uint32_t numOfIgnoreToken = 1; for (int i = 0; i < spd.numOfAssignedCols; ++i) { - char* tagVal = pTag->data + spd.elems[i].offset; + char * tagVal = pTag->data + spd.elems[i].offset; int16_t colIndex = spd.elems[i].colIndex; - + index = 0; sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes); sql += index; @@ -862,13 +881,14 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { sToken.n -= 2; } - code = tsParseOneColumnData(&pTagSchema[colIndex], &sToken, tagVal, pCmd->payload, &sql, false, pMeterMetaInfo->pMeterMeta->precision); + code = tsParseOneColumnData(&pTagSchema[colIndex], &sToken, tagVal, pCmd->payload, &sql, false, + pSTableMeterMetaInfo->pMeterMeta->precision); if (code != TSDB_CODE_SUCCESS) { return code; } - if ((pTagSchema[colIndex].type == TSDB_DATA_TYPE_BINARY || - pTagSchema[colIndex].type == TSDB_DATA_TYPE_NCHAR) && sToken.n > pTagSchema[colIndex].bytes) { + if ((pTagSchema[colIndex].type == TSDB_DATA_TYPE_BINARY || pTagSchema[colIndex].type == TSDB_DATA_TYPE_NCHAR) && + sToken.n > pTagSchema[colIndex].bytes) { return tscInvalidSQLErrMsg(pCmd->payload, "string too long", sToken.z); } } @@ -883,34 +903,38 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { // 2. 
set the null value for the columns that do not assign values if (spd.numOfAssignedCols < spd.numOfCols) { char *ptr = pTag->data; - + for (int32_t i = 0; i < spd.numOfCols; ++i) { - if (!spd.hasVal[i]) { // current tag column do not have any value to insert, set it to null + if (!spd.hasVal[i]) { // current tag column do not have any value to insert, set it to null setNull(ptr, pTagSchema[i].type, pTagSchema[i].bytes); } - + ptr += pTagSchema[i].bytes; - } + } } if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) { return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr); } - int32_t ret = setMeterID(pSql, &tableToken, 0); + int32_t ret = setMeterID(pMeterMetaInfo, &tableToken, pSql); if (ret != TSDB_CODE_SUCCESS) { return ret; } createTable = true; - code = tscGetMeterMetaEx(pSql, pMeterMetaInfo->name, true); + code = tscGetMeterMetaEx(pSql, pMeterMetaInfo, true); + if (TSDB_CODE_ACTION_IN_PROGRESS == code) { + return code; + } + } else { if (cstart != NULL) { sql = cstart; } else { sql = sToken.z; } - code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); + code = tscGetMeterMeta(pSql, pMeterMetaInfo); } int32_t len = cend - cstart + 1; @@ -935,6 +959,15 @@ int validateTableName(char *tblName, int len) { return tscValidateName(&token); } +static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) { + if (pCmd->dataSourceType != 0 && pCmd->dataSourceType != type) { + return tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sql); + } + + pCmd->dataSourceType = type; + return TSDB_CODE_SUCCESS; +} + /** * usage: insert into table1 values() () table2 values()() * @@ -944,48 +977,68 @@ int validateTableName(char *tblName, int len) { * @param pSql * @return */ -int doParserInsertSql(SSqlObj *pSql, char *str) { +int doParseInsertSql(SSqlObj *pSql, char *str) { SSqlCmd *pCmd = &pSql->cmd; - - int32_t code = TSDB_CODE_INVALID_SQL; + int32_t totalNum = 0; + int32_t code = TSDB_CODE_SUCCESS; - SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd); + SMeterMetaInfo *pMeterMetaInfo = NULL; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + assert(pQueryInfo != NULL); + + if (pQueryInfo->numOfTables == 0) { + pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo); + } else { + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + } if ((code = tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) { return code; } - ASSERT(((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList)) + assert(((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList)) || ((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList))); if ((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList)) { - pSql->pTableHashList = taosInitIntHash(128, POINTER_BYTES, taosHashInt); + pSql->pTableHashList = taosInitHashTable(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); + pSql->cmd.pDataBlocks = tscCreateBlockArrayList(); if (NULL == pSql->pTableHashList || NULL == pSql->cmd.pDataBlocks) { code = TSDB_CODE_CLI_OUT_OF_MEMORY; goto _error_clean; } } else { - ASSERT((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList)); + assert((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList)); str = pSql->asyncTblPos; } - tscTrace("%p create data block list for submit data, %p", pSql, pSql->cmd.pDataBlocks); + tscTrace("%p create data block list for submit data:%p, asyncTblPos:%p, pTableHashList:%p", pSql, pSql->cmd.pDataBlocks, pSql->asyncTblPos, pSql->pTableHashList); while 
(1) { - int32_t index = 0; + int32_t index = 0; SSQLToken sToken = tStrGetToken(str, &index, false, 0, NULL); - if (sToken.n == 0) { // parse file, do not release the STableDataBlock - if (pCmd->isInsertFromFile == 1) { + + // no data in the sql string anymore. + if (sToken.n == 0) { + /* + * if the data is from the data file, no data has been generated yet. So, there no data to + * merge or submit, save the file path and parse the file in other routines. + */ + if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) { goto _clean; } - if (totalNum > 0) { - break; - } else { // no data in current sql string, error + /* + * if no data has been generated during parsing the sql string, error msg will return + * Otherwise, create the first submit block and submit to virtual node. + */ + if (totalNum == 0) { code = TSDB_CODE_INVALID_SQL; goto _error_clean; + } else { + break; } } @@ -997,32 +1050,35 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { goto _error_clean; } - //TODO refactor - if ((code = setMeterID(pSql, &sToken, 0)) != TSDB_CODE_SUCCESS) { + if ((code = setMeterID(pMeterMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) { goto _error_clean; } void *fp = pSql->fp; - if ((code = tscParseSqlForCreateTableOnDemand(&str, pSql)) != TSDB_CODE_SUCCESS) { + ptrdiff_t pos = pSql->asyncTblPos - pSql->sqlstr; + + if ((code = tscCheckIfCreateTable(&str, pSql)) != TSDB_CODE_SUCCESS) { + /* + * For async insert, after get the metermeta from server, the sql string will not be + * parsed using the new metermeta to avoid the overhead cause by get metermeta data information. + * And during the getMeterMetaCallback function, the sql string will be parsed from the + * interrupted position. + */ if (fp != NULL) { if (TSDB_CODE_ACTION_IN_PROGRESS == code) { - tscTrace("async insert and waiting to get meter meta, then continue parse sql: %s", pSql->asyncTblPos); + tscTrace("async insert and waiting to get meter meta, then continue parse sql from offset: %" PRId64, pos); return code; } - tscTrace("async insert parse error, code:%d, %s", code, tsError[code]); + + // todo add to return + tscError("async insert parse error, code:%d, %s", code, tsError[code]); pSql->asyncTblPos = NULL; - goto _error_clean; // TODO: should _clean or _error_clean to async flow ???? - } else { - /* - * for async insert, the free data block operations, which is tscDestroyBlockArrayList, - * must be executed before launch another threads to get metermeta, since the - * later ops may manipulate SSqlObj through another thread in getMeterMetaCallback function. - */ - goto _error_clean; } + + goto _error_clean; // TODO: should _clean or _error_clean to async flow ???? 
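The `asyncTblPos` bookkeeping above is what lets the insert parser suspend while table meta is fetched from the server and then continue from the interrupted offset instead of reparsing the whole statement. A hedged, self-contained sketch of that suspend/resume shape follows; the names and state layout are hypothetical, not the real client structs:

```c
/*
 * Minimal sketch (hypothetical names) of the resume-on-callback pattern:
 * when table meta must be fetched asynchronously, the parser records where
 * it stopped and returns ACTION_IN_PROGRESS; the meta callback re-enters
 * the parser, which continues from the saved offset.
 */
#include <stddef.h>
#include <stdio.h>

enum { CODE_SUCCESS = 0, CODE_ACTION_IN_PROGRESS = 1 };

typedef struct ParseState {
  const char *sqlstr;       /* full statement */
  const char *asyncTblPos;  /* NULL = fresh parse, else resume point */
  int         metaReady;    /* stands in for the cached table meta */
} ParseState;

static int parseInsert(ParseState *st) {
  /* resume from the interrupted position if there is one */
  const char *cursor = st->asyncTblPos ? st->asyncTblPos : st->sqlstr;

  if (!st->metaReady) {
    /* remember the offset and hand control back to the meta request */
    st->asyncTblPos = cursor;
    return CODE_ACTION_IN_PROGRESS;
  }

  printf("parsing resumed at offset %td: \"%s\"\n",
         cursor - st->sqlstr, cursor);
  st->asyncTblPos = NULL;  /* parse finished; clear the resume point */
  return CODE_SUCCESS;
}

int main(void) {
  ParseState st = {.sqlstr = "insert into t values (now, 1)"};

  if (parseInsert(&st) == CODE_ACTION_IN_PROGRESS) {
    st.metaReady = 1;   /* the meta callback would fire once data arrives */
    parseInsert(&st);   /* second entry continues, it does not restart */
  }
  return 0;
}
```

This is also why the trace message above logs the resume offset (`pos`) rather than the remaining SQL text: the offset is the piece of state that survives across the asynchronous boundary.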
} - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL); goto _error_clean; } @@ -1030,8 +1086,9 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { index = 0; sToken = tStrGetToken(str, &index, false, 0, NULL); str += index; + if (sToken.n == 0) { - code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z); + code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z); goto _error_clean; } @@ -1041,13 +1098,8 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { tscSetAssignedColumnInfo(&spd, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns); - if (pCmd->isInsertFromFile == -1) { - pCmd->isInsertFromFile = 0; - } else { - if (pCmd->isInsertFromFile == 1) { - code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z); - goto _error_clean; - } + if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) { + goto _error_clean; } /* @@ -1059,13 +1111,8 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { goto _error_clean; } } else if (sToken.type == TK_FILE) { - if (pCmd->isInsertFromFile == -1) { - pCmd->isInsertFromFile = 1; - } else { - if (pCmd->isInsertFromFile == 0) { - code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z); - goto _error_clean; - } + if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) { + goto _error_clean; } index = 0; @@ -1089,23 +1136,22 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { wordfree(&full_path); STableDataBlocks *pDataBlock = NULL; - int32_t ret = tscCreateDataBlock(PATH_MAX, pMeterMetaInfo->pMeterMeta->rowSize, sizeof(SShellSubmitBlock), - pMeterMetaInfo->name, &pDataBlock); + SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; + + int32_t ret = tscCreateDataBlock(PATH_MAX, pMeterMeta->rowSize, sizeof(SShellSubmitBlock), pMeterMetaInfo->name, + pMeterMeta, &pDataBlock); if (ret != TSDB_CODE_SUCCESS) { goto _error_clean; } - + tscAppendDataBlock(pCmd->pDataBlocks, pDataBlock); strcpy(pDataBlock->filename, fname); } else if (sToken.type == TK_LP) { /* insert into tablename(col1, col2,..., coln) values(v1, v2,... 
vn); */ - SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta; + SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0)->pMeterMeta; SSchema * pSchema = tsGetSchema(pMeterMeta); - if (pCmd->isInsertFromFile == -1) { - pCmd->isInsertFromFile = 0; - } else if (pCmd->isInsertFromFile == 1) { - code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z); + if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) { goto _error_clean; } @@ -1186,7 +1232,7 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { if (pCmd->numOfParams > 0) { goto _clean; } - + // submit to more than one vnode if (pCmd->pDataBlocks->nSize > 0) { // merge according to vgid @@ -1199,8 +1245,8 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { goto _error_clean; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - + pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); + // set the next sent data vnode index in data block arraylist pMeterMetaInfo->vnodeIndex = 1; } else { @@ -1214,12 +1260,16 @@ _error_clean: pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); _clean: - taosCleanUpIntHash(pSql->pTableHashList); + taosCleanUpHashTable(pSql->pTableHashList); + pSql->pTableHashList = NULL; + pSql->asyncTblPos = NULL; + pCmd->isParseFinish = 1; + return code; } -int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db) { +int tsParseInsertSql(SSqlObj *pSql) { if (!pSql->pTscObj->writeAuth) { return TSDB_CODE_NO_RIGHTS; } @@ -1227,37 +1277,36 @@ int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db) { int32_t index = 0; SSqlCmd *pCmd = &pSql->cmd; - SSQLToken sToken = tStrGetToken(sql, &index, false, 0, NULL); - + SSQLToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL); assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT); - pCmd->import = (sToken.type == TK_IMPORT); - - sToken = tStrGetToken(sql, &index, false, 0, NULL); + + pCmd->count = 0; + pCmd->command = TSDB_SQL_INSERT; + + SQueryInfo *pQueryInfo = NULL; + tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo); + + uint16_t type = (sToken.type == TK_INSERT)? 
TSDB_QUERY_TYPE_INSERT:TSDB_QUERY_TYPE_IMPORT; + TSDB_QUERY_SET_TYPE(pQueryInfo->type, type); + + sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL); if (sToken.type != TK_INTO) { return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z); } - - pCmd->count = 0; - pCmd->command = TSDB_SQL_INSERT; - pCmd->isInsertFromFile = -1; + pSql->res.numOfRows = 0; - - return doParserInsertSql(pSql, sql + index); + return doParseInsertSql(pSql, pSql->sqlstr + index); } -int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) { +int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion) { int32_t ret = TSDB_CODE_SUCCESS; - // must before clean the sqlcmd object - tscRemoveAllMeterMetaInfo(&pSql->cmd, false); - if (NULL == pSql->asyncTblPos) { tscCleanSqlCmd(&pSql->cmd); } else { tscTrace("continue parse sql: %s", pSql->asyncTblPos); } - if (tscIsInsertOrImportData(pSql->sqlstr)) { /* * only for async multi-vnode insertion @@ -1272,11 +1321,11 @@ int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) { pSql->fp = tscAsyncInsertMultiVnodesProxy; } - ret = tsParseInsertSql(pSql, pSql->sqlstr, acct, db); + ret = tsParseInsertSql(pSql); } else { ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); if (TSDB_CODE_SUCCESS != ret) return ret; - + SSqlInfo SQLInfo = {0}; tSQLParse(&SQLInfo, pSql->sqlstr); @@ -1299,7 +1348,8 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock int32_t code = TSDB_CODE_SUCCESS; SSqlCmd *pCmd = &pSql->cmd; - SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta; + assert(pCmd->numOfClause == 1); + SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0)->pMeterMeta; SShellSubmitBlock *pBlocks = (SShellSubmitBlock *)(pTableDataBlocks->pData); tsSetBlockInfo(pBlocks, pMeterMeta, numOfRows); @@ -1326,54 +1376,51 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp, char *tmpTokenBuf) { char * line = NULL; size_t n = 0; int len = 0; - uint32_t maxRows = 0; + int32_t maxRows = 0; SSqlCmd * pCmd = &pSql->cmd; int numOfRows = 0; int32_t code = 0; int nrows = 0; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; + assert(pCmd->numOfClause == 1); + int32_t rowSize = pMeterMeta->rowSize; pCmd->pDataBlocks = tscCreateBlockArrayList(); STableDataBlocks *pTableDataBlock = NULL; - int32_t ret = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, pMeterMeta->rowSize, sizeof(SShellSubmitBlock), - pMeterMetaInfo->name, &pTableDataBlock); + int32_t ret = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, rowSize, sizeof(SShellSubmitBlock), + pMeterMetaInfo->name, pMeterMeta, &pTableDataBlock); if (ret != TSDB_CODE_SUCCESS) { return -1; } - + tscAppendDataBlock(pCmd->pDataBlocks, pTableDataBlock); - maxRows = tscAllocateMemIfNeed(pTableDataBlock, rowSize); - if (maxRows < 1) return -1; + code = tscAllocateMemIfNeed(pTableDataBlock, rowSize, &maxRows); + if (TSDB_CODE_SUCCESS != code) return -1; int count = 0; - SParsedDataColInfo spd = {.numOfCols = pMeterMetaInfo->pMeterMeta->numOfColumns}; - SSchema * pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); + SParsedDataColInfo spd = {.numOfCols = pMeterMeta->numOfColumns}; + SSchema * pSchema = tsGetSchema(pMeterMeta); - tscSetAssignedColumnInfo(&spd, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns); + tscSetAssignedColumnInfo(&spd, pSchema, pMeterMeta->numOfColumns); while 
((readLen = getline(&line, &n, fp)) != -1) { // line[--readLen] = '\0'; if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) line[--readLen] = 0; - if (readLen == 0) continue; //fang, <= to == + if (readLen == 0) continue; // fang, <= to == char *lineptr = line; strtolower(line, line); - - if (numOfRows >= maxRows || pTableDataBlock->size + pMeterMeta->rowSize >= pTableDataBlock->nAllocSize) { - uint32_t tSize = tscAllocateMemIfNeed(pTableDataBlock, pMeterMeta->rowSize); - if (0 == tSize) return (-TSDB_CODE_CLI_OUT_OF_MEMORY); - maxRows += tSize; - } - + len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision, &code, tmpTokenBuf); if (len <= 0 || pTableDataBlock->numOfParams > 0) { pSql->res.code = code; return (-code); } - + pTableDataBlock->size += len; count++; @@ -1427,11 +1474,13 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) { } STableDataBlocks *pDataBlock = NULL; - SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + assert(pCmd->numOfClause == 1); + int32_t code = TSDB_CODE_SUCCESS; /* the first block has been sent to server in processSQL function */ - assert(pCmd->isInsertFromFile != -1 && pMeterMetaInfo->vnodeIndex >= 1 && pCmd->pDataBlocks != NULL); + assert(pMeterMetaInfo->vnodeIndex >= 1 && pCmd->pDataBlocks != NULL); if (pMeterMetaInfo->vnodeIndex < pCmd->pDataBlocks->nSize) { SDataBlockList *pDataBlocks = pCmd->pDataBlocks; @@ -1443,7 +1492,8 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) { } if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) { - tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex, pDataBlocks->nSize); + tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex, + pDataBlocks->nSize); continue; } @@ -1456,17 +1506,19 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) { } // multi-vnodes insertion in sync query model -void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) { +void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; if (pCmd->command != TSDB_SQL_INSERT) { return; } - SMeterMetaInfo * pInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + STableDataBlocks *pDataBlock = NULL; int32_t affected_rows = 0; - assert(pCmd->isInsertFromFile == 1 && pCmd->pDataBlocks != NULL); + assert(pCmd->dataSourceType == DATA_FROM_DATA_FILE && pCmd->pDataBlocks != NULL); SDataBlockList *pDataBlockList = pCmd->pDataBlocks; pCmd->pDataBlocks = NULL; @@ -1477,7 +1529,7 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) { if (pDataBlock == NULL) { continue; } - + if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE)) { tscError("%p failed to malloc when insert file", pSql); continue; @@ -1492,16 +1544,16 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) { continue; } - strncpy(pInfo->name, pDataBlock->meterId, TSDB_METER_ID_LEN); + strncpy(pMeterMetaInfo->name, pDataBlock->meterId, TSDB_METER_ID_LEN); memset(pDataBlock->pData, 0, pDataBlock->nAllocSize); - int32_t ret = tscGetMeterMeta(pSql, pInfo->name, 0); + int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo); if (ret != TSDB_CODE_SUCCESS) { tscError("%p get meter meta failed, abort", pSql); continue; } - - char* tmpTokenBuf = calloc(1, 4096); // used for 
deleting Escape character: \\, \', \" + + char *tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \" if (NULL == tmpTokenBuf) { tscError("%p calloc failed", pSql); continue; @@ -1509,7 +1561,7 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) { int nrows = tscInsertDataFromFile(pSql, fp, tmpTokenBuf); free(tmpTokenBuf); - + pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); if (nrows < 0) { diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index f957c584fef8580a28fecf5c4382b49c2d752c19..4ab63c18e9ad339664b1771bead46aedcb0c9d48 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -22,7 +22,7 @@ #include "tstrbuild.h" -int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db); +int tsParseInsertSql(SSqlObj *pSql); int taos_query_imp(STscObj* pObj, SSqlObj* pSql); //////////////////////////////////////////////////////////////////////////////// @@ -385,12 +385,11 @@ static int insertStmtAddBatch(STscStmt* stmt) { } static int insertStmtPrepare(STscStmt* stmt) { - STscObj* taos = stmt->taos; SSqlObj *pSql = stmt->pSql; pSql->cmd.numOfParams = 0; pSql->cmd.batchSize = 0; - return tsParseInsertSql(pSql, pSql->sqlstr, taos->acctId, taos->db); + return tsParseInsertSql(pSql); } static int insertStmtReset(STscStmt* pStmt) { @@ -409,7 +408,7 @@ static int insertStmtReset(STscStmt* pStmt) { } pCmd->batchSize = 0; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); pMeterMetaInfo->vnodeIndex = 0; return TSDB_CODE_SUCCESS; } @@ -423,7 +422,8 @@ static int insertStmtExecute(STscStmt* stmt) { ++pCmd->batchSize; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + assert(pCmd->numOfClause == 1); if (pCmd->pDataBlocks->nSize > 0) { // merge according to vgid @@ -448,6 +448,8 @@ static int insertStmtExecute(STscStmt* stmt) { SSqlRes *pRes = &pSql->res; pRes->numOfRows = 0; pRes->numOfTotal = 0; + pRes->numOfTotalInCurrentClause = 0; + pRes->qhandle = 0; pSql->thandle = NULL; diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index f5925b61cd3c78895f87bc02b3e4fee2b65fb2f4..a7a774b3a8ce71a608d15ec9a71f931a7a59a06a 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -202,10 +202,10 @@ void tscKillStream(STscObj *pObj, uint32_t killId) { tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId); } - taos_close_stream(pStream); if (pStream->callback) { pStream->callback(pStream->param); } + taos_close_stream(pStream); } char *tscBuildQueryStreamDesc(char *pMsg, STscObj *pObj) { @@ -285,8 +285,9 @@ void tscKillConnection(STscObj *pObj) { SSqlStream *pStream = pObj->streamList; while (pStream) { + SSqlStream *tmp = pStream->next; taos_close_stream(pStream); - pStream = pStream->next; + pStream = tmp; } pthread_mutex_unlock(&pObj->mutex); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 422ae707ad1a63188088f996fceca127fbdba08a..ef5ec5808b66b3a100fd72714d7e0972eb574298 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -42,13 +42,7 @@ typedef struct SColumnList { SColumnIndex ids[TSDB_MAX_COLUMNS]; } SColumnList; -typedef struct SColumnIdListRes { - SSchema* pSchema; - int32_t numOfCols; - SColumnList list; -} SColumnIdListRes; - -static SSqlExpr* doAddProjectCol(SSqlCmd* pCmd, int32_t 
outputIndex, int32_t colIdx, int32_t tableIndex); +static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIdx, int32_t tableIndex); static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo); static char* getAccountId(SSqlObj* pSql); @@ -59,75 +53,82 @@ static bool hasSpecifyDB(SSQLToken* pTableName); static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd); static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSqlCmd* pCmd); -static int32_t setObjFullName(char* fullName, char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* len); +static int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* len); static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLength); static void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, char* columnName); -static int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem); -static int32_t insertResultField(SSqlCmd* pCmd, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type, - char* fieldName); +static int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIdx, tSQLExprItem* pItem); +static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, + int8_t type, char* fieldName); static int32_t changeFunctionID(int32_t optr, int16_t* functionId); -static int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric); +static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable); -static bool validateIpAddress(char* ip); -static bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd); -static bool functionCompatibleCheck(SSqlCmd* pCmd); -static void setColumnOffsetValueInResultset(SSqlCmd* pCmd); +static bool validateIpAddress(const char* ip, size_t size); +static bool hasUnsupportFunctionsForSTableQuery(SQueryInfo* pQueryInfo); +static bool functionCompatibleCheck(SQueryInfo* pQueryInfo); +static void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo); -static int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList); +static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd); -static int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql); -static int32_t setSlidingClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql); +static int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); +static int32_t setSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); -static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, tSQLExprItem* pItem); +static int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pItem); -static int32_t parseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr); -static int32_t parseFillClause(SSqlCmd* pCmd, SQuerySQL* pQuerySQL); -static int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema, int32_t numOfCols); +static int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql); +static int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL); +static int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema); -static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd); +static int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo); static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); 
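Nearly every prototype in this block trades an `SSqlCmd*` parameter for a `SQueryInfo*` or an explicit destination buffer; in particular, `invalidSqlErrMsg` now writes into whatever buffer the caller supplies (typically `tscGetErrorMsgPayload(pCmd)`). A small illustrative sketch of that error-plumbing style is below, with stand-in names rather than the real API:

```c
/*
 * Illustrative-only sketch of the signature style adopted in this file:
 * validators receive the destination error buffer directly instead of the
 * whole SSqlCmd, so the same helper can serve any clause. Stand-in names.
 */
#include <stdio.h>
#include <string.h>

#define PAYLOAD_SIZE 128
#define MAX_NAME_LEN 64

static int invalidSqlErrMsg(char *dstBuffer, const char *errMsg) {
  /* write the message where the caller wants it, return a failure code */
  snprintf(dstBuffer, PAYLOAD_SIZE, "invalid SQL: %s", errMsg);
  return -1;
}

static int validateColumnNameLen(const char *name, char *errBuf) {
  if (strlen(name) > MAX_NAME_LEN) {
    return invalidSqlErrMsg(errBuf, "name too long");
  }
  return 0;
}

int main(void) {
  char payload[PAYLOAD_SIZE];  /* stands in for pCmd->payload */

  const char *name =
      "an_excessively_long_column_name_that_overflows_the_sixty_four_char_limit";

  if (validateColumnNameLen(name, payload) != 0) {
    fprintf(stderr, "%s\n", payload);  /* error text lands in the payload */
  } else {
    printf("name accepted\n");
  }
  return 0;
}
```

Decoupling the helpers from `SSqlCmd` this way keeps the error text in the command's payload buffer while letting each validator be reused per clause.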
-static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd); +static int32_t validateSqlFunctionInStreamSql(SQueryInfo* pQueryInfo); static int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString); -static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd); -static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int32_t numOfCols, SColumnIdListRes* pList); +static int32_t validateFunctionsInIntervalOrGroupbyQuery(SQueryInfo* pQueryInfo); +static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList); static int32_t validateDNodeConfig(tDCLSQL* pOptions); static int32_t validateLocalConfig(tDCLSQL* pOptions); static int32_t validateColumnName(char* name); static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField); -static bool hasTimestampForPointInterpQuery(SSqlCmd* pCmd); -static void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex); +static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo); +static bool hasDefaultQueryTimeRange(SQueryInfo *pQueryInfo); + +static void updateTagColumnIndex(SQueryInfo* pQueryInfo, int32_t tableIndex); -static int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql); +static int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql); static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql); -static int32_t getColumnIndexByNameEx(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex); -static int32_t getTableIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex); +static int32_t getColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex); +static int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex); static int32_t optrToString(tSQLExpr* pExpr, char** exprString); -static int32_t getMeterIndex(SSQLToken* pTableToken, SSqlCmd* pCmd, SColumnIndex* pIndex); -static int32_t doFunctionsCompatibleCheck(SSqlObj* pSql); -static int32_t doLocalQueryProcess(SQuerySQL* pQuerySql, SSqlCmd* pCmd); +static int32_t getMeterIndex(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex); +static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); +static int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate); static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex); +static int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo); +static int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo); +static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo); +static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index); + /* * Used during parsing query sql. Since the query sql usually small in length, error position * is not needed in the final error message. 
*/ -static int32_t invalidSqlErrMsg(SSqlCmd* pCmd, const char* errMsg) { - return tscInvalidSQLErrMsg(pCmd->payload, errMsg, NULL); +static int32_t invalidSqlErrMsg(char* dstBuffer, const char* errMsg) { + return tscInvalidSQLErrMsg(dstBuffer, errMsg, NULL); } -static int32_t tscQueryOnlyMetricTags(SSqlCmd* pCmd, bool* queryOnMetricTags) { - assert(QUERY_IS_STABLE_QUERY(pCmd->type)); +static int32_t tscQueryOnlyMetricTags(SQueryInfo* pQueryInfo, bool* queryOnMetricTags) { + assert(QUERY_IS_STABLE_QUERY(pQueryInfo->type)); *queryOnMetricTags = true; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId != TSDB_FUNC_TAGPRJ && !(pExpr->functionId == TSDB_FUNC_COUNT && pExpr->colInfo.colIdx == TSDB_TBNAME_COLUMN_INDEX)) { @@ -139,21 +140,21 @@ static int32_t tscQueryOnlyMetricTags(SSqlCmd* pCmd, bool* queryOnMetricTags) { return TSDB_CODE_SUCCESS; } -static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, tVariant* pVar) { +static int setColumnFilterInfoForTimestamp(SQueryInfo* pQueryInfo, tVariant* pVar) { int64_t time = 0; const char* msg = "invalid timestamp"; strdequote(pVar->pz); char* seg = strnchr(pVar->pz, '-', pVar->nLen, false); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); if (seg != NULL) { if (taosParseTime(pVar->pz, &time, pVar->nLen, pMeterMetaInfo->pMeterMeta->precision) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg); } } else { if (tVariantDump(pVar, (char*)&time, TSDB_DATA_TYPE_BIGINT)) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg); } } @@ -163,153 +164,137 @@ static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, tVariant* pVar) { return TSDB_CODE_SUCCESS; } +static int32_t handlePassword(SSqlCmd* pCmd, SSQLToken* pPwd) { + const char* msg1 = "password can not be empty"; + const char* msg2 = "name or password too long"; + const char* msg3 = "password needs single quote marks enclosed"; + + if (pPwd->type != TK_STRING) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + + strdequote(pPwd->z); + strtrim(pPwd->z); // trim space before and after passwords + pPwd->n = strlen(pPwd->z); + + if (pPwd->n <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pPwd->n > TSDB_PASSWORD_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + return TSDB_CODE_SUCCESS; +} + // todo handle memory leak in error handle function int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (pInfo == NULL || pSql == NULL || pSql->signature != pSql) { return TSDB_CODE_APP_ERROR; } - SSqlCmd* pCmd = &(pSql->cmd); + SSqlCmd* pCmd = &(pSql->cmd); + SQueryInfo* pQueryInfo = NULL; - if (!pInfo->validSql) { - return invalidSqlErrMsg(pCmd, pInfo->pzErrMsg); + if (!pInfo->valid) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), pInfo->pzErrMsg); } - SMeterMetaInfo* pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd); + int32_t code = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo); + assert(pQueryInfo->numOfTables == 0); + + SMeterMetaInfo* pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo); + + pCmd->command = pInfo->type; - // transfer pInfo into select operation - switch (pInfo->sqlType) { - case 
DROP_TABLE: - case DROP_USER: - case DROP_ACCOUNT: - case DROP_DNODE: - case DROP_DATABASE: { - const char* msg = "param name too long"; + switch (pInfo->type) { + case TSDB_SQL_DROP_TABLE: + case TSDB_SQL_DROP_USER: + case TSDB_SQL_DROP_ACCT: + case TSDB_SQL_DROP_DNODE: + case TSDB_SQL_DROP_DB: { const char* msg1 = "invalid ip address"; const char* msg2 = "invalid name"; + const char* msg3 = "param name too long"; SSQLToken* pzName = &pInfo->pDCLInfo->a[0]; - if ((pInfo->sqlType != DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) { - return invalidSqlErrMsg(pCmd, msg2); + if ((pInfo->type != TSDB_SQL_DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - if (pInfo->sqlType == DROP_DATABASE) { - assert(pInfo->pDCLInfo->nTokens == 2); - - pCmd->command = TSDB_SQL_DROP_DB; - pCmd->existsCheck = (pInfo->pDCLInfo->a[1].n == 1); + if (pInfo->type == TSDB_SQL_DROP_DB) { + assert(pInfo->pDCLInfo->nTokens == 1); - int32_t code = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pzName, NULL, NULL); + code = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pzName, NULL, NULL); if (code != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - return code; - } else if (pInfo->sqlType == DROP_TABLE) { - assert(pInfo->pDCLInfo->nTokens == 2); - - pCmd->existsCheck = (pInfo->pDCLInfo->a[1].n == 1); - pCmd->command = TSDB_SQL_DROP_TABLE; + } else if (pInfo->type == TSDB_SQL_DROP_TABLE) { + assert(pInfo->pDCLInfo->nTokens == 1); - int32_t ret = setMeterID(pSql, pzName, 0); - if (ret != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg); + if (setMeterID(pMeterMetaInfo, pzName, pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } - return ret; - } else { - if (pzName->n > TSDB_USER_LEN) { - return invalidSqlErrMsg(pCmd, msg); + } else if (pInfo->type == TSDB_SQL_DROP_DNODE) { + if (!validateIpAddress(pzName->z, pzName->n)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pInfo->sqlType == DROP_USER) { - pCmd->command = TSDB_SQL_DROP_USER; - } else if (pInfo->sqlType == DROP_ACCOUNT) { - pCmd->command = TSDB_SQL_DROP_ACCT; - } else if (pInfo->sqlType == DROP_DNODE) { - pCmd->command = TSDB_SQL_DROP_DNODE; - const int32_t MAX_IP_ADDRESS_LEGNTH = 16; - - if (pzName->n > MAX_IP_ADDRESS_LEGNTH) { - return invalidSqlErrMsg(pCmd, msg1); - } - - char str[128] = {0}; - strncpy(str, pzName->z, pzName->n); - if (!validateIpAddress(str)) { - return invalidSqlErrMsg(pCmd, msg1); - } + strncpy(pMeterMetaInfo->name, pzName->z, pzName->n); + } else { // drop user + if (pzName->n > TSDB_USER_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } strncpy(pMeterMetaInfo->name, pzName->z, pzName->n); - return TSDB_CODE_SUCCESS; } - } - case USE_DATABASE: { - pCmd->command = TSDB_SQL_USE_DB; + break; + } - SSQLToken* pToken = &pInfo->pDCLInfo->a[0]; + case TSDB_SQL_USE_DB: { + const char* msg = "invalid db name"; + SSQLToken* pToken = &pInfo->pDCLInfo->a[0]; if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, "invalid db name"); - } - - if (pToken->n > TSDB_DB_NAME_LEN) { - const char* msg = "db name too long"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } int32_t ret = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pToken, NULL, NULL); if (ret != TSDB_CODE_SUCCESS) { - return ret; + 
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } break; } - case RESET_QUERY_CACHE: { - pCmd->command = TSDB_SQL_RESET_CACHE; - break; + case TSDB_SQL_RESET_CACHE: { + return TSDB_CODE_SUCCESS; } - case SHOW_DATABASES: - case SHOW_TABLES: - case SHOW_STABLES: - case SHOW_MNODES: - case SHOW_DNODES: - case SHOW_ACCOUNTS: - case SHOW_USERS: - case SHOW_VGROUPS: - case SHOW_MODULES: - case SHOW_CONNECTIONS: - case SHOW_QUERIES: - case SHOW_STREAMS: - case SHOW_SCORES: - case SHOW_GRANTS: - case SHOW_CONFIGS: - case SHOW_VNODES: { - return setShowInfo(pSql, pInfo); - } - - case ALTER_DATABASE: - case CREATE_DATABASE: { - if (pInfo->sqlType == ALTER_DATABASE) { - pCmd->command = TSDB_SQL_ALTER_DB; - } else { - pCmd->command = TSDB_SQL_CREATE_DB; - pCmd->existsCheck = (pInfo->pDCLInfo->a[0].n == 1); + case TSDB_SQL_SHOW: { + if (setShowInfo(pSql, pInfo) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; } + break; + } + + case TSDB_SQL_ALTER_DB: + case TSDB_SQL_CREATE_DB: { + const char* msg1 = "invalid db name"; + const char* msg2 = "name too long"; + SCreateDBInfo* pCreateDB = &(pInfo->pDCLInfo->dbOpt); if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) { - const char* msg3 = "invalid db name"; - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } int32_t ret = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname), NULL, NULL); if (ret != TSDB_CODE_SUCCESS) { - const char* msg2 = "name too long"; - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } if (parseCreateDBOptions(pCmd, pCreateDB) != TSDB_CODE_SUCCESS) { @@ -319,282 +304,167 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { break; } - case CREATE_DNODE: { - // todo parse hostname - pCmd->command = TSDB_SQL_CREATE_DNODE; + case TSDB_SQL_CREATE_DNODE: { // todo parse hostname const char* msg = "invalid ip address"; - char ipAddr[64] = {0}; - const int32_t MAX_IP_ADDRESS_LENGTH = 16; - if (pInfo->pDCLInfo->nTokens > 1 || pInfo->pDCLInfo->a[0].n > MAX_IP_ADDRESS_LENGTH) { - return invalidSqlErrMsg(pCmd, msg); - } - - memcpy(ipAddr, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); - if (validateIpAddress(ipAddr) == false) { - return invalidSqlErrMsg(pCmd, msg); - } - - strncpy(pMeterMetaInfo->name, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); - break; - } - - case CREATE_ACCOUNT: - case CREATE_USER: { - pCmd->command = (pInfo->sqlType == CREATE_USER) ? 
TSDB_SQL_CREATE_USER : TSDB_SQL_CREATE_ACCT; - assert(pInfo->pDCLInfo->nTokens >= 2); - - if (pInfo->pDCLInfo->a[1].type != TK_STRING) { - const char* msg3 = "password needs single quote marks enclosed"; - return invalidSqlErrMsg(pCmd, msg3); - } - - strdequote(pInfo->pDCLInfo->a[1].z); - strtrim(pInfo->pDCLInfo->a[1].z); // trim space before and after passwords - pInfo->pDCLInfo->a[1].n = strlen(pInfo->pDCLInfo->a[1].z); - - if (pInfo->pDCLInfo->a[1].n <= 0) { - const char* msg1 = "password can not be empty"; - return invalidSqlErrMsg(pCmd, msg1); + if (pInfo->pDCLInfo->nTokens > 1) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - if (pInfo->pDCLInfo->a[0].n > TSDB_USER_LEN || pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { - const char* msg = "name or password too long"; - return invalidSqlErrMsg(pCmd, msg); + SSQLToken* pIpAddr = &pInfo->pDCLInfo->a[0]; + if (!validateIpAddress(pIpAddr->z, pIpAddr->n)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - if (tscValidateName(&pInfo->pDCLInfo->a[0]) != TSDB_CODE_SUCCESS) { - const char* msg2 = "invalid user/account name"; - return invalidSqlErrMsg(pCmd, msg2); - } - - strncpy(pMeterMetaInfo->name, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); // name - strncpy(pCmd->payload, pInfo->pDCLInfo->a[1].z, pInfo->pDCLInfo->a[1].n); // passwd - - if (pInfo->sqlType == CREATE_ACCOUNT) { - SCreateAcctSQL* pAcctOpt = &pInfo->pDCLInfo->acctOpt; - - pCmd->defaultVal[0] = pAcctOpt->users; - pCmd->defaultVal[1] = pAcctOpt->dbs; - pCmd->defaultVal[2] = pAcctOpt->tseries; - pCmd->defaultVal[3] = pAcctOpt->streams; - pCmd->defaultVal[4] = pAcctOpt->pps; - pCmd->defaultVal[5] = pAcctOpt->storage; - pCmd->defaultVal[6] = pAcctOpt->qtime; - pCmd->defaultVal[7] = pAcctOpt->conns; - - if (pAcctOpt->stat.n == 0) { - pCmd->defaultVal[8] = -1; - } else { - strdequote(pAcctOpt->stat.z); - pAcctOpt->stat.n = strlen(pAcctOpt->stat.z); - - if (pAcctOpt->stat.z[0] == 'r' && pAcctOpt->stat.n == 1) { - pCmd->defaultVal[8] = TSDB_VN_READ_ACCCESS; - } else if (pAcctOpt->stat.z[0] == 'w' && pAcctOpt->stat.n == 1) { - pCmd->defaultVal[8] = TSDB_VN_WRITE_ACCCESS; - } else if (strncmp(pAcctOpt->stat.z, "all", 3) == 0 && pAcctOpt->stat.n == 3) { - pCmd->defaultVal[8] = TSDB_VN_ALL_ACCCESS; - } else if (strncmp(pAcctOpt->stat.z, "no", 2) == 0 && pAcctOpt->stat.n == 2) { - pCmd->defaultVal[8] = 0; - } else { - const char* msg4 = "invalid state option, available options[no, r, w, all]"; - return invalidSqlErrMsg(pCmd, msg4); - } - } - } break; } - case ALTER_ACCT: { - pCmd->command = TSDB_SQL_ALTER_ACCT; - int32_t num = pInfo->pDCLInfo->nTokens; - assert(num >= 1 && num <= 2); - - const char* msg = "password too long"; - - if (num == 2) { - if (pInfo->pDCLInfo->a[1].type != TK_STRING) { - const char* msg3 = "password needs single quote marks enclosed"; - return invalidSqlErrMsg(pCmd, msg3); - } - strdequote(pInfo->pDCLInfo->a[1].z); - strtrim(pInfo->pDCLInfo->a[1].z); // trim space before and after passwords - pInfo->pDCLInfo->a[1].n = strlen(pInfo->pDCLInfo->a[1].z); + case TSDB_SQL_CREATE_ACCT: + case TSDB_SQL_ALTER_ACCT: { + const char* msg1 = "invalid state option, available options[no, r, w, all]"; + const char* msg2 = "invalid user/account name"; + const char* msg3 = "name too long"; - if (pInfo->pDCLInfo->a[1].n <= 0) { - const char* msg1 = "password can not be empty"; - return invalidSqlErrMsg(pCmd, msg1); - } - - if (pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { - return invalidSqlErrMsg(pCmd, msg); - } + SSQLToken* pName = 
&pInfo->pDCLInfo->user.user; + SSQLToken* pPwd = &pInfo->pDCLInfo->user.passwd; - strncpy(pCmd->payload, pInfo->pDCLInfo->a[1].z, pInfo->pDCLInfo->a[1].n); // passwd + if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; } - if (pInfo->pDCLInfo->a[0].n > TSDB_USER_LEN) { - return invalidSqlErrMsg(pCmd, msg); + if (pName->n > TSDB_USER_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } - if (tscValidateName(&pInfo->pDCLInfo->a[0]) != TSDB_CODE_SUCCESS) { - const char* msg2 = "invalid user/account name"; - return invalidSqlErrMsg(pCmd, msg2); + if (tscValidateName(pName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - strncpy(pMeterMetaInfo->name, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); // name - SCreateAcctSQL* pAcctOpt = &pInfo->pDCLInfo->acctOpt; - pCmd->defaultVal[0] = pAcctOpt->users; - pCmd->defaultVal[1] = pAcctOpt->dbs; - pCmd->defaultVal[2] = pAcctOpt->tseries; - pCmd->defaultVal[3] = pAcctOpt->streams; - pCmd->defaultVal[4] = pAcctOpt->pps; - pCmd->defaultVal[5] = pAcctOpt->storage; - pCmd->defaultVal[6] = pAcctOpt->qtime; - pCmd->defaultVal[7] = pAcctOpt->conns; - - if (pAcctOpt->stat.n == 0) { - pCmd->defaultVal[8] = -1; - } else { - strdequote(pAcctOpt->stat.z); - pAcctOpt->stat.n = strlen(pAcctOpt->stat.z); - + if (pAcctOpt->stat.n > 0) { if (pAcctOpt->stat.z[0] == 'r' && pAcctOpt->stat.n == 1) { - pCmd->defaultVal[8] = TSDB_VN_READ_ACCCESS; } else if (pAcctOpt->stat.z[0] == 'w' && pAcctOpt->stat.n == 1) { - pCmd->defaultVal[8] = TSDB_VN_WRITE_ACCCESS; } else if (strncmp(pAcctOpt->stat.z, "all", 3) == 0 && pAcctOpt->stat.n == 3) { - pCmd->defaultVal[8] = TSDB_VN_ALL_ACCCESS; } else if (strncmp(pAcctOpt->stat.z, "no", 2) == 0 && pAcctOpt->stat.n == 2) { - pCmd->defaultVal[8] = 0; } else { - const char* msg4 = "invalid state option, available options[no, r, w, all]"; - return invalidSqlErrMsg(pCmd, msg4); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } } + break; } - case DESCRIBE_TABLE: { - pCmd->command = TSDB_SQL_DESCRIBE_TABLE; + case TSDB_SQL_DESCRIBE_TABLE: { SSQLToken* pToken = &pInfo->pDCLInfo->a[0]; - const char* msg = "table name is too long"; + const char* msg2 = "table name is too long"; const char* msg1 = "invalid table name"; if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } if (pToken->n > TSDB_METER_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - if (setMeterID(pSql, pToken, 0) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); + if (setMeterID(pMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - - break; + return tscGetMeterMeta(pSql, pMeterMetaInfo); } - case ALTER_DNODE: - case ALTER_USER_PASSWD: - case ALTER_USER_PRIVILEGES: { - pCmd->command = (pInfo->sqlType == ALTER_DNODE) ? 
TSDB_SQL_CFG_DNODE : TSDB_SQL_ALTER_USER; - tDCLSQL* pDCL = pInfo->pDCLInfo; - - const char* msg = "parameters too long"; + case TSDB_SQL_CFG_DNODE: { const char* msg1 = "invalid ip address"; const char* msg2 = "invalid configure options or values"; - const char* msg3 = "password can not be empty"; - if (pInfo->sqlType != ALTER_DNODE) { - strdequote(pDCL->a[1].z); - strtrim(pDCL->a[1].z); - pDCL->a[1].n = strlen(pDCL->a[1].z); + /* validate the ip address */ + tDCLSQL* pDCL = pInfo->pDCLInfo; + if (!validateIpAddress(pDCL->a[0].z, pDCL->a[0].n)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pDCL->a[1].n <= 0) { - return invalidSqlErrMsg(pCmd, msg3); + /* validate the parameter names and options */ + if (validateDNodeConfig(pDCL) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - if (pDCL->a[0].n > TSDB_METER_NAME_LEN || pDCL->a[1].n > TSDB_PASSWORD_LEN) { - return invalidSqlErrMsg(pCmd, msg); - } + char* pMsg = pCmd->payload + tsRpcHeadSize; + pMsg += sizeof(SMgmtHead); - if (pCmd->command == TSDB_SQL_CFG_DNODE) { - char ip[128] = {0}; - strncpy(ip, pDCL->a[0].z, pDCL->a[0].n); + SCfgMsg* pCfg = (SCfgMsg*)pMsg; + strncpy(pCfg->ip, pDCL->a[0].z, pDCL->a[0].n); - /* validate the ip address */ - if (!validateIpAddress(ip)) { - return invalidSqlErrMsg(pCmd, msg1); - } + strncpy(pCfg->config, pDCL->a[1].z, pDCL->a[1].n); - strcpy(pMeterMetaInfo->name, ip); + if (pDCL->nTokens == 3) { + pCfg->config[pDCL->a[1].n] = ' '; // add sep + strncpy(&pCfg->config[pDCL->a[1].n + 1], pDCL->a[2].z, pDCL->a[2].n); + } - /* validate the parameter names and options */ - if (validateDNodeConfig(pDCL) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg2); - } + break; + } - strncpy(pCmd->payload, pDCL->a[1].z, pDCL->a[1].n); + case TSDB_SQL_CREATE_USER: + case TSDB_SQL_ALTER_USER: { + const char* msg5 = "invalid user rights"; + const char* msg7 = "not support options"; + const char* msg2 = "invalid user/account name"; + const char* msg3 = "name too long"; - if (pDCL->nTokens == 3) { - pCmd->payload[pDCL->a[1].n] = ' '; // add sep - strncpy(&pCmd->payload[pDCL->a[1].n + 1], pDCL->a[2].z, pDCL->a[2].n); - } - } else { - const char* msg = "invalid user rights"; - const char* msg1 = "password can not be empty or larger than 24 characters"; + pCmd->command = pInfo->type; + //tDCLSQL* pDCL = pInfo->pDCLInfo; - strncpy(pMeterMetaInfo->name, pDCL->a[0].z, pDCL->a[0].n); + SUserInfo* pUser = &pInfo->pDCLInfo->user; + SSQLToken* pName = &pUser->user; + SSQLToken* pPwd = &pUser->passwd; - if (pInfo->sqlType == ALTER_USER_PASSWD) { - /* update the password for user */ - pCmd->order.order |= TSDB_ALTER_USER_PASSWD; + if (pName->n > TSDB_USER_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } - strdequote(pDCL->a[1].z); - pDCL->a[1].n = strlen(pDCL->a[1].z); + if (tscValidateName(pName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } - if (pDCL->a[1].n <= 0 || pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { - /* password cannot be empty string */ - return invalidSqlErrMsg(pCmd, msg1); + if (pCmd->command == TSDB_SQL_CREATE_USER) { + if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + } else { + if (pUser->type == TSDB_ALTER_USER_PASSWD) { + if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; } + } else if (pUser->type == TSDB_ALTER_USER_PRIVILEGES) { + assert(pPwd->type == TSDB_DATA_TYPE_NULL); - 
strncpy(pCmd->payload, pDCL->a[1].z, pDCL->a[1].n); - } else if (pInfo->sqlType == ALTER_USER_PRIVILEGES) { - pCmd->order.order |= TSDB_ALTER_USER_PRIVILEGES; + SSQLToken* pPrivilege = &pUser->privilege; - if (strncasecmp(pDCL->a[1].z, "super", 5) == 0 && pDCL->a[1].n == 5) { + if (strncasecmp(pPrivilege->z, "super", 5) == 0 && pPrivilege->n == 5) { pCmd->count = 1; - } else if (strncasecmp(pDCL->a[1].z, "read", 4) == 0 && pDCL->a[1].n == 4) { + } else if (strncasecmp(pPrivilege->z, "read", 4) == 0 && pPrivilege->n == 4) { pCmd->count = 2; - } else if (strncasecmp(pDCL->a[1].z, "write", 5) == 0 && pDCL->a[1].n == 5) { + } else if (strncasecmp(pPrivilege->z, "write", 5) == 0 && pPrivilege->n == 5) { pCmd->count = 3; } else { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } } else { - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } } + break; } - case ALTER_LOCAL: { - pCmd->command = TSDB_SQL_CFG_LOCAL; + + case TSDB_SQL_CFG_LOCAL: { tDCLSQL* pDCL = pInfo->pDCLInfo; const char* msg = "invalid configure options or values"; // validate the parameter names and options if (validateLocalConfig(pDCL) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } strncpy(pCmd->payload, pDCL->a[0].z, pDCL->a[0].n); @@ -605,456 +475,100 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { break; } - case TSQL_CREATE_NORMAL_METER: - case TSQL_CREATE_NORMAL_METRIC: { - const char* msg = "table name too long"; - const char* msg1 = "invalid table name"; - - tFieldList* pFieldList = pInfo->pCreateTableInfo->colInfo.pColumns; - tFieldList* pTagList = pInfo->pCreateTableInfo->colInfo.pTagColumns; - assert(pFieldList != NULL); - pCmd->command = TSDB_SQL_CREATE_TABLE; - pCmd->existsCheck = pInfo->pCreateTableInfo->existCheck; + case TSDB_SQL_CREATE_TABLE: { + SCreateTableSQL* pCreateTable = pInfo->pCreateTableInfo; - // if sql specifies db, use it, otherwise use default db - SSQLToken* pzTableName = &(pInfo->pCreateTableInfo->name); - - if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - if (setMeterID(pSql, pzTableName, 0) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); - } - - if (!validateTableColumnInfo(pFieldList, pCmd) || - (pTagList != NULL && !validateTagParams(pTagList, pFieldList, pCmd))) { - return TSDB_CODE_INVALID_SQL; - } - - int32_t col = 0; - for (; col < pFieldList->nField; ++col) { - tscFieldInfoSetValFromField(&pCmd->fieldsInfo, col, &pFieldList->p[col]); - } - pCmd->numOfCols = (int16_t)pFieldList->nField; - - if (pTagList != NULL) { // create metric[optional] - for (int32_t i = 0; i < pTagList->nField; ++i) { - tscFieldInfoSetValFromField(&pCmd->fieldsInfo, col++, &pTagList->p[i]); - } - pCmd->count = pTagList->nField; - } - - break; - } - case TSQL_CREATE_METER_FROM_METRIC: { - pCmd->command = TSDB_SQL_CREATE_TABLE; - pCmd->existsCheck = pInfo->pCreateTableInfo->existCheck; - - const char* msg = "invalid table name"; - const char* msg1 = "illegal value or data overflow"; - const char* msg2 = "illegal number of tags"; - const char* msg3 = "tag value too long"; - - // table name - // metric name, create table by using dst - SSQLToken* pToken = &(pInfo->pCreateTableInfo->usingInfo.metricName); - - if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); - } - - if (setMeterID(pSql, pToken, 0) != TSDB_CODE_SUCCESS) { - return 
invalidSqlErrMsg(pCmd, msg); - } - - // get meter meta from mnode - STagData* pTag = (STagData*)pCmd->payload; - strncpy(pTag->name, pMeterMetaInfo->name, TSDB_METER_ID_LEN); - - tVariantList* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals; - - int32_t code = tscGetMeterMeta(pSql, pTag->name, 0); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - if (pMeterMetaInfo->pMeterMeta->numOfTags != pList->nExpr) { - return invalidSqlErrMsg(pCmd, msg2); - } - - // too long tag values will return invalid sql, not be truncated automatically - SSchema* pTagSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); - - char* tagVal = pTag->data; - for (int32_t i = 0; i < pList->nExpr; ++i) { - int32_t ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type); - if (ret != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - // validate the length of binary - if ((pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) && - pList->a[i].pVar.nLen > pTagSchema[i].bytes) { - return invalidSqlErrMsg(pCmd, msg3); - } - - tagVal += pTagSchema[i].bytes; - } - - if (tscValidateName(&pInfo->pCreateTableInfo->name) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); - } - - int32_t ret = setMeterID(pSql, &pInfo->pCreateTableInfo->name, 0); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - - pCmd->numOfCols = 0; - pCmd->count = 0; - break; - } - case TSQL_CREATE_STREAM: { - pCmd->command = TSDB_SQL_CREATE_TABLE; - const char* msg1 = "invalid table name"; - const char* msg2 = "table name too long"; - const char* msg3 = "fill only available for interval query"; - const char* msg4 = "fill option not supported in stream computing"; - const char* msg5 = "sql too long"; // todo ADD support - - // if sql specifies db, use it, otherwise use default db - SSQLToken* pzTableName = &(pInfo->pCreateTableInfo->name); - SQuerySQL* pQuerySql = pInfo->pCreateTableInfo->pSelect; - - if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - tVariantList* pSrcMeterName = pInfo->pCreateTableInfo->pSelect->from; - tVariant* pVar = &pSrcMeterName->a[0].pVar; - - SSQLToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING}; - if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - if (setMeterID(pSql, &srcToken, 0) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg2); - } - - int32_t code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - bool isMetric = UTIL_METER_IS_METRIC(pMeterMetaInfo); - if (parseSelectClause(pCmd, pQuerySql->pSelection, isMetric) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - if (pQuerySql->pWhere != NULL) { // query condition in stream computing - if (parseWhereClause(pSql, &pQuerySql->pWhere) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - } - - // set interval value - if (parseIntervalClause(pCmd, pQuerySql) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } else { - if ((pCmd->nAggTimeInterval > 0) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd) != TSDB_CODE_SUCCESS)) { - return TSDB_CODE_INVALID_SQL; + if (pCreateTable->type == TSQL_CREATE_TABLE || pCreateTable->type == TSQL_CREATE_STABLE) { + if ((code = doCheckForCreateTable(pSql, 0, pInfo)) != TSDB_CODE_SUCCESS) { + return code; } - } - - if (setSlidingClause(pCmd, pQuerySql) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - // set the created 
table[stream] name - if (setMeterID(pSql, pzTableName, 0) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - // copy sql length - int ret = tscAllocPayload(pCmd, pQuerySql->selectToken.n + 8); - if (TSDB_CODE_SUCCESS != ret) { - invalidSqlErrMsg(pCmd, "client out of memory"); - return ret; - } - - strncpy(pCmd->payload, pQuerySql->selectToken.z, pQuerySql->selectToken.n); - if (pQuerySql->selectToken.n > TSDB_MAX_SAVED_SQL_LEN) { - return invalidSqlErrMsg(pCmd, msg5); - } - - if (tsRewriteFieldNameIfNecessary(pCmd) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - pCmd->numOfCols = pCmd->fieldsInfo.numOfOutputCols; - - if (validateSqlFunctionInStreamSql(pCmd) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - /* - * check if fill operation is available, the fill operation is parsed and executed during query execution, not - * here. - */ - if (pQuerySql->fillType != NULL) { - if (pCmd->nAggTimeInterval == 0) { - return invalidSqlErrMsg(pCmd, msg3); + } else if (pCreateTable->type == TSQL_CREATE_TABLE_FROM_STABLE) { + if ((code = doCheckForCreateFromStable(pSql, pInfo)) != TSDB_CODE_SUCCESS) { + return code; } - tVariantListItem* pItem = &pQuerySql->fillType->a[0]; - if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) { - if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) || - (strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) { - return invalidSqlErrMsg(pCmd, msg4); - } + } else if (pCreateTable->type == TSQL_CREATE_STREAM) { + if ((code = doCheckForStream(pSql, pInfo)) != TSDB_CODE_SUCCESS) { + return code; } } break; } - case TSQL_QUERY_METER: { - SQuerySQL* pQuerySql = pInfo->pQueryInfo; - assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); - - const char* msg0 = "invalid table name"; - const char* msg1 = "table name too long"; - const char* msg2 = "point interpolation query needs timestamp"; - const char* msg3 = "sliding value too small"; - const char* msg4 = "sliding value no larger than the interval value"; - const char* msg5 = "fill only available for interval query"; - const char* msg6 = "start(end) time of query range required or time range too large"; - const char* msg7 = "illegal number of tables in from clause"; - const char* msg8 = "too many columns in selection clause"; - const char* msg9 = "TWA query requires both the start and end time"; - - int32_t code = TSDB_CODE_SUCCESS; - - // too many result columns not support order by in query - if (pQuerySql->pSelection->nExpr > TSDB_MAX_COLUMNS) { - return invalidSqlErrMsg(pCmd, msg8); - } - - /* - * handle the sql expression without from subclause - * select current_database(); - * select server_version(); - * select client_version(); - * select server_state(); - */ - if (pQuerySql->from == NULL) { - assert(pQuerySql->fillType == NULL && pQuerySql->pGroupby == NULL && pQuerySql->pWhere == NULL && - pQuerySql->pSortOrder == NULL); - return doLocalQueryProcess(pQuerySql, pCmd); - } - - if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM) { - return invalidSqlErrMsg(pCmd, msg7); - } - - // set all query tables, which are maybe more than one. 
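
One behavior in the removed block above is worth flagging before the table-resolution loop that follows: statements with no FROM clause (`select server_version()`, `select client_version()`, `select current_database()`, `select server_state()`) are answered locally by the client and never reach table-name parsing. A minimal sketch of that dispatch, rewritten against the new `doLocalQueryProcess(SQueryInfo*, SQuerySQL*)` signature declared at the top of this patch; everything here reuses identifiers from the diff itself:

```c
/* Sketch only: FROM-less queries short-circuit into client-side evaluation.
 * Mirrors the removed TSQL_QUERY_METER logic above, with the argument order
 * of the refactored doLocalQueryProcess(). */
if (pQuerySql->from == NULL) {
  // such statements cannot carry fill/group-by/where/order-by clauses
  assert(pQuerySql->fillType == NULL && pQuerySql->pGroupby == NULL &&
         pQuerySql->pWhere == NULL && pQuerySql->pSortOrder == NULL);
  return doLocalQueryProcess(pQueryInfo, pQuerySql);
}

// everything else must name at most TSDB_MAX_JOIN_TABLE_NUM tables
if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM) {
  return invalidSqlErrMsg(pQueryInfo->msg, "illegal number of tables in from clause");
}
```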
- for (int32_t i = 0; i < pQuerySql->from->nExpr; ++i) { - tVariant* pTableItem = &pQuerySql->from->a[i].pVar; - - if (pTableItem->nType != TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(pCmd, msg0); - } - - pTableItem->nLen = strdequote(pTableItem->pz); - - SSQLToken tableName = {.z = pTableItem->pz, .n = pTableItem->nLen, .type = TK_STRING}; - if (tscValidateName(&tableName) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg0); - } - - if (pCmd->numOfTables <= i) { - tscAddEmptyMeterMetaInfo(pCmd); - } - - SSQLToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; - if (setMeterID(pSql, &t, i) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - SMeterMetaInfo* pMeterInfo1 = tscGetMeterMetaInfo(pCmd, i); - code = tscGetMeterMeta(pSql, pMeterInfo1->name, i); - if (code != TSDB_CODE_SUCCESS) { + case TSDB_SQL_SELECT: { + assert(pCmd->numOfClause == 1); + const char* msg1 = "columns in select clause not identical"; + + for (int32_t i = pCmd->numOfClause; i < pInfo->subclauseInfo.numOfClause; ++i) { + SQueryInfo* pqi = NULL; + if ((code = tscGetQueryInfoDetailSafely(pCmd, i, &pqi)) != TSDB_CODE_SUCCESS) { return code; } } - pSql->cmd.command = TSDB_SQL_SELECT; - - // parse the group by clause in the first place - if (parseGroupbyClause(pCmd, pQuerySql->pGroupby) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - bool isMetric = UTIL_METER_IS_METRIC(pMeterMetaInfo); - if (parseSelectClause(pCmd, pQuerySql->pSelection, isMetric) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - // set interval value - if (parseIntervalClause(pCmd, pQuerySql) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } else { - if ((pCmd->nAggTimeInterval > 0) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd) != TSDB_CODE_SUCCESS)) { - return TSDB_CODE_INVALID_SQL; - } - } - - // set sliding value - SSQLToken* pSliding = &pQuerySql->sliding; - if (pSliding->n != 0) { - // TODO refactor pCmd->count == 1 means sql in stream function - if (!tscEmbedded && pCmd->count == 0) { - const char* msg = "not support sliding in query"; - return invalidSqlErrMsg(pCmd, msg); - } - - getTimestampInUsFromStr(pSliding->z, pSliding->n, &pCmd->nSlidingTime); - if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { - pCmd->nSlidingTime /= 1000; - } - - if (pCmd->nSlidingTime < tsMinSlidingTime) { - return invalidSqlErrMsg(pCmd, msg3); - } - - if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { - return invalidSqlErrMsg(pCmd, msg4); - } - } - - // set order by info - if (parseOrderbyClause(pCmd, pQuerySql, tsGetSchema(pMeterMetaInfo->pMeterMeta), - pMeterMetaInfo->pMeterMeta->numOfColumns) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - // set where info - if (pQuerySql->pWhere != NULL) { - if (parseWhereClause(pSql, &pQuerySql->pWhere) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - pQuerySql->pWhere = NULL; - - if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { - pCmd->stime = pCmd->stime / 1000; - pCmd->etime = pCmd->etime / 1000; - } - } else { // set the time rang - pCmd->stime = 0; - pCmd->etime = INT64_MAX; - } - - // user does not specified the query time window, twa is not allowed in such case. 
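
The sliding-window checks in the removed block above survive unchanged into the refactored `setSlidingClause()` further down, so the rule is worth stating once. A compact sketch using only calls that appear in this patch; `precision` and `interval` are stand-ins for the values read from the meter meta and the query info:

```c
/* Sketch of the sliding validation: the token parses to microseconds, is
 * scaled down for millisecond-precision tables, and must satisfy
 * tsMinSlidingTime <= sliding <= interval. */
int64_t sliding = 0;
getTimestampInUsFromStr(pSliding->z, pSliding->n, &sliding);

if (precision == TSDB_TIME_PRECISION_MILLI) {
  sliding /= 1000;  // keep the value in the table's own time precision
}

if (sliding < tsMinSlidingTime) {
  return invalidSqlErrMsg(pQueryInfo->msg, "sliding value too small");
}
if (sliding > interval) {  // interval == pQueryInfo->nAggTimeInterval
  return invalidSqlErrMsg(pQueryInfo->msg, "sliding value no larger than the interval value");
}
```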
- if ((pCmd->stime == 0 || pCmd->etime == INT64_MAX || - (pCmd->etime == INT64_MAX / 1000 && pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI)) && - tscIsTWAQuery(pCmd)) { - return invalidSqlErrMsg(pCmd, msg9); - } - - // no result due to invalid query time range - if (pCmd->stime > pCmd->etime) { - pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; - return TSDB_CODE_SUCCESS; - } - - if (!hasTimestampForPointInterpQuery(pCmd)) { - return invalidSqlErrMsg(pCmd, msg2); - } - - if (pQuerySql->fillType != NULL) { - if (pCmd->nAggTimeInterval == 0 && (!tscIsPointInterpQuery(pCmd))) { - return invalidSqlErrMsg(pCmd, msg5); - } + assert(pCmd->numOfClause == pInfo->subclauseInfo.numOfClause); + for (int32_t i = 0; i < pInfo->subclauseInfo.numOfClause; ++i) { + SQuerySQL* pQuerySql = pInfo->subclauseInfo.pClause[i]; - if (pCmd->nAggTimeInterval > 0) { - int64_t timeRange = labs(pCmd->stime - pCmd->etime); - // number of result is not greater than 10,000,000 - if ((timeRange == 0) || (timeRange / pCmd->nAggTimeInterval) > MAX_RETRIEVE_ROWS_IN_INTERVAL_QUERY) { - return invalidSqlErrMsg(pCmd, msg6); - } - } - - int32_t ret = parseFillClause(pCmd, pQuerySql); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + if ((code = doCheckForQuery(pSql, pQuerySql, i)) != TSDB_CODE_SUCCESS) { + return code; } + + tscPrintSelectClause(pSql, i); } - - // in case of join query, time range is required. - if (QUERY_IS_JOIN_QUERY(pCmd->type)) { - int64_t timeRange = labs(pCmd->stime - pCmd->etime); - - if (timeRange == 0 && pCmd->stime == 0) { - return invalidSqlErrMsg(pCmd, msg6); + + // set the command/global limit parameters from the first subclause to the sqlcmd object + SQueryInfo* pQueryInfo1 = tscGetQueryInfoDetail(pCmd, 0); + pCmd->command = pQueryInfo1->command; + + // if there is only one element, the limit of clause is the limit of global result. 
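
One structural change deserves a note here: instead of building the request inline and returning, `tscToSQLCmd()` now ends (see the tail of this switch, shortly below) by dispatching through `tscBuildMsg[pCmd->command](pSql, pInfo)`, a table of per-command message builders. A hedged sketch of that pattern; only the indexed call itself appears in the patch, and the element typedef and `TSDB_SQL_MAX` bound are assumptions made for illustration:

```c
/* Sketch: function-pointer dispatch on the resolved command type.
 * tscBuildMsg[pCmd->command](pSql, pInfo) is taken from the patch; the
 * typedef and the TSDB_SQL_MAX array bound are assumed, not confirmed. */
typedef int32_t (*SBuildMsgFp)(SSqlObj* pSql, SSqlInfo* pInfo);
extern SBuildMsgFp tscBuildMsg[TSDB_SQL_MAX];

static int32_t buildFinalMsg(SSqlObj* pSql, SSqlInfo* pInfo) {
  SSqlCmd* pCmd = &pSql->cmd;
  return tscBuildMsg[pCmd->command](pSql, pInfo);  // one builder per command
}
```

The design choice is that validation (this function) and wire-format construction (the builders) are now fully separated, which is what lets the multi-subclause SELECT path above return early with `TSDB_CODE_SUCCESS` and defer message building.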
+ for(int32_t i = 1; i < pCmd->numOfClause; ++i) { + SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pCmd, i); + + int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo); + if (ret != 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } } - // handle the limit offset value, validate the limit - pCmd->limit = pQuerySql->limit; - - // temporarily save the original limitation value - if ((code = parseLimitClause(pSql, pQuerySql)) != TSDB_CODE_SUCCESS) { - return code; - } + return TSDB_CODE_SUCCESS; // do not build query message here + } - if ((code = doFunctionsCompatibleCheck(pSql)) != TSDB_CODE_SUCCESS) { + case TSDB_SQL_ALTER_TABLE: { + if ((code = setAlterTableInfo(pSql, pInfo)) != TSDB_CODE_SUCCESS) { return code; } - setColumnOffsetValueInResultset(pCmd); + break; + } - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - updateTagColumnIndex(pCmd, i); + case TSDB_SQL_KILL_QUERY: + case TSDB_SQL_KILL_STREAM: + case TSDB_SQL_KILL_CONNECTION: { + if ((code = setKillInfo(pSql, pInfo)) != TSDB_CODE_SUCCESS) { + return code; } break; } - case TSQL_INSERT: { - assert(false); - } - case ALTER_TABLE_ADD_COLUMN: - case ALTER_TABLE_DROP_COLUMN: - case ALTER_TABLE_TAGS_ADD: - case ALTER_TABLE_TAGS_DROP: - case ALTER_TABLE_TAGS_CHG: - case ALTER_TABLE_TAGS_SET: { - return setAlterTableInfo(pSql, pInfo); - } - - case KILL_CONNECTION: - case KILL_QUERY: - case KILL_STREAM: { - return setKillInfo(pSql, pInfo); - } default: - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression"); } - return TSDB_CODE_SUCCESS; + return tscBuildMsg[pCmd->command](pSql, pInfo); } /* * if the top/bottom exists, only tags columns, tbname column, and primary timestamp column * are available. 
*/ -static bool isTopBottomQuery(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int32_t functionId = tscSqlExprGet(pCmd, i)->functionId; +static bool isTopBottomQuery(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { return true; @@ -1064,11 +578,11 @@ static bool isTopBottomQuery(SSqlCmd* pCmd) { return false; } -int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { +int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { const char* msg1 = "invalid query expression"; const char* msg2 = "interval cannot be less than 10 ms"; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); if (pQuerySql->interval.type == 0 || pQuerySql->interval.n == 0) { return TSDB_CODE_SUCCESS; @@ -1076,42 +590,53 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { // interval is not null SSQLToken* t = &pQuerySql->interval; - if (getTimestampInUsFromStr(t->z, t->n, &pCmd->nAggTimeInterval) != TSDB_CODE_SUCCESS) { + if (getTimestampInUsFromStr(t->z, t->n, &pQueryInfo->nAggTimeInterval) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - /* revised the time precision according to the flag */ + // if the unit of time window value is millisecond, change the value from microsecond if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { - pCmd->nAggTimeInterval = pCmd->nAggTimeInterval / 1000; + pQueryInfo->nAggTimeInterval = pQueryInfo->nAggTimeInterval / 1000; } /* parser has filter the illegal type, no need to check here */ - pCmd->intervalTimeUnit = pQuerySql->interval.z[pQuerySql->interval.n - 1]; + pQueryInfo->intervalTimeUnit = pQuerySql->interval.z[pQuerySql->interval.n - 1]; // interval cannot be less than 10 milliseconds - if (pCmd->nAggTimeInterval < tsMinIntervalTime) { - return invalidSqlErrMsg(pCmd, msg2); + if (pQueryInfo->nAggTimeInterval < tsMinIntervalTime) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } // for top/bottom + interval query, we do not add additional timestamp column in the front - if (isTopBottomQuery(pCmd)) { + if (isTopBottomQuery(pQueryInfo)) { return TSDB_CODE_SUCCESS; } - // check the invalid sql expresssion: select count(tbname)/count(tag1)/count(tag2) from super_table interval(1d); - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + /* + * check invalid SQL: + * select count(tbname)/count(tag1)/count(tag2) from super_table_name interval(1d); + */ + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } + + /* + * check invalid SQL: + * select tbname, tags_fields from super_table_name interval(1s) + */ + if (tscQueryMetricTags(pQueryInfo) && pQueryInfo->nAggTimeInterval > 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); + } // need to add timestamp column in result set, if interval is existed - uint64_t uid = tscSqlExprGet(pCmd, 0)->uid; + uint64_t uid = tscSqlExprGet(pQueryInfo, 0)->uid; int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - 
pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); if (pMeterMetaInfo->pMeterMeta->uid == uid) { tableIndex = i; break; @@ -1123,51 +648,64 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { } SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscSqlExprInsert(pCmd, 0, TSDB_FUNC_TS, &index, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE); + tscSqlExprInsert(pQueryInfo, 0, TSDB_FUNC_TS, &index, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE); SColumnList ids = getColumnList(1, 0, PRIMARYKEY_TIMESTAMP_COL_INDEX); - int32_t ret = insertResultField(pCmd, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].aName); - return ret; + int32_t ret = insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].aName); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + if (setSlidingClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; } -int32_t setSlidingClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { +int32_t setSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { const char* msg0 = "sliding value too small"; const char* msg1 = "sliding value no larger than the interval value"; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SSQLToken* pSliding = &pQuerySql->sliding; if (pSliding->n != 0) { - getTimestampInUsFromStr(pSliding->z, pSliding->n, &pCmd->nSlidingTime); + getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->nSlidingTime); if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { - pCmd->nSlidingTime /= 1000; + pQueryInfo->nSlidingTime /= 1000; } - if (pCmd->nSlidingTime < tsMinSlidingTime) { - return invalidSqlErrMsg(pCmd, msg0); + if (pQueryInfo->nSlidingTime < tsMinSlidingTime) { + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } - if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { - return invalidSqlErrMsg(pCmd, msg1); + if (pQueryInfo->nSlidingTime > pQueryInfo->nAggTimeInterval) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } + } else { + pSliding->n = pQueryInfo->nAggTimeInterval; } return TSDB_CODE_SUCCESS; } -int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex) { +int32_t setMeterID(SMeterMetaInfo* pMeterMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql) { const char* msg = "name too long"; - SSqlCmd* pCmd = &pSql->cmd; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); - int32_t code = TSDB_CODE_SUCCESS; + SSqlCmd* pCmd = &pSql->cmd; + int32_t code = TSDB_CODE_SUCCESS; + // backup the old name in pMeterMetaInfo + size_t size = strlen(pMeterMetaInfo->name); + char* oldName = NULL; + if (size > 0) { + oldName = strdup(pMeterMetaInfo->name); + } + if (hasSpecifyDB(pzTableName)) { - /* - * db has been specified in sql string - * so we ignore current db path - */ + // db has been specified in sql string so we ignore current db path code = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), NULL, pzTableName, NULL); } else { // get current DB name first, then set it into path SSQLToken t = {0}; @@ -1177,10 +715,28 @@ int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex) { } if (code != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), 
msg); } - return code; + if (code != TSDB_CODE_SUCCESS) { + free(oldName); + return code; + } + + /* + * the old name exists and is not equalled to the new name. Release the metermeta/metricmeta + * that are corresponding to the old name for the new table name. + */ + if (size > 0) { + if (strncasecmp(oldName, pMeterMetaInfo->name, tListLen(pMeterMetaInfo->name)) != 0) { + tscClearMeterMetaInfo(pMeterMetaInfo, false); + } + } else { + assert(pMeterMetaInfo->pMeterMeta == NULL && pMeterMetaInfo->pMetricMeta == NULL); + } + + tfree(oldName); + return TSDB_CODE_SUCCESS; } static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { @@ -1196,13 +752,13 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { // number of fields no less than 2 if (pFieldList->nField <= 1 || pFieldList->nField > TSDB_MAX_COLUMNS) { - invalidSqlErrMsg(pCmd, msg); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); return false; } // first column must be timestamp if (pFieldList->p[0].type != TSDB_DATA_TYPE_TIMESTAMP) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } @@ -1213,7 +769,7 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { // max row length must be less than TSDB_MAX_BYTES_PER_ROW if (nLen > TSDB_MAX_BYTES_PER_ROW) { - invalidSqlErrMsg(pCmd, msg2); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return false; } @@ -1221,23 +777,23 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { for (int32_t i = 0; i < pFieldList->nField; ++i) { TAOS_FIELD* pField = &pFieldList->p[i]; if (pField->type < TSDB_DATA_TYPE_BOOL || pField->type > TSDB_DATA_TYPE_NCHAR) { - invalidSqlErrMsg(pCmd, msg4); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } if ((pField->type == TSDB_DATA_TYPE_BINARY && (pField->bytes <= 0 || pField->bytes > TSDB_MAX_BINARY_LEN)) || (pField->type == TSDB_DATA_TYPE_NCHAR && (pField->bytes <= 0 || pField->bytes > TSDB_MAX_NCHAR_LEN))) { - invalidSqlErrMsg(pCmd, msg5); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } if (validateColumnName(pField->name) != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg6); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } if (has(pFieldList, i + 1, pFieldList->p[i].name) == true) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } } @@ -1258,7 +814,7 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq // number of fields at least 1 if (pTagsList->nField < 1 || pTagsList->nField > TSDB_MAX_TAGS) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } @@ -1269,14 +825,14 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq // max tag row length must be less than TSDB_MAX_TAGS_LEN if (nLen > TSDB_MAX_TAGS_LEN) { - invalidSqlErrMsg(pCmd, msg2); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return false; } // field name must be unique for (int32_t i = 0; i < pTagsList->nField; ++i) { if (has(pFieldList, 0, pTagsList->p[i].name) == true) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } } @@ -1284,28 +840,28 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq /* timestamp in tag is not allowed */ for (int32_t i = 0; i < pTagsList->nField; ++i) { if (pTagsList->p[i].type == 
TSDB_DATA_TYPE_TIMESTAMP) { - invalidSqlErrMsg(pCmd, msg4); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } if (pTagsList->p[i].type < TSDB_DATA_TYPE_BOOL || pTagsList->p[i].type > TSDB_DATA_TYPE_NCHAR) { - invalidSqlErrMsg(pCmd, msg5); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } if ((pTagsList->p[i].type == TSDB_DATA_TYPE_BINARY && pTagsList->p[i].bytes <= 0) || (pTagsList->p[i].type == TSDB_DATA_TYPE_NCHAR && pTagsList->p[i].bytes <= 0)) { - invalidSqlErrMsg(pCmd, msg7); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); return false; } if (validateColumnName(pTagsList->p[i].name) != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg6); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } if (has(pTagsList, i + 1, pTagsList->p[i].name) == true) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } } @@ -1324,7 +880,9 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { const char* msg5 = "invalid binary/nchar tag length"; const char* msg6 = "invalid data type in tags"; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + assert(pCmd->numOfClause == 1); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; // no more than 6 tags @@ -1332,18 +890,18 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { char msg[128] = {0}; sprintf(msg, "tags no more than %d", TSDB_MAX_TAGS); - invalidSqlErrMsg(pCmd, msg); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); return false; } // no timestamp allowable if (pTagField->type == TSDB_DATA_TYPE_TIMESTAMP) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } if (pTagField->type < TSDB_DATA_TYPE_BOOL && pTagField->type > TSDB_DATA_TYPE_NCHAR) { - invalidSqlErrMsg(pCmd, msg6); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } @@ -1356,19 +914,19 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { // length less than TSDB_MAX_TASG_LEN if (nLen + pTagField->bytes > TSDB_MAX_TAGS_LEN) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } // tags name can not be a keyword if (validateColumnName(pTagField->name) != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg4); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } // binary(val), val can not be equalled to or less than 0 if ((pTagField->type == TSDB_DATA_TYPE_BINARY || pTagField->type == TSDB_DATA_TYPE_NCHAR) && pTagField->bytes <= 0) { - invalidSqlErrMsg(pCmd, msg5); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } @@ -1377,7 +935,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { for (int32_t i = 0; i < pMeterMeta->numOfTags + pMeterMeta->numOfColumns; ++i) { if (strncasecmp(pTagField->name, pSchema[i].name, TSDB_COL_NAME_LEN) == 0) { - invalidSqlErrMsg(pCmd, msg2); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return false; } } @@ -1393,23 +951,24 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { const char* msg5 = "invalid column name"; const char* msg6 = "invalid column length"; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + assert(pCmd->numOfClause == 1); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; // no more max columns if 
(pMeterMeta->numOfColumns >= TSDB_MAX_COLUMNS || pMeterMeta->numOfTags + pMeterMeta->numOfColumns >= TSDB_MAX_COLUMNS) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } if (pColField->type < TSDB_DATA_TYPE_BOOL || pColField->type > TSDB_DATA_TYPE_NCHAR) { - invalidSqlErrMsg(pCmd, msg4); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } if (validateColumnName(pColField->name) != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg5); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } @@ -1421,20 +980,20 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { } if (pColField->bytes <= 0) { - invalidSqlErrMsg(pCmd, msg6); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } // length less than TSDB_MAX_BYTES_PER_ROW if (nLen + pColField->bytes > TSDB_MAX_BYTES_PER_ROW) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } // field name must be unique for (int32_t i = 0; i < pMeterMeta->numOfTags + pMeterMeta->numOfColumns; ++i) { if (strncasecmp(pColField->name, pSchema[i].name, TSDB_COL_NAME_LEN) == 0) { - invalidSqlErrMsg(pCmd, msg2); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return false; } } @@ -1469,7 +1028,7 @@ static bool hasSpecifyDB(SSQLToken* pTableName) { return false; } -static int32_t setObjFullName(char* fullName, char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* xlen) { +int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* xlen) { int32_t totalLen = 0; if (account != NULL) { @@ -1532,50 +1091,55 @@ static void extractColumnNameFromString(tSQLExprItem* pItem) { } } -int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric) { +int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable) { assert(pSelection != NULL && pCmd != NULL); - const char* msg1 = "invalid column name/illegal column type in arithmetic expression"; + const char* msg1 = "invalid column name, or illegal column type"; const char* msg2 = "functions can not be mixed up"; const char* msg3 = "not support query expression"; + const char* msg4 = "columns from different table mixed up in arithmetic expression"; + const char* msg5 = "invalid function name"; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); for (int32_t i = 0; i < pSelection->nExpr; ++i) { - int32_t outputIndex = pCmd->fieldsInfo.numOfOutputCols; + int32_t outputIndex = pQueryInfo->fieldsInfo.numOfOutputCols; tSQLExprItem* pItem = &pSelection->a[i]; // project on all fields if (pItem->pNode->nSQLOptr == TK_ALL || pItem->pNode->nSQLOptr == TK_ID || pItem->pNode->nSQLOptr == TK_STRING) { // it is actually a function, but the function name is invalid if (pItem->pNode->nSQLOptr == TK_ID && (pItem->pNode->colInfo.z == NULL && pItem->pNode->colInfo.n == 0)) { - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } // if the name of column is quoted, remove it and set the right information for later process extractColumnNameFromString(pItem); - pCmd->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; + pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; // select table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2 - if (addProjectionExprAndResultField(pCmd, pItem) != TSDB_CODE_SUCCESS) { + if (addProjectionExprAndResultField(pQueryInfo, pItem) != TSDB_CODE_SUCCESS) { 
return TSDB_CODE_INVALID_SQL; } } else if (pItem->pNode->nSQLOptr >= TK_COUNT && pItem->pNode->nSQLOptr <= TK_LAST_ROW) { // sql function in selection clause, append sql function info in pSqlCmd structure sequentially - if (addExprAndResultField(pCmd, outputIndex, pItem) != TSDB_CODE_SUCCESS) { + if (addExprAndResultField(pQueryInfo, outputIndex, pItem) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } } else if (pItem->pNode->nSQLOptr >= TK_PLUS && pItem->pNode->nSQLOptr <= TK_REM) { // arithmetic function in select - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); - - SColumnIdListRes columnList = {.pSchema = pSchema, .numOfCols = pMeterMetaInfo->pMeterMeta->numOfColumns}; - - int32_t ret = - validateArithmeticSQLExpr(pItem->pNode, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns, &columnList); - if (ret != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + SColumnList columnList = {0}; + if (validateArithmeticSQLExpr(pItem->pNode, pQueryInfo, &columnList) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); + } + + int32_t tableIndex = columnList.ids[0].tableIndex; + for(int32_t f = 1; f < columnList.num; ++f) { + if (columnList.ids[f].tableIndex != tableIndex) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); + } } char arithmeticExprStr[1024] = {0}; @@ -1586,10 +1150,10 @@ int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric } // expr string is set as the parameter of function - SColumnIndex index = {0}; - SSqlExpr* pExpr = tscSqlExprInsert(pCmd, outputIndex, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, + SColumnIndex index = {.tableIndex = tableIndex}; + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, outputIndex, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), sizeof(double)); - addExprParams(pExpr, arithmeticExprStr, TSDB_DATA_TYPE_BINARY, strlen(arithmeticExprStr), 0); + addExprParams(pExpr, arithmeticExprStr, TSDB_DATA_TYPE_BINARY, strlen(arithmeticExprStr), index.tableIndex); /* todo alias name should use the original sql string */ if (pItem->aliasName != NULL) { @@ -1598,40 +1162,40 @@ int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric strncpy(pExpr->aliasName, arithmeticExprStr, TSDB_COL_NAME_LEN); } - insertResultField(pCmd, i, &columnList.list, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName); + insertResultField(pQueryInfo, i, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName); } else { /* * not support such expression * e.g., select 12+5 from table_name */ - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - if (pCmd->fieldsInfo.numOfOutputCols > TSDB_MAX_COLUMNS) { + if (pQueryInfo->fieldsInfo.numOfOutputCols > TSDB_MAX_COLUMNS) { return TSDB_CODE_INVALID_SQL; } } - if (!functionCompatibleCheck(pCmd)) { - return invalidSqlErrMsg(pCmd, msg2); + if (!functionCompatibleCheck(pQueryInfo)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - if (isMetric) { - pCmd->type |= TSDB_QUERY_TYPE_STABLE_QUERY; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + if (isSTable) { + pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_QUERY; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); - if (tscQueryMetricTags(pCmd)) { // local handle the metric tag query - pCmd->command = TSDB_SQL_RETRIEVE_TAGS; + if (tscQueryMetricTags(pQueryInfo)) { // local handle the metric tag query pCmd->count 
= pMeterMetaInfo->pMeterMeta->numOfColumns;  // the number of meter schema, tricky.
+    pQueryInfo->command = TSDB_SQL_RETRIEVE_TAGS;
   }
 
   /*
    * transfer sql functions that need secondary merge into another format
    * in dealing with metric queries such as: count/first/last
    */
-  tscTansformSQLFunctionForMetricQuery(pCmd);
+  tscTansformSQLFunctionForSTableQuery(pQueryInfo);
 
-  if (hasUnsupportFunctionsForMetricQuery(pCmd)) {
+  if (hasUnsupportFunctionsForSTableQuery(pQueryInfo)) {
     return TSDB_CODE_INVALID_SQL;
   }
 }
@@ -1639,18 +1203,19 @@ int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric
   return TSDB_CODE_SUCCESS;
 }
 
-int32_t insertResultField(SSqlCmd* pCmd, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type,
+int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type,
                           char* fieldName) {
   for (int32_t i = 0; i < pIdList->num; ++i) {
-    tscColumnBaseInfoInsert(pCmd, &(pIdList->ids[i]));
+    tscColumnBaseInfoInsert(pQueryInfo, &(pIdList->ids[i]));
   }
 
-  tscFieldInfoSetValue(&pCmd->fieldsInfo, outputIndex, type, fieldName, bytes);
+  tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, outputIndex, type, fieldName, bytes);
   return TSDB_CODE_SUCCESS;
 }
 
-SSqlExpr* doAddProjectCol(SSqlCmd* pCmd, int32_t outputIndex, int32_t colIdx, int32_t tableIndex) {
-  SMeterMeta* pMeterMeta = tscGetMeterMetaInfo(pCmd, tableIndex)->pMeterMeta;
+SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIdx, int32_t tableIndex) {
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex);
+  SMeterMeta*     pMeterMeta = pMeterMetaInfo->pMeterMeta;
 
   SSchema* pSchema = tsGetColumnSchema(pMeterMeta, colIdx);
   int32_t  numOfCols = pMeterMeta->numOfColumns;
 
@@ -1658,21 +1223,21 @@ SSqlExpr* doAddProjectCol(SSqlCmd* pCmd, int32_t outputIndex, int32_t colIdx, in
   int16_t functionId = (int16_t)((colIdx >= numOfCols) ? TSDB_FUNC_TAGPRJ : TSDB_FUNC_PRJ);
 
   if (functionId == TSDB_FUNC_TAGPRJ) {
-    addRequiredTagColumn(pCmd, colIdx - numOfCols, tableIndex);
-    pCmd->type = TSDB_QUERY_TYPE_STABLE_QUERY;
+    addRequiredTagColumn(pQueryInfo, colIdx - numOfCols, tableIndex);
+    pQueryInfo->type = TSDB_QUERY_TYPE_STABLE_QUERY;
   } else {
-    pCmd->type = TSDB_QUERY_TYPE_PROJECTION_QUERY;
+    pQueryInfo->type = TSDB_QUERY_TYPE_PROJECTION_QUERY;
   }
 
   SColumnIndex index = {tableIndex, colIdx};
   SSqlExpr*    pExpr =
-      tscSqlExprInsert(pCmd, outputIndex, functionId, &index, pSchema->type, pSchema->bytes, pSchema->bytes);
+      tscSqlExprInsert(pQueryInfo, outputIndex, functionId, &index, pSchema->type, pSchema->bytes, pSchema->bytes);
 
   return pExpr;
 }
 
-void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex) {
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex);
+void addRequiredTagColumn(SQueryInfo* pQueryInfo, int32_t tagColIndex, int32_t tableIndex) {
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex);
 
   if (pMeterMetaInfo->numOfTags == 0 || pMeterMetaInfo->tagColumnIndex[pMeterMetaInfo->numOfTags - 1] < tagColIndex) {
     pMeterMetaInfo->tagColumnIndex[pMeterMetaInfo->numOfTags++] = tagColIndex;
 
@@ -1698,10 +1263,11 @@ void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex
   assert(tagColIndex >= -1 && tagColIndex < TSDB_MAX_TAGS && pMeterMetaInfo->numOfTags <= TSDB_MAX_TAGS + 1);
 }
 
-static void addProjectQueryCol(SSqlCmd* pCmd, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) {
-  SSqlExpr* pExpr = doAddProjectCol(pCmd, startPos, pIndex->columnIndex, pIndex->tableIndex);
+static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) {
+  SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos, pIndex->columnIndex, pIndex->tableIndex);
 
-  SMeterMeta* pMeterMeta = tscGetMeterMetaInfo(pCmd, pIndex->tableIndex)->pMeterMeta;
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, pIndex->tableIndex);
+  SMeterMeta*     pMeterMeta = pMeterMetaInfo->pMeterMeta;
 
   SSchema* pSchema = tsGetColumnSchema(pMeterMeta, pIndex->columnIndex);
 
@@ -1715,42 +1281,42 @@ static void addProjectQueryCol(SSqlCmd* pCmd, int32_t startPos, SColumnIndex* pI
     ids.num = 0;
   }
 
-  insertResultField(pCmd, startPos, &ids, pExpr->resBytes, pExpr->resType, colName);
+  insertResultField(pQueryInfo, startPos, &ids, pExpr->resBytes, pExpr->resType, colName);
 }
 
-void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex,
-                                  SSchema* pColSchema, int16_t flag) {
-  SSqlExpr* pExpr = tscSqlExprInsert(pCmd, outputColIndex, functionId, pIndex, pColSchema->type, pColSchema->bytes,
-                                     pColSchema->bytes);
+void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
+                                  SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag) {
+  SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type,
+                                     pColSchema->bytes, pColSchema->bytes);
 
   SColumnList ids = getColumnList(1, pIndex->tableIndex, pIndex->columnIndex);
   if (TSDB_COL_IS_TAG(flag)) {
     ids.num = 0;
   }
 
-  insertResultField(pCmd, outputColIndex, &ids, pColSchema->bytes, pColSchema->type, pColSchema->name);
+  insertResultField(pQueryInfo, outputColIndex, &ids, pColSchema->bytes, pColSchema->type, pColSchema->name);
 
   pExpr->colInfo.flag = flag;
   if (TSDB_COL_IS_TAG(flag)) {
-    addRequiredTagColumn(pCmd, pIndex->columnIndex, pIndex->tableIndex);
+    addRequiredTagColumn(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex);
   }
 }
 
-static int32_t doAddProjectionExprAndResultFields(SSqlCmd* pCmd, SColumnIndex* pIndex, int32_t startPos) {
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pIndex->tableIndex);
+static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColumnIndex* pIndex, int32_t startPos) {
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, pIndex->tableIndex);
 
   int32_t     numOfTotalColumns = 0;
   SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta;
   SSchema*    pSchema = tsGetSchema(pMeterMeta);
 
-  if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
+  if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
     numOfTotalColumns = pMeterMeta->numOfColumns + pMeterMeta->numOfTags;
   } else {
     numOfTotalColumns = pMeterMeta->numOfColumns;
   }
 
   for (int32_t j = 0; j < numOfTotalColumns; ++j) {
-    doAddProjectCol(pCmd, startPos + j, j, pIndex->tableIndex);
+    doAddProjectCol(pQueryInfo, startPos + j, j, pIndex->tableIndex);
 
     pIndex->columnIndex = j;
     SColumnList ids = {0};
@@ -1759,56 +1325,56 @@ static int32_t doAddProjectionExprAndResultFields(SSqlCmd* pCmd, SColumnIndex* p
     // tag columns do not add to source list
     ids.num = (j >= pMeterMeta->numOfColumns) ? 0 : 1;
 
-    insertResultField(pCmd, startPos + j, &ids, pSchema[j].bytes, pSchema[j].type, pSchema[j].name);
+    insertResultField(pQueryInfo, startPos + j, &ids, pSchema[j].bytes, pSchema[j].type, pSchema[j].name);
   }
 
   return numOfTotalColumns;
 }
 
-int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, tSQLExprItem* pItem) {
+int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pItem) {
   const char* msg0 = "invalid column name";
   const char* msg1 = "tag for table query is not allowed";
 
-  int32_t startPos = pCmd->fieldsInfo.numOfOutputCols;
+  int32_t startPos = pQueryInfo->fieldsInfo.numOfOutputCols;
 
   if (pItem->pNode->nSQLOptr == TK_ALL) {  // project on all fields
     SColumnIndex index = COLUMN_INDEX_INITIALIZER;
-    if (getTableIndexByName(&pItem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) {
-      return TSDB_CODE_INVALID_SQL;
+    if (getTableIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+      return invalidSqlErrMsg(pQueryInfo->msg, msg0);
    }
 
    // all meters' columns are required
    if (index.tableIndex == COLUMN_INDEX_INITIAL_VAL) {  // all table columns are required.
-      for (int32_t i = 0; i < pCmd->numOfTables; ++i) {
+      for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
         index.tableIndex = i;
-        int32_t inc = doAddProjectionExprAndResultFields(pCmd, &index, startPos);
+        int32_t inc = doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos);
         startPos += inc;
       }
     } else {
-      doAddProjectionExprAndResultFields(pCmd, &index, startPos);
+      doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos);
     }
   } else if (pItem->pNode->nSQLOptr == TK_ID) {  // simple column projection query
     SColumnIndex index = COLUMN_INDEX_INITIALIZER;
-    if (getColumnIndexByNameEx(&pItem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) {
-      return invalidSqlErrMsg(pCmd, msg0);
+    if (getColumnIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+      return invalidSqlErrMsg(pQueryInfo->msg, msg0);
     }
 
     if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
       SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_METER_NAME_LEN};
       strcpy(colSchema.name, TSQL_TBNAME_L);
 
-      pCmd->type = TSDB_QUERY_TYPE_STABLE_QUERY;
-      tscAddSpecialColumnForSelect(pCmd, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, true);
+      pQueryInfo->type = TSDB_QUERY_TYPE_STABLE_QUERY;
+      tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, true);
     } else {
-      SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex);
+      SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex);
       SMeterMeta*     pMeterMeta = pMeterMetaInfo->pMeterMeta;
 
       if (index.columnIndex >= pMeterMeta->numOfColumns && UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) {
-        return invalidSqlErrMsg(pCmd, msg1);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg1);
      }
 
-      addProjectQueryCol(pCmd, startPos, &index, pItem);
+      addProjectQueryCol(pQueryInfo, startPos, &index, pItem);
    }
  } else {
    return TSDB_CODE_INVALID_SQL;
@@ -1817,7 +1383,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, tSQLExprItem* pItem) {
   return TSDB_CODE_SUCCESS;
 }
 
-static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SSchema* pSchema, int32_t functionID, char* aliasName,
+static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema, int32_t functionID, char* aliasName,
                                        int32_t resColIdx, SColumnIndex* pColIndex) {
   int16_t type = 0;
   int16_t bytes = 0;
@@ -1829,7 +1395,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SSchema* pSchema, int32_t
     if (pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BINARY ||
         pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_NCHAR ||
         pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BOOL) {
-      invalidSqlErrMsg(pCmd, msg1);
+      invalidSqlErrMsg(pQueryInfo->msg, msg1);
       return -1;
     } else {
       type = TSDB_DATA_TYPE_DOUBLE;
@@ -1846,21 +1412,21 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SSchema* pSchema, int32_t
     getRevisedName(columnName, functionID, TSDB_COL_NAME_LEN, pSchema[pColIndex->columnIndex].name);
   }
 
-  tscSqlExprInsert(pCmd, resColIdx, functionID, pColIndex, type, bytes, bytes);
+  tscSqlExprInsert(pQueryInfo, resColIdx, functionID, pColIndex, type, bytes, bytes);
 
   // for point interpolation/last_row query, we need the timestamp column to be loaded
   SColumnIndex index = {.tableIndex = pColIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
   if (functionID == TSDB_FUNC_INTERP || functionID == TSDB_FUNC_LAST_ROW) {
-    tscColumnBaseInfoInsert(pCmd, &index);
+    tscColumnBaseInfoInsert(pQueryInfo, &index);
   }
 
   SColumnList ids = getColumnList(1, pColIndex->tableIndex, pColIndex->columnIndex);
-  insertResultField(pCmd, resColIdx, &ids, bytes, type, columnName);
+  insertResultField(pQueryInfo, resColIdx, &ids, bytes, type, columnName);
 
   return TSDB_CODE_SUCCESS;
 }
 
-int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem) {
+int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIdx, tSQLExprItem* pItem) {
   SMeterMetaInfo* pMeterMetaInfo = NULL;
   int32_t optr = pItem->pNode->nSQLOptr;
 
@@ -1875,7 +1441,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
     case TK_COUNT: {
      if (pItem->pNode->pParam != NULL && pItem->pNode->pParam->nExpr != 1) {
        /* more than one parameter for count() function */
-        return invalidSqlErrMsg(pCmd, msg2);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg2);
      }
 
      int16_t functionID = 0;
@@ -1888,7 +1454,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
      if (pItem->pNode->pParam != NULL) {
        SSQLToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo;
        if (pToken->z == NULL || pToken->n == 0) {
-          return invalidSqlErrMsg(pCmd, msg3);
+          return invalidSqlErrMsg(pQueryInfo->msg, msg3);
        }
 
        tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0];
@@ -1897,20 +1463,20 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
          // check if the table name is valid or not
          SSQLToken tmpToken = pParamElem->pNode->colInfo;
 
-          if (getTableIndexByName(&tmpToken, pCmd, &index) != TSDB_CODE_SUCCESS) {
-            return invalidSqlErrMsg(pCmd, msg4);
+          if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+            return invalidSqlErrMsg(pQueryInfo->msg, msg4);
          }
 
          index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
          int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
-          tscSqlExprInsert(pCmd, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size);
+          tscSqlExprInsert(pQueryInfo, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size);
        } else {
          // count the number of meters created according to the metric
-          if (getColumnIndexByNameEx(pToken, pCmd, &index) != TSDB_CODE_SUCCESS) {
-            return invalidSqlErrMsg(pCmd, msg3);
+          if (getColumnIndexByName(pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+            return invalidSqlErrMsg(pQueryInfo->msg, msg3);
          }
 
-          pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex);
+          pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex);
 
          // counting a tag is equivalent to count(tbname)
          if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) {
@@ -1918,13 +1484,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
          }
 
          int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
-          tscSqlExprInsert(pCmd, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size);
+          tscSqlExprInsert(pQueryInfo, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size);
        }
      } else {  // count(*) is equivalent to count(primary_timestamp_key)
        index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
        int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
-        tscSqlExprInsert(pCmd, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size);
+        tscSqlExprInsert(pQueryInfo, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size);
      }
 
      char columnName[TSDB_COL_NAME_LEN] = {0};
@@ -1933,7 +1499,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
      // count always uses the primary timestamp key column, which is 0.
      SColumnList ids = getColumnList(1, index.tableIndex, index.columnIndex);
-      insertResultField(pCmd, colIdx, &ids, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, columnName);
+      insertResultField(pQueryInfo, colIdx, &ids, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, columnName);
 
      return TSDB_CODE_SUCCESS;
    }
    case TK_SUM:
@@ -1948,27 +1514,27 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
      if (pItem->pNode->pParam == NULL || (optr != TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 1) ||
          (optr == TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 3)) {
        /* no parameters or more than one parameter for function */
-        return invalidSqlErrMsg(pCmd, msg2);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg2);
      }
 
      tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]);
      if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) {
-        return invalidSqlErrMsg(pCmd, msg2);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg2);
      }
 
      SColumnIndex index = COLUMN_INDEX_INITIALIZER;
-      if ((getColumnIndexByNameEx(&pParamElem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) ||
-          index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
-        return invalidSqlErrMsg(pCmd, msg3);
+      if ((getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) ||
+          index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+        return invalidSqlErrMsg(pQueryInfo->msg, msg3);
      }
 
      // 2. check if sql function can be applied on this column data type
-      pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex);
+      pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex);
      SSchema* pSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, index.columnIndex);
      int16_t  colType = pSchema->type;
 
      if (colType <= TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) {
-        return invalidSqlErrMsg(pCmd, msg1);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg1);
      }
 
      char columnName[TSDB_COL_NAME_LEN] = {0};
@@ -1992,18 +1558,19 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
      if (optr == TK_DIFF) {
        colIdx += 1;
        SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
-        tscSqlExprInsert(pCmd, 0, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE);
+        tscSqlExprInsert(pQueryInfo, 0, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE,
+                         TSDB_KEYSIZE);
 
        SColumnList ids = getColumnList(1, 0, 0);
-        insertResultField(pCmd, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].aName);
+        insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].aName);
      }
 
      // functions can not be applied to tags
      if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) {
-        return invalidSqlErrMsg(pCmd, msg6);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg6);
      }
 
-      SSqlExpr* pExpr = tscSqlExprInsert(pCmd, colIdx, functionID, &index, resultType, resultSize, resultSize);
+      SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, colIdx, functionID, &index, resultType, resultSize, resultSize);
 
      if (optr == TK_LEASTSQUARES) {
        /* set the leastsquares parameters */
@@ -2026,7 +1593,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
      ids.num = 1;
      ids.ids[0] = index;
 
-      insertResultField(pCmd, colIdx, &ids, pExpr->resBytes, pExpr->resType, columnName);
+      insertResultField(pQueryInfo, colIdx, &ids, pExpr->resBytes, pExpr->resType, columnName);
 
      return TSDB_CODE_SUCCESS;
    }
@@ -2042,7 +1609,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
      if (!requireAllFields) {
        if (pItem->pNode->pParam->nExpr < 1) {
-          return invalidSqlErrMsg(pCmd, msg3);
+          return invalidSqlErrMsg(pQueryInfo->msg, msg3);
        }
 
        /* in first/last function, multiple columns can be added to the result set */
@@ -2050,7 +1617,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
        for (int32_t i = 0; i < pItem->pNode->pParam->nExpr; ++i) {
          tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[i]);
          if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) {
-            return invalidSqlErrMsg(pCmd, msg3);
+            return invalidSqlErrMsg(pQueryInfo->msg, msg3);
          }
 
          SColumnIndex index = COLUMN_INDEX_INITIALIZER;
@@ -2059,34 +1626,34 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
            // select table.*
            SSQLToken tmpToken = pParamElem->pNode->colInfo;
 
-            if (getTableIndexByName(&tmpToken, pCmd, &index) != TSDB_CODE_SUCCESS) {
-              return invalidSqlErrMsg(pCmd, msg4);
+            if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+              return invalidSqlErrMsg(pQueryInfo->msg, msg4);
            }
 
-            pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex);
+            pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex);
            SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta);
 
            for (int32_t j = 0; j < pMeterMetaInfo->pMeterMeta->numOfColumns; ++j) {
              index.columnIndex = j;
-              if (setExprInfoForFunctions(pCmd, pSchema, functionID, pItem->aliasName, colIdx++, &index) != 0) {
+              if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIdx++, &index) != 0) {
                return TSDB_CODE_INVALID_SQL;
              }
            }
          } else {
-            if (getColumnIndexByNameEx(&pParamElem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) {
-              return invalidSqlErrMsg(pCmd, msg3);
+            if (getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+              return invalidSqlErrMsg(pQueryInfo->msg, msg3);
            }
 
-            pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex);
+            pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex);
            SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta);
 
            // functions can not be applied to tags
            if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) {
-              return invalidSqlErrMsg(pCmd, msg6);
+              return invalidSqlErrMsg(pQueryInfo->msg, msg6);
            }
 
-            if (setExprInfoForFunctions(pCmd, pSchema, functionID, pItem->aliasName, colIdx + i, &index) != 0) {
+            if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIdx + i, &index) != 0) {
              return TSDB_CODE_INVALID_SQL;
            }
          }
@@ -2096,13 +1663,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
      } else {  // select * from xxx
        int32_t numOfFields = 0;
 
-        for (int32_t j = 0; j < pCmd->numOfTables; ++j) {
-          pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, j);
+        for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) {
+          pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, j);
          SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta);
 
          for (int32_t i = 0; i < pMeterMetaInfo->pMeterMeta->numOfColumns; ++i) {
            SColumnIndex index = {.tableIndex = j, .columnIndex = i};
-            if (setExprInfoForFunctions(pCmd, pSchema, functionID, pItem->aliasName, colIdx + i + j, &index) != 0) {
+            if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIdx + i + j, &index) !=
+                0) {
              return TSDB_CODE_INVALID_SQL;
            }
          }
@@ -2120,39 +1688,39 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
 
      // 1. validate the number of parameters
      if (pItem->pNode->pParam == NULL || pItem->pNode->pParam->nExpr != 2) {
        /* no parameters or more than one parameter for function */
-        return invalidSqlErrMsg(pCmd, msg2);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg2);
      }
 
      tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]);
      if (pParamElem->pNode->nSQLOptr != TK_ID) {
-        return invalidSqlErrMsg(pCmd, msg2);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg2);
      }
 
      char columnName[TSDB_COL_NAME_LEN] = {0};
      getColumnName(pItem, columnName, TSDB_COL_NAME_LEN);
 
      SColumnIndex index = COLUMN_INDEX_INITIALIZER;
-      if (getColumnIndexByNameEx(&pParamElem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) {
-        return invalidSqlErrMsg(pCmd, msg3);
+      if (getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+        return invalidSqlErrMsg(pQueryInfo->msg, msg3);
      }
 
-      pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex);
+      pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex);
      SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta);
 
      // functions can not be applied to tags
      if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) {
-        return invalidSqlErrMsg(pCmd, msg6);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg6);
      }
 
      // 2. validate the column type
      int16_t colType = pSchema[index.columnIndex].type;
      if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) {
-        return invalidSqlErrMsg(pCmd, msg1);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg1);
      }
 
      // 3. validate the parameters
      if (pParamElem[1].pNode->nSQLOptr == TK_ID) {
-        return invalidSqlErrMsg(pCmd, msg2);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg2);
      }
 
      tVariant* pVariant = &pParamElem[1].pNode->val;
@@ -2167,7 +1735,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
        double dp = GET_DOUBLE_VAL(val);
        if (dp < 0 || dp > TOP_BOTTOM_QUERY_LIMIT) {
-          return invalidSqlErrMsg(pCmd, msg5);
+          return invalidSqlErrMsg(pQueryInfo->msg, msg5);
        }
 
        resultSize = sizeof(double);
@@ -2183,14 +1751,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
          return TSDB_CODE_INVALID_SQL;
        }
 
-        SSqlExpr* pExpr = tscSqlExprInsert(pCmd, colIdx, functionId, &index, resultType, resultSize, resultSize);
+        SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, colIdx, functionId, &index, resultType, resultSize, resultSize);
        addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0);
      } else {
        tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT);
 
        int64_t nTop = *((int32_t*)val);
        if (nTop <= 0 || nTop > 100) {  // todo use macro
-          return invalidSqlErrMsg(pCmd, msg5);
+          return invalidSqlErrMsg(pQueryInfo->msg, msg5);
        }
 
        int16_t functionId = 0;
@@ -2200,22 +1768,22 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem
 
        // set the first column ts for top/bottom query
        SColumnIndex index1 = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
-        tscSqlExprInsert(pCmd, 0, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE);
+        tscSqlExprInsert(pQueryInfo, 0, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE);
 
        const int32_t TS_COLUMN_INDEX = 0;
        SColumnList   ids = getColumnList(1, 0, TS_COLUMN_INDEX);
-        insertResultField(pCmd, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
+        insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
                          aAggs[TSDB_FUNC_TS].aName);
 
        colIdx += 1;  // the first column is ts
        numOfAddedColumn += 1;
 
-        SSqlExpr* pExpr = tscSqlExprInsert(pCmd, colIdx, functionId, &index, resultType, resultSize, resultSize);
+        SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, colIdx, functionId, &index, resultType, resultSize, resultSize);
        addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t), 0);
      }
 
      SColumnList ids = getColumnList(1, 0, index.columnIndex);
-      insertResultField(pCmd, colIdx, &ids, resultSize, resultType, columnName);
+      insertResultField(pQueryInfo, colIdx, &ids, resultSize, resultType, columnName);
 
      return TSDB_CODE_SUCCESS;
    }
@@ -2260,8 +1828,8 @@ static bool isTablenameToken(SSQLToken* token) {
   return (strncasecmp(TSQL_TBNAME_L, tmpToken.z, tmpToken.n) == 0 && tmpToken.n == strlen(TSQL_TBNAME_L));
 }
 
-static int16_t doGetColumnIndex(SSqlCmd* pCmd, int32_t index, SSQLToken* pToken) {
-  SMeterMeta* pMeterMeta = tscGetMeterMetaInfo(pCmd, index)->pMeterMeta;
+static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SSQLToken* pToken) {
+  SMeterMeta* pMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index)->pMeterMeta;
 
   int32_t  numOfCols = pMeterMeta->numOfColumns + pMeterMeta->numOfTags;
   SSchema* pSchema = tsGetSchema(pMeterMeta);
@@ -2281,7 +1849,7 @@ static int16_t doGetColumnIndex(SSqlCmd* pCmd, int32_t index, SSQLToken* pToken)
   return columnIndex;
 }
 
-int32_t doGetColumnIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex) {
+int32_t doGetColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
   const char* msg0 = "ambiguous column name";
   const char* msg1 = "invalid column name";
 
@@ -2292,12 +1860,12 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* p
   } else {  // not specify the table name, try to locate the table index by column name
     if (pIndex->tableIndex == COLUMN_INDEX_INITIAL_VAL) {
-      for (int16_t i = 0; i < pCmd->numOfTables; ++i) {
-        int16_t colIndex = doGetColumnIndex(pCmd, i, pToken);
+      for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) {
+        int16_t colIndex = doGetColumnIndex(pQueryInfo, i, pToken);
 
        if (colIndex != COLUMN_INDEX_INITIAL_VAL) {
          if (pIndex->columnIndex != COLUMN_INDEX_INITIAL_VAL) {
-            return invalidSqlErrMsg(pCmd, msg0);
+            return invalidSqlErrMsg(pQueryInfo->msg, msg0);
          } else {
            pIndex->tableIndex = i;
            pIndex->columnIndex = colIndex;
@@ -2305,14 +1873,14 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* p
        }
      }
    } else {  // table index is valid, get the column index
-      int16_t colIndex = doGetColumnIndex(pCmd, pIndex->tableIndex, pToken);
+      int16_t colIndex = doGetColumnIndex(pQueryInfo, pIndex->tableIndex, pToken);
      if (colIndex != COLUMN_INDEX_INITIAL_VAL) {
        pIndex->columnIndex = colIndex;
      }
    }
 
    if (pIndex->columnIndex == COLUMN_INDEX_INITIAL_VAL) {
-      return invalidSqlErrMsg(pCmd, msg1);
+      return invalidSqlErrMsg(pQueryInfo->msg, msg1);
    }
  }
 
@@ -2323,9 +1891,9 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* p
   }
 }
 
-static int32_t getMeterIndex(SSQLToken* pTableToken, SSqlCmd* pCmd, SColumnIndex* pIndex) {
+int32_t getMeterIndex(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
   if (pTableToken->n == 0) {  // only one table and no table name prefix in column name
-    if (pCmd->numOfTables == 1) {
+    if (pQueryInfo->numOfTables == 1) {
       pIndex->tableIndex = 0;
     }
 
@@ -2335,8 +1903,8 @@ static int32_t getMeterIndex(SSQLToken* pTableToken, SSqlCmd* pCmd, SColumnIndex
   pIndex->tableIndex = COLUMN_INDEX_INITIAL_VAL;
   char tableName[TSDB_METER_ID_LEN + 1] = {0};
 
-  for (int32_t i = 0; i < pCmd->numOfTables; ++i) {
-    SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i);
+  for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
+    SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i);
 
     extractTableName(pMeterMetaInfo->name, tableName);
     if (strncasecmp(tableName, pTableToken->z, pTableToken->n) == 0 && strlen(tableName) == pTableToken->n) {
@@ -2352,29 +1920,29 @@ static int32_t getMeterIndex(SSQLToken* pTableToken, SSqlCmd* pCmd, SColumnIndex
   return TSDB_CODE_SUCCESS;
 }
 
-int32_t getTableIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex) {
+int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
   SSQLToken tableToken = {0};
   extractTableNameFromToken(pToken, &tableToken);
 
-  if (getMeterIndex(&tableToken, pCmd, pIndex) != TSDB_CODE_SUCCESS) {
+  if (getMeterIndex(&tableToken, pQueryInfo, pIndex) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_INVALID_SQL;
   }
 
   return TSDB_CODE_SUCCESS;
 }
 
-int32_t getColumnIndexByNameEx(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex) {
-  if (pCmd->pMeterInfo == NULL || pCmd->numOfTables == 0) {
+int32_t getColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+  if (pQueryInfo->pMeterInfo == NULL || pQueryInfo->numOfTables == 0) {
     return TSDB_CODE_INVALID_SQL;
   }
 
   SSQLToken tmpToken = *pToken;
 
-  if (getTableIndexByName(&tmpToken, pCmd, pIndex) != TSDB_CODE_SUCCESS) {
+  if (getTableIndexByName(&tmpToken, pQueryInfo, pIndex) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_INVALID_SQL;
   }
 
-  return doGetColumnIndexByName(&tmpToken, pCmd, pIndex);
+  return doGetColumnIndexByName(&tmpToken, pQueryInfo, pIndex);
 }
 
 int32_t changeFunctionID(int32_t optr, int16_t* functionId) {
@@ -2440,139 +2008,76 @@ int32_t changeFunctionID(int32_t optr, int16_t* functionId) {
   return TSDB_CODE_SUCCESS;
 }
 
-// TODO support like for showing metrics, there are show meters with like ops
 int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
   SSqlCmd* pCmd = &pSql->cmd;
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
-
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
+  assert(pCmd->numOfClause == 1);
+
   pCmd->command = TSDB_SQL_SHOW;
 
-  int8_t type = pInfo->sqlType;
-  const char* msg = "database name too long";
-  const char* msg1 = "invalid database name";
+  const char* msg1 = "invalid name";
   const char* msg2 = "pattern filter string too long";
-
-  switch (type) {
-    case SHOW_VGROUPS:
-      pCmd->showType = TSDB_MGMT_TABLE_VGROUP;
-      break;
-    case SHOW_TABLES:
-      pCmd->showType = TSDB_MGMT_TABLE_TABLE;
-      break;
-    case SHOW_STABLES:
-      pCmd->showType = TSDB_MGMT_TABLE_METRIC;
-      break;
-
-    case SHOW_DATABASES:
-      pCmd->showType = TSDB_MGMT_TABLE_DB;
-      break;
-    case SHOW_MNODES:
-      pCmd->showType = TSDB_MGMT_TABLE_MNODE;
-      break;
-    case SHOW_DNODES:
-      pCmd->showType = TSDB_MGMT_TABLE_PNODE;
-      break;
-    case SHOW_ACCOUNTS:
-      pCmd->showType = TSDB_MGMT_TABLE_ACCT;
-      break;
-    case SHOW_USERS:
-      pCmd->showType = TSDB_MGMT_TABLE_USER;
-      break;
-    case SHOW_MODULES:
-      pCmd->showType = TSDB_MGMT_TABLE_MODULE;
-      break;
-    case SHOW_CONNECTIONS:
-      pCmd->showType = TSDB_MGMT_TABLE_CONNS;
-      break;
-    case SHOW_QUERIES:
-      pCmd->showType = TSDB_MGMT_TABLE_QUERIES;
-      break;
-    case SHOW_SCORES:
-      pCmd->showType = TSDB_MGMT_TABLE_SCORES;
-      break;
-    case SHOW_GRANTS:
-      pCmd->showType = TSDB_MGMT_TABLE_GRANTS;
-      break;
-    case SHOW_STREAMS:
-      pCmd->showType = TSDB_MGMT_TABLE_STREAMS;
-      break;
-    case SHOW_CONFIGS:
-      pCmd->showType = TSDB_MGMT_TABLE_CONFIGS;
-      break;
-    case SHOW_VNODES:
-      pCmd->showType = TSDB_MGMT_TABLE_VNODES;
-      break;
-    default:
-      return TSDB_CODE_INVALID_SQL;
-  }
+  const char* msg3 = "database name too long";
+  const char* msg4 = "invalid ip address";
+  const char* msg5 = "database name is empty";
+  const char* msg6 = "pattern string is empty";
 
   /*
    * database prefix in pInfo->pDCLInfo->a[0]
    * wildcard in like clause in pInfo->pDCLInfo->a[1]
    */
-  if (type == SHOW_TABLES || type == SHOW_STABLES || type == SHOW_VGROUPS) {
+  SShowInfo* pShowInfo = &pInfo->pDCLInfo->showOpt;
+  int16_t    showType = pShowInfo->showType;
+  if (showType == TSDB_MGMT_TABLE_TABLE || showType == TSDB_MGMT_TABLE_METRIC || showType == TSDB_MGMT_TABLE_VGROUP) {
     // db prefix in tagCond, show table conds in payload
-    if (pInfo->pDCLInfo->nTokens > 0) {
-      SSQLToken* pDbPrefixToken = &pInfo->pDCLInfo->a[0];
+    SSQLToken* pDbPrefixToken = &pShowInfo->prefix;
+    if (pDbPrefixToken->type != 0) {
+      assert(pDbPrefixToken->n >= 0);
 
      if (pDbPrefixToken->n > TSDB_DB_NAME_LEN) {  // db name is too long
-        return invalidSqlErrMsg(pCmd, msg);
+        return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
      }
 
-      if (pDbPrefixToken->n > 0 && tscValidateName(pDbPrefixToken) != TSDB_CODE_SUCCESS) {
-        return invalidSqlErrMsg(pCmd, msg1);
+      if (pDbPrefixToken->n <= 0) {
+        return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
      }
 
-      int32_t ret = 0;
-      if (pDbPrefixToken->n > 0) {  // has db prefix
-        ret = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pDbPrefixToken, NULL, NULL);
+      if (tscValidateName(pDbPrefixToken) != TSDB_CODE_SUCCESS) {
+        return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
      }
 
+      int32_t ret = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pDbPrefixToken, NULL, NULL);
      if (ret != TSDB_CODE_SUCCESS) {
-        return ret;
+        return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
      }
+    }
 
-      if (pInfo->pDCLInfo->nTokens == 2) {
-        if (type == SHOW_VGROUPS) {
-          // set the table name for show vgroups
-          SSQLToken* meterId = &pInfo->pDCLInfo->a[1];
-          if (0 == pDbPrefixToken->n) {
-            SSQLToken db = {0};
-            getCurrentDBName(pSql, &db);
-            pDbPrefixToken = &db;
-          }
-          ret = setObjFullName(pCmd->payload, NULL, pDbPrefixToken, meterId, &(pCmd->payloadLen));
-          if (ret != TSDB_CODE_SUCCESS) {
-            return ret;
-          }
-        } else {
-          // set the like conds for show tables/stables
-          SSQLToken* likeToken = &pInfo->pDCLInfo->a[1];
-
-          strncpy(pCmd->payload, likeToken->z, likeToken->n);
-          pCmd->payloadLen = strdequote(pCmd->payload);
-
-          if (pCmd->payloadLen > TSDB_METER_NAME_LEN) {
-            return invalidSqlErrMsg(pCmd, msg2);
-          }
-        }
+    // show table/stable like 'xxxx', set the like pattern for show tables
+    SSQLToken* pPattern = &pShowInfo->pattern;
+    if (pPattern->type != 0) {
+      pPattern->n = strdequote(pPattern->z);
+
+      if (pPattern->n <= 0) {
+        return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+      }
+
+      if (pCmd->payloadLen > TSDB_METER_NAME_LEN) {
+        return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
      }
    }
-  } else if (type == SHOW_VNODES) {
-    if (NULL == pInfo->pDCLInfo) {
-      return invalidSqlErrMsg(pCmd, "No specified ip of dnode");
+  } else if (showType == TSDB_MGMT_TABLE_VNODES) {
+    if (pShowInfo->prefix.type == 0) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "No specified ip of dnode");
    }
 
    // show vnodes may be ip addr of dnode in payload
-    if (pInfo->pDCLInfo->nTokens > 0) {
-      SSQLToken* pDnodeIp = &pInfo->pDCLInfo->a[0];
-
-      if (pDnodeIp->n > TSDB_IPv4ADDR_LEN) {  // ip addr is too long
-        return invalidSqlErrMsg(pCmd, msg);
-      }
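+    // the dnode ip for SHOW VNODES is carried in the prefix token; check both its length and its format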
+    SSQLToken* pDnodeIp = &pShowInfo->prefix;
+    if (pDnodeIp->n > TSDB_IPv4ADDR_LEN) {  // ip addr is too long
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+    }
 
-      strncpy(pCmd->payload, pDnodeIp->z, pDnodeIp->n);
-      pCmd->payloadLen = strdequote(pCmd->payload);
+    if (!validateIpAddress(pDnodeIp->z, pDnodeIp->n)) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+    }
   }
 
@@ -2580,60 +2085,54 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
 }
 
 int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
-  SSqlCmd* pCmd = &pSql->cmd;
+  const char* msg1 = "invalid ip address";
+  const char* msg2 = "invalid port";
 
-  switch (pInfo->sqlType) {
-    case KILL_QUERY:
-      pCmd->command = TSDB_SQL_KILL_QUERY;
-      break;
-    case KILL_STREAM:
-      pCmd->command = TSDB_SQL_KILL_STREAM;
-      break;
-    case KILL_CONNECTION:
-      pCmd->command = TSDB_SQL_KILL_CONNECTION;
-      break;
-    default:
-      return TSDB_CODE_INVALID_SQL;
-  }
+  SSqlCmd* pCmd = &pSql->cmd;
+  pCmd->command = pInfo->type;
 
-  SSQLToken* pToken = &(pInfo->pDCLInfo->a[0]);
-  if (pToken->n > TSDB_KILL_MSG_LEN) {
+  SSQLToken* ip = &(pInfo->pDCLInfo->ip);
+  if (ip->n > TSDB_KILL_MSG_LEN) {
     return TSDB_CODE_INVALID_SQL;
   }
 
-  strncpy(pCmd->payload, pToken->z, pToken->n);
+  strncpy(pCmd->payload, ip->z, ip->n);
 
   const char delim = ':';
-  char* ipStr = strtok(pToken->z, &delim);
-  char* portStr = strtok(NULL, &delim);
 
-  if (!validateIpAddress(ipStr)) {
+  char* ipStr = strtok(ip->z, &delim);
+  char* portStr = strtok(NULL, &delim);
+
+  if (!validateIpAddress(ipStr, strlen(ipStr))) {
     memset(pCmd->payload, 0, tListLen(pCmd->payload));
-    const char* msg = "invalid ip address";
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
   }
 
   uint16_t port = (uint16_t)strtol(portStr, NULL, 10);
   if (port <= 0 || port > 65535) {
     memset(pCmd->payload, 0, tListLen(pCmd->payload));
-
-    const char* msg = "invalid port";
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
   }
 
   return TSDB_CODE_SUCCESS;
 }
 
-bool validateIpAddress(char* ip) {
-  in_addr_t ipAddr = inet_addr(ip);
-  return (ipAddr != 0) && (ipAddr != 0xffffffff);
+bool validateIpAddress(const char* ip, size_t size) {
+  char tmp[128] = {0};  // buffer to build null-terminated string
+  assert(size < 128);
+
+  strncpy(tmp, ip, size);
+
+  in_addr_t ipAddr = inet_addr(tmp);
+
+  return ipAddr != INADDR_NONE;
 }
 
-int32_t tscTansformSQLFunctionForMetricQuery(SSqlCmd* pCmd) {
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
+int32_t tscTansformSQLFunctionForSTableQuery(SQueryInfo* pQueryInfo) {
  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
 
-  if (pMeterMetaInfo->pMeterMeta == NULL || !UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
+  if (pMeterMetaInfo->pMeterMeta == NULL || !UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
    return TSDB_CODE_INVALID_SQL;
  }
 
@@ -2643,9 +2142,9 @@ int32_t tscTansformSQLFunctionForMetricQuery(SSqlCmd* pCmd) {
   int16_t type = 0;
   int16_t intermediateBytes = 0;
 
-  for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) {
-    SSqlExpr*   pExpr = tscSqlExprGet(pCmd, k);
-    TAOS_FIELD* pField = tscFieldInfoGetField(pCmd, k);
+  for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) {
+    SSqlExpr*   pExpr = tscSqlExprGet(pQueryInfo, k);
+    TAOS_FIELD* pField = tscFieldInfoGetField(pQueryInfo, k);
 
     int16_t functionId = aAggs[pExpr->functionId].stableFuncId;
 
@@ -2656,26 +2155,26 @@ int32_t tscTansformSQLFunctionForMetricQuery(SSqlCmd* pCmd) {
        return TSDB_CODE_INVALID_SQL;
      }
 
-      tscSqlExprUpdate(pCmd, k, functionId, pExpr->colInfo.colIdx, TSDB_DATA_TYPE_BINARY, bytes);
+      tscSqlExprUpdate(pQueryInfo, k, functionId, pExpr->colInfo.colIdx, TSDB_DATA_TYPE_BINARY, bytes);
      // todo refactor
      pExpr->interResBytes = intermediateBytes;
    }
  }
 
-  tscFieldInfoUpdateOffset(pCmd);
+  tscFieldInfoUpdateOffsetForInterResult(pQueryInfo);
   return TSDB_CODE_SUCCESS;
 }
 
 /* transfer the field-info back to original input format */
-void tscRestoreSQLFunctionForMetricQuery(SSqlCmd* pCmd) {
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
-  if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
+void tscRestoreSQLFunctionForMetricQuery(SQueryInfo* pQueryInfo) {
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
+  if (!UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
     return;
   }
 
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    SSqlExpr*   pExpr = tscSqlExprGet(pCmd, i);
-    TAOS_FIELD* pField = tscFieldInfoGetField(pCmd, i);
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    SSqlExpr*   pExpr = tscSqlExprGet(pQueryInfo, i);
+    TAOS_FIELD* pField = tscFieldInfoGetField(pQueryInfo, i);
 
     if ((pExpr->functionId >= TSDB_FUNC_FIRST_DST && pExpr->functionId <= TSDB_FUNC_LAST_DST) ||
         (pExpr->functionId >= TSDB_FUNC_SUM && pExpr->functionId <= TSDB_FUNC_MAX)) {
@@ -2685,26 +2184,29 @@ void tscRestoreSQLFunctionForMetricQuery(SSqlCmd* pCmd) {
   }
 }
 
-bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd) {
+bool hasUnsupportFunctionsForSTableQuery(SQueryInfo* pQueryInfo) {
   const char* msg1 = "TWA not allowed to apply to super table directly";
   const char* msg2 = "TWA only support group by tbname for super table query";
-
+  const char* msg3 = "function not supported for super table query";
+
   // filter sql function not supported by metric query yet.
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    int32_t functionId = tscSqlExprGet(pCmd, i)->functionId;
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
     if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_METRIC) == 0) {
+      invalidSqlErrMsg(pQueryInfo->msg, msg3);
       return true;
     }
  }
 
-  if (tscIsTWAQuery(pCmd)) {
-    if (pCmd->groupbyExpr.numOfGroupCols == 0) {
-      invalidSqlErrMsg(pCmd, msg1);
+  if (tscIsTWAQuery(pQueryInfo)) {
+    if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
+      invalidSqlErrMsg(pQueryInfo->msg, msg1);
      return true;
    }
 
-    if (pCmd->groupbyExpr.numOfGroupCols != 1 || pCmd->groupbyExpr.columnInfo[0].colIdx != TSDB_TBNAME_COLUMN_INDEX) {
-      invalidSqlErrMsg(pCmd, msg2);
+    if (pQueryInfo->groupbyExpr.numOfGroupCols != 1 ||
+        pQueryInfo->groupbyExpr.columnInfo[0].colIdx != TSDB_TBNAME_COLUMN_INDEX) {
+      invalidSqlErrMsg(pQueryInfo->msg, msg2);
      return true;
    }
  }
 
@@ -2712,21 +2214,21 @@ bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd) {
   return false;
 }
 
-static bool functionCompatibleCheck(SSqlCmd* pCmd) {
+static bool functionCompatibleCheck(SQueryInfo* pQueryInfo) {
   int32_t startIdx = 0;
-  int32_t functionID = tscSqlExprGet(pCmd, startIdx)->functionId;
+  int32_t functionID = tscSqlExprGet(pQueryInfo, startIdx)->functionId;
 
   // ts function can be simultaneously used with any other functions.
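+  // (a leading ts/ts_dummy column is injected by the parser for top/bottom/diff queries, so skip it here)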
   if (functionID == TSDB_FUNC_TS || functionID == TSDB_FUNC_TS_DUMMY) {
     startIdx++;
   }
 
-  int32_t factor = funcCompatDefList[tscSqlExprGet(pCmd, startIdx)->functionId];
+  int32_t factor = funcCompatDefList[tscSqlExprGet(pQueryInfo, startIdx)->functionId];
 
   // diff function cannot be executed with other function
   // arithmetic function can be executed with other arithmetic functions
-  for (int32_t i = startIdx + 1; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+  for (int32_t i = startIdx + 1; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
 
     int16_t functionId = pExpr->functionId;
     if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS) {
@@ -2745,21 +2247,21 @@ static bool functionCompatibleCheck(SSqlCmd* pCmd) {
   return true;
 }
 
-void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) {
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex);
+void updateTagColumnIndex(SQueryInfo* pQueryInfo, int32_t tableIndex) {
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex);
 
   /*
    * update tags column index for group by tags
    * group by columns belong to this table
    */
-  if (pCmd->groupbyExpr.numOfGroupCols > 0 && pCmd->groupbyExpr.tableIndex == tableIndex) {
-    for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) {
-      int32_t index = pCmd->groupbyExpr.columnInfo[i].colIdx;
+  if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 && pQueryInfo->groupbyExpr.tableIndex == tableIndex) {
+    for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
+      int32_t index = pQueryInfo->groupbyExpr.columnInfo[i].colIdx;
 
       for (int32_t j = 0; j < pMeterMetaInfo->numOfTags; ++j) {
         int32_t tagColIndex = pMeterMetaInfo->tagColumnIndex[j];
         if (tagColIndex == index) {
-          pCmd->groupbyExpr.columnInfo[i].colIdx = j;
+          pQueryInfo->groupbyExpr.columnInfo[i].colIdx = j;
           break;
         }
       }
@@ -2767,8 +2269,8 @@ void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) {
   }
 
   // update tags column index for expression
-  for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+  for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
 
     if (!TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {  // not tags, continue
       continue;
@@ -2788,7 +2290,7 @@ void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) {
   }
 
   // update join condition tag column index
-  SJoinInfo* pJoinInfo = &pCmd->tagCond.joinInfo;
+  SJoinInfo* pJoinInfo = &pQueryInfo->tagCond.joinInfo;
   if (!pJoinInfo->hasJoin) {  // not join query
     return;
   }
@@ -2813,7 +2315,7 @@ void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) {
   }
 }
 
-int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) {
+int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd) {
   const char* msg1 = "too many columns in group by clause";
   const char* msg2 = "invalid column name in group by clause";
   const char* msg3 = "group by columns must belong to one table";
@@ -2828,9 +2330,9 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) {
     return TSDB_CODE_SUCCESS;
   }
 
-  pCmd->groupbyExpr.numOfGroupCols = pList->nExpr;
+  pQueryInfo->groupbyExpr.numOfGroupCols = pList->nExpr;
   if (pList->nExpr > TSDB_MAX_TAGS) {
-    return invalidSqlErrMsg(pCmd, msg1);
+    return invalidSqlErrMsg(pQueryInfo->msg, msg1);
   }
 
   SMeterMeta* pMeterMeta = NULL;
@@ -2845,17 +2347,17 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) {
 
     SColumnIndex index = COLUMN_INDEX_INITIALIZER;
-    if (getColumnIndexByNameEx(&token, pCmd, &index) != TSDB_CODE_SUCCESS) {
-      return invalidSqlErrMsg(pCmd, msg2);
+    if (getColumnIndexByName(&token, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+      return invalidSqlErrMsg(pQueryInfo->msg, msg2);
     }
 
     if (tableIndex != index.tableIndex && tableIndex >= 0) {
-      return invalidSqlErrMsg(pCmd, msg3);
+      return invalidSqlErrMsg(pQueryInfo->msg, msg3);
     }
 
     tableIndex = index.tableIndex;
 
-    pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex);
+    pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex);
     pMeterMeta = pMeterMetaInfo->pMeterMeta;
 
     if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
@@ -2870,8 +2372,8 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) {
     }
 
     if (groupTag) {
-      if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
-        return invalidSqlErrMsg(pCmd, msg9);
+      if (!UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
+        return invalidSqlErrMsg(pQueryInfo->msg, msg9);
       }
 
       int32_t relIndex = index.columnIndex;
@@ -2879,36 +2381,36 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) {
         relIndex -= pMeterMeta->numOfColumns;
       }
 
-      pCmd->groupbyExpr.columnInfo[i] =
+      pQueryInfo->groupbyExpr.columnInfo[i] =
           (SColIndexEx){.colIdx = relIndex, .flag = TSDB_COL_TAG, .colId = pSchema->colId};  // relIndex;
-      addRequiredTagColumn(pCmd, pCmd->groupbyExpr.columnInfo[i].colIdx, index.tableIndex);
+      addRequiredTagColumn(pQueryInfo, pQueryInfo->groupbyExpr.columnInfo[i].colIdx, index.tableIndex);
     } else {
       // check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by
-      if (pSchema->type > TSDB_DATA_TYPE_BIGINT) {
-        return invalidSqlErrMsg(pCmd, msg8);
+      if (pSchema->type > TSDB_DATA_TYPE_BINARY) {
+        return invalidSqlErrMsg(pQueryInfo->msg, msg8);
      }
 
-      tscColumnBaseInfoInsert(pCmd, &index);
-      pCmd->groupbyExpr.columnInfo[i] =
+      tscColumnBaseInfoInsert(pQueryInfo, &index);
+      pQueryInfo->groupbyExpr.columnInfo[i] =
          (SColIndexEx){.colIdx = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId};  // relIndex;
-      pCmd->groupbyExpr.orderType = TSQL_SO_ASC;
+      pQueryInfo->groupbyExpr.orderType = TSQL_SO_ASC;
 
      if (i == 0 && pList->nExpr > 1) {
-        return invalidSqlErrMsg(pCmd, msg7);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg7);
      }
    }
  }
 
-  pCmd->groupbyExpr.tableIndex = tableIndex;
+  pQueryInfo->groupbyExpr.tableIndex = tableIndex;
 
   return TSDB_CODE_SUCCESS;
 }
 
-void setColumnOffsetValueInResultset(SSqlCmd* pCmd) {
-  if (QUERY_IS_STABLE_QUERY(pCmd->type)) {
-    tscFieldInfoUpdateOffset(pCmd);
+void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo) {
+  if (QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
+    tscFieldInfoUpdateOffsetForInterResult(pQueryInfo);
   } else {
-    tscFieldInfoCalOffset(pCmd);
+    tscFieldInfoCalOffset(pQueryInfo);
   }
 }
 
@@ -2918,7 +2420,7 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumnBase* pColumn) {
   }
 
   int32_t size = pColumn->numOfFilters + 1;
-  char* tmp = realloc(pColumn->filterInfo, sizeof(SColumnFilterInfo) * (size));
+  char*   tmp = (char*)realloc((void*)(pColumn->filterInfo), sizeof(SColumnFilterInfo) * (size));
   if (tmp != NULL) {
     pColumn->filterInfo = (SColumnFilterInfo*)tmp;
   }
@@ -2931,12 +2433,12 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumnBase* pColumn) {
   return pColFilterInfo;
 }
 
-static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SColumnFilterInfo* pColumnFilter, SColumnIndex* columnIndex,
-                                         tSQLExpr* pExpr) {
+static int32_t doExtractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter,
+                                         SColumnIndex* columnIndex, tSQLExpr* pExpr) {
   const char* msg = "not supported filter condition";
 
   tSQLExpr*       pRight = pExpr->pRight;
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, columnIndex->tableIndex);
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, columnIndex->tableIndex);
 
   SSchema* pSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, columnIndex->columnIndex);
 
@@ -2946,7 +2448,7 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SColumnFilterInfo* pColu
   } else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) {
     colType = TSDB_DATA_TYPE_DOUBLE;
   } else if ((colType == TSDB_DATA_TYPE_TIMESTAMP) && (TSDB_DATA_TYPE_BINARY == pRight->val.nType)) {
-    int retVal = setColumnFilterInfoForTimestamp(pCmd, &pRight->val);
+    int retVal = setColumnFilterInfoForTimestamp(pQueryInfo, &pRight->val);
     if (TSDB_CODE_SUCCESS != retVal) {
       return retVal;
     }
@@ -2996,7 +2498,7 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SColumnFilterInfo* pColu
       pColumnFilter->lowerRelOptr = TSDB_RELATION_LIKE;
       break;
     default:
-      return invalidSqlErrMsg(pCmd, msg);
+      return invalidSqlErrMsg(pQueryInfo->msg, msg);
   }
 
   return TSDB_CODE_SUCCESS;
@@ -3174,8 +2676,8 @@ enum {
   TSQL_EXPR_TBNAME = 3,
 };
 
-static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SColumnIndex* pIndex, tSQLExpr* pExpr, int32_t sqlOptr) {
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pIndex->tableIndex);
+static int32_t extractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnIndex* pIndex, tSQLExpr* pExpr, int32_t sqlOptr) {
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, pIndex->tableIndex);
 
   SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta;
   SSchema*    pSchema = tsGetColumnSchema(pMeterMeta, pIndex->columnIndex);
@@ -3183,7 +2685,7 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SColumnIndex* pIndex, tSQL
   const char* msg1 = "non binary column not support like operator";
   const char* msg2 = "binary column not support this operator";
 
-  SColumnBase*       pColumn = tscColumnBaseInfoInsert(pCmd, pIndex);
+  SColumnBase*       pColumn = tscColumnBaseInfoInsert(pQueryInfo, pIndex);
   SColumnFilterInfo* pColFilter = NULL;
 
   /*
@@ -3209,25 +2711,25 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SColumnIndex* pIndex, tSQL
 
   if (pColFilter->filterOnBinary) {
     if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE && pExpr->nSQLOptr != TK_LIKE) {
-      return invalidSqlErrMsg(pCmd, msg2);
+      return invalidSqlErrMsg(pQueryInfo->msg, msg2);
    }
  } else {
    if (pExpr->nSQLOptr == TK_LIKE) {
-      return invalidSqlErrMsg(pCmd, msg1);
+      return invalidSqlErrMsg(pQueryInfo->msg, msg1);
    }
  }
 
   pColumn->colIndex = *pIndex;
-  return doExtractColumnFilterInfo(pCmd, pColFilter, pIndex, pExpr);
+  return doExtractColumnFilterInfo(pQueryInfo, pColFilter, pIndex, pExpr);
 }
 
-static void relToString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) {
+static void relToString(tSQLExpr* pExpr, char** str) {
   assert(pExpr->nSQLOptr == TK_AND || pExpr->nSQLOptr == TK_OR);
 
   const char* or = "OR";
   const char*and = "AND";
 
-  // if (pCmd->tagCond.relType == TSQL_STABLE_QTYPE_COND) {
+  // if (pQueryInfo->tagCond.relType == TSQL_STABLE_QTYPE_COND) {
   if (pExpr->nSQLOptr == TK_AND) {
     strcpy(*str, and);
     *str += strlen(and);
@@ -3235,10 +2737,9 @@ static void relToString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) {
     strcpy(*str, or);
     *str += strlen(or);
   }
-  // }
 }
 
-static int32_t getTagCondString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) {
+static int32_t getTagCondString(tSQLExpr* pExpr, char** str) {
   if (pExpr == NULL) {
     return TSDB_CODE_SUCCESS;
   }
@@ -3247,14 +2748,14 @@ static int32_t getTagCondString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) {
     *(*str) = '(';
     *str += 1;
 
-    int32_t ret = getTagCondString(pCmd, pExpr->pLeft, str);
+    int32_t ret = getTagCondString(pExpr->pLeft, str);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
     }
 
-    relToString(pCmd, pExpr, str);
+    relToString(pExpr, str);
 
-    ret = getTagCondString(pCmd, pExpr->pRight, str);
+    ret = getTagCondString(pExpr->pRight, str);
 
     *(*str) = ')';
     *str += 1;
@@ -3265,7 +2766,7 @@ static int32_t getTagCondString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) {
   return tSQLExprLeafToString(pExpr, true, str);
 }
 
-static int32_t getTablenameCond(SSqlCmd* pCmd, tSQLExpr* pTableCond, /*char* str*/ SStringBuilder* sb) {
+static int32_t getTablenameCond(SQueryInfo* pQueryInfo, tSQLExpr* pTableCond, SStringBuilder* sb) {
   const char* msg0 = "invalid table name list";
 
   if (pTableCond == NULL) {
@@ -3288,57 +2789,55 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, tSQLExpr* pTableCond, /*char* str
   }
 
   if (ret != TSDB_CODE_SUCCESS) {
-    invalidSqlErrMsg(pCmd, msg0);
+    invalidSqlErrMsg(pQueryInfo->msg, msg0);
   }
 
   return ret;
 }
 
-static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, tSQLExpr* pExpr, int32_t relOptr) {
+static int32_t getColumnQueryCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t relOptr) {
   if (pExpr == NULL) {
     return TSDB_CODE_SUCCESS;
   }
 
   if (!isExprDirectParentOfLeaftNode(pExpr)) {  // internal node
-    int32_t ret = getColumnQueryCondInfo(pCmd, pExpr->pLeft, pExpr->nSQLOptr);
+    int32_t ret = getColumnQueryCondInfo(pQueryInfo, pExpr->pLeft, pExpr->nSQLOptr);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
     }
 
-    return getColumnQueryCondInfo(pCmd, pExpr->pRight, pExpr->nSQLOptr);
+    return getColumnQueryCondInfo(pQueryInfo, pExpr->pRight, pExpr->nSQLOptr);
   } else {  // handle leaf node
     SColumnIndex index = COLUMN_INDEX_INITIALIZER;
-    if (getColumnIndexByNameEx(&pExpr->pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) {
+    if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
       return TSDB_CODE_INVALID_SQL;
     }
 
-    return extractColumnFilterInfo(pCmd, &index, pExpr, relOptr);
+    return extractColumnFilterInfo(pQueryInfo, &index, pExpr, relOptr);
   }
 }
 
-static int32_t getJoinCondInfo(SSqlObj* pSql, tSQLExpr* pExpr) {
+static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
   const char* msg = "invalid join query condition";
 
   if (pExpr == NULL) {
     return TSDB_CODE_SUCCESS;
   }
 
-  SSqlCmd* pCmd = &pSql->cmd;
-
   if (!isExprDirectParentOfLeaftNode(pExpr)) {
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(pQueryInfo->msg, msg);
   }
 
-  STagCond* pTagCond = &pCmd->tagCond;
+  STagCond*  pTagCond = &pQueryInfo->tagCond;
   SJoinNode* pLeft = &pTagCond->joinInfo.left;
   SJoinNode* pRight = &pTagCond->joinInfo.right;
 
   SColumnIndex index = COLUMN_INDEX_INITIALIZER;
-  if (getColumnIndexByNameEx(&pExpr->pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) {
+  if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_INVALID_SQL;
   }
 
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex);
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex);
   int16_t         tagColIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns;
 
   pLeft->uid = pMeterMetaInfo->pMeterMeta->uid;
@@ -3346,11 +2845,11 @@ static int32_t getJoinCondInfo(SSqlObj* pSql, tSQLExpr* pExpr) {
   strcpy(pLeft->meterId, pMeterMetaInfo->name);
 
   index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
-  if (getColumnIndexByNameEx(&pExpr->pRight->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) {
+  if (getColumnIndexByName(&pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_INVALID_SQL;
   }
 
-  pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex);
+  pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex);
   tagColIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns;
 
   pRight->uid = pMeterMetaInfo->pMeterMeta->uid;
@@ -3395,31 +2894,22 @@ int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString) {
   return TSDB_CODE_SUCCESS;
 }
 
-static int32_t validateSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int32_t numOfCols, SColumnIdListRes* pList) {
+static int32_t validateSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList) {
   if (pExpr->nSQLOptr == TK_ID) {
-    bool validColumnName = false;
-
-    SColumnList* list = &pList->list;
-
-    for (int32_t i = 0; i < numOfCols; ++i) {
-      if (strncasecmp(pExpr->colInfo.z, pSchema[i].name, pExpr->colInfo.n) == 0 &&
-          pExpr->colInfo.n == strlen(pSchema[i].name)) {
-        if (pSchema[i].type < TSDB_DATA_TYPE_TINYINT || pSchema[i].type > TSDB_DATA_TYPE_DOUBLE) {
-          return TSDB_CODE_INVALID_SQL;
-        }
-
-        if (pList != NULL) {
-          list->ids[list->num++].columnIndex = (int16_t)i;
-        }
-
-        validColumnName = true;
+    SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+    if (getColumnIndexByName(&pExpr->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+      return TSDB_CODE_INVALID_SQL;
     }
-    }
-
-    if (!validColumnName) {
-      return TSDB_CODE_INVALID_SQL;
-    }
+
+    // timestamp, bool, binary and nchar columns do not support arithmetic, so return invalid sql
+    SMeterMeta* pMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex)->pMeterMeta;
+    SSchema*    pSchema = tsGetSchema(pMeterMeta) + index.columnIndex;
+    if ((pSchema->type == TSDB_DATA_TYPE_TIMESTAMP) || (pSchema->type == TSDB_DATA_TYPE_BOOL)
+        || (pSchema->type == TSDB_DATA_TYPE_BINARY) || (pSchema->type == TSDB_DATA_TYPE_NCHAR)){
+      return TSDB_CODE_INVALID_SQL;
+    }
+
+    pList->ids[pList->num++] = index;
   } else if (pExpr->nSQLOptr == TK_FLOAT && (isnan(pExpr->val.dKey) || isinf(pExpr->val.dKey))) {
     return TSDB_CODE_INVALID_SQL;
   } else if (pExpr->nSQLOptr >= TK_MIN && pExpr->nSQLOptr <= TK_LAST_ROW) {
@@ -3429,20 +2919,19 @@ static int32_t validateSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int32_t numOfC
   return TSDB_CODE_SUCCESS;
 }
 
-static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int32_t numOfCols,
-                                         SColumnIdListRes* pList) {
+static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList) {
   if (pExpr == NULL) {
     return TSDB_CODE_SUCCESS;
   }
 
   tSQLExpr* pLeft = pExpr->pLeft;
   if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) {
-    int32_t ret = validateArithmeticSQLExpr(pLeft, pSchema, numOfCols, pList);
+    int32_t ret = validateArithmeticSQLExpr(pLeft, pQueryInfo, pList);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
     }
   } else {
-    int32_t ret = validateSQLExpr(pLeft, pSchema, numOfCols, pList);
+    int32_t ret = validateSQLExpr(pLeft, pQueryInfo, pList);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
     }
  }
@@ -3450,12 +2939,12 @@ static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int3
   tSQLExpr* pRight = pExpr->pRight;
   if (pRight->nSQLOptr >= TK_PLUS && pRight->nSQLOptr <= TK_REM) {
-    int32_t ret = validateArithmeticSQLExpr(pRight, pSchema, numOfCols, pList);
+    int32_t ret = validateArithmeticSQLExpr(pRight, pQueryInfo, pList);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
     }
   } else {
-    int32_t ret = validateSQLExpr(pRight, pSchema, numOfCols, pList);
+    int32_t ret = validateSQLExpr(pRight, pQueryInfo, pList);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
     }
@@ -3522,7 +3011,7 @@ static void exchangeExpr(tSQLExpr* pExpr) {
   }
 }
 
-static bool validateJoinExprNode(SSqlCmd* pCmd, tSQLExpr* pExpr, SColumnIndex* pLeftIndex) {
+static bool validateJoinExprNode(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColumnIndex* pLeftIndex) {
   const char* msg1 = "illegal column name";
   const char* msg2 = "= is expected in join expression";
   const char* msg3 = "join column must have same type";
@@ -3537,40 +3026,40 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, tSQLExpr* pExpr, SColumnIndex* p
   }
 
   if (pExpr->nSQLOptr != TK_EQ) {
-    invalidSqlErrMsg(pCmd, msg2);
+    invalidSqlErrMsg(pQueryInfo->msg, msg2);
     return false;
   }
 
   SColumnIndex rightIndex = COLUMN_INDEX_INITIALIZER;
 
-  if (getColumnIndexByNameEx(&pRight->colInfo, pCmd, &rightIndex) != TSDB_CODE_SUCCESS) {
-    invalidSqlErrMsg(pCmd, msg1);
+  if (getColumnIndexByName(&pRight->colInfo, pQueryInfo, &rightIndex) != TSDB_CODE_SUCCESS) {
+    invalidSqlErrMsg(pQueryInfo->msg, msg1);
     return false;
   }
 
   // todo extract function
-  SMeterMetaInfo* pLeftMeterMeta = tscGetMeterMetaInfo(pCmd, pLeftIndex->tableIndex);
+  SMeterMetaInfo* pLeftMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, pLeftIndex->tableIndex);
   SSchema*        pLeftSchema = tsGetSchema(pLeftMeterMeta->pMeterMeta);
   int16_t         leftType = pLeftSchema[pLeftIndex->columnIndex].type;
 
-  SMeterMetaInfo* pRightMeterMeta = tscGetMeterMetaInfo(pCmd, rightIndex.tableIndex);
+  SMeterMetaInfo* pRightMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, rightIndex.tableIndex);
   SSchema*        pRightSchema = tsGetSchema(pRightMeterMeta->pMeterMeta);
   int16_t         rightType = pRightSchema[rightIndex.columnIndex].type;
 
   if (leftType != rightType) {
-    invalidSqlErrMsg(pCmd, msg3);
+    invalidSqlErrMsg(pQueryInfo->msg, msg3);
     return false;
   } else if (pLeftIndex->tableIndex == rightIndex.tableIndex) {
-    invalidSqlErrMsg(pCmd, msg4);
+    invalidSqlErrMsg(pQueryInfo->msg, msg4);
     return false;
   } else if (leftType == TSDB_DATA_TYPE_BINARY || leftType == TSDB_DATA_TYPE_NCHAR) {
-    invalidSqlErrMsg(pCmd, msg6);
+    invalidSqlErrMsg(pQueryInfo->msg, msg6);
     return false;
   }
 
   // table to table/ super table to super table are allowed
-  if (UTIL_METER_IS_METRIC(pLeftMeterMeta) != UTIL_METER_IS_METRIC(pRightMeterMeta)) {
-    invalidSqlErrMsg(pCmd, msg5);
+  if (UTIL_METER_IS_SUPERTABLE(pLeftMeterMeta) != UTIL_METER_IS_SUPERTABLE(pRightMeterMeta)) {
+    invalidSqlErrMsg(pQueryInfo->msg, msg5);
     return false;
   }
 
@@ -3589,10 +3078,10 @@ static bool validTableNameOptr(tSQLExpr* pExpr) {
   return false;
 }
 
-static int32_t setExprToCond(SSqlCmd* pCmd, tSQLExpr** parent, tSQLExpr* pExpr, const char* msg, int32_t parentOptr) {
+static int32_t setExprToCond(tSQLExpr** parent, tSQLExpr* pExpr, const char* msg, int32_t parentOptr, char* msgBuf) {
   if (*parent != NULL) {
     if (parentOptr == TK_OR && msg != NULL) {
-      return invalidSqlErrMsg(pCmd, msg);
+      return invalidSqlErrMsg(msgBuf, msg);
     }
 
     *parent = tSQLExprCreate((*parent), pExpr, parentOptr);
@@ -3603,7 +3092,7 @@ static int32_t setExprToCond(SSqlCmd* pCmd, tSQLExpr** parent, tSQLExpr* pExpr,
   return TSDB_CODE_SUCCESS;
 }
 
-static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type,
+static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type,
                                      int32_t parentOptr) {
   const char* msg1 = "meter query cannot use tags filter";
   const char* msg2 = "illegal column name";
@@ -3619,23 +3108,23 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr*
   int32_t ret = TSDB_CODE_SUCCESS;
 
   SColumnIndex index = COLUMN_INDEX_INITIALIZER;
-  if (getColumnIndexByNameEx(&pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) {
-    return invalidSqlErrMsg(pCmd, msg2);
+  if (getColumnIndexByName(&pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+    return invalidSqlErrMsg(pQueryInfo->msg, msg2);
   }
 
   assert(isExprDirectParentOfLeaftNode(*pExpr));
 
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex);
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex);
   SMeterMeta*     pMeterMeta = pMeterMetaInfo->pMeterMeta;
 
   if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) {  // query on time range
-    if (!validateJoinExprNode(pCmd, *pExpr, &index)) {
+    if (!validateJoinExprNode(pQueryInfo, *pExpr, &index)) {
       return TSDB_CODE_INVALID_SQL;
     }
 
     // set join query condition
     if (pRight->nSQLOptr == TK_ID) {  // no need to keep the timestamp join condition
-      pCmd->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
+      pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
       pCondExpr->tsJoin = true;
 
       /*
@@ -3644,7 +3133,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr*
       */
      tSQLExprDestroy(*pExpr);
    } else {
-      ret = setExprToCond(pCmd, &pCondExpr->pTimewindow, *pExpr, msg3, parentOptr);
+      ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pQueryInfo->msg);
    }
 
    *pExpr = NULL;  // remove this expression
@@ -3653,7 +3142,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr*
             index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {  // query on tags
    // check for tag query condition
    if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) {
-      return invalidSqlErrMsg(pCmd, msg1);
+      return invalidSqlErrMsg(pQueryInfo->msg, msg1);
    }
 
    // check for like expression
@@ -3666,14 +3155,14 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr*
 
      if ((!isTablenameToken(&pLeft->colInfo)) && pSchema[index.columnIndex].type != TSDB_DATA_TYPE_BINARY &&
          pSchema[index.columnIndex].type != TSDB_DATA_TYPE_NCHAR) {
-        return invalidSqlErrMsg(pCmd, msg2);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg2);
      }
    }
 
    // in case of in operator, keep it in a separate attribute
    if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
      if (!validTableNameOptr(*pExpr)) {
-        return invalidSqlErrMsg(pCmd, msg7);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg7);
      }
 
      if (pCondExpr->pTableCond == NULL) {
@@ -3681,23 +3170,23 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr*
        pCondExpr->relType = parentOptr;
        pCondExpr->tableCondIndex = index.tableIndex;
      } else {
-        return invalidSqlErrMsg(pCmd, msg6);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg6);
      }
 
      *type = TSQL_EXPR_TBNAME;
      *pExpr = NULL;
    } else {
      if (pRight->nSQLOptr == TK_ID) {  // join on tag columns for stable query
-        if (!validateJoinExprNode(pCmd, *pExpr, &index)) {
+        if (!validateJoinExprNode(pQueryInfo, *pExpr, &index)) {
          return TSDB_CODE_INVALID_SQL;
        }
 
        if (pCondExpr->pJoinExpr != NULL) {
-          return invalidSqlErrMsg(pCmd, msg4);
+          return invalidSqlErrMsg(pQueryInfo->msg, msg4);
        }
 
-        pCmd->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
-        ret = setExprToCond(pCmd, &pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr);
+        pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
+        ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg);
        *pExpr = NULL;
      } else {
        // do nothing
@@ -3712,17 +3201,18 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr*
    *type = TSQL_EXPR_COLUMN;
 
    if (pRight->nSQLOptr == TK_ID) {  // other column cannot be served as the join column
-      return invalidSqlErrMsg(pCmd, msg5);
+      return invalidSqlErrMsg(pQueryInfo->msg, msg5);
    }
 
-    ret = setExprToCond(pCmd, &pCondExpr->pColumnCond, *pExpr, NULL, parentOptr);
+    ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pQueryInfo->msg);
    *pExpr = NULL;  // remove it from expr tree
  }
 
   return ret;
 }
 
-int32_t getQueryCondExpr(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type, int32_t parentOptr) {
+int32_t getQueryCondExpr(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type,
+                         int32_t parentOptr) {
   if (pExpr == NULL) {
     return TSDB_CODE_SUCCESS;
   }
@@ -3740,12 +3230,12 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr,
   int32_t rightType = -1;
 
   if (!isExprDirectParentOfLeaftNode(*pExpr)) {
-    int32_t ret = getQueryCondExpr(pCmd, &(*pExpr)->pLeft, pCondExpr, &leftType, (*pExpr)->nSQLOptr);
+    int32_t ret = getQueryCondExpr(pQueryInfo, &(*pExpr)->pLeft, pCondExpr, &leftType, (*pExpr)->nSQLOptr);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
     }
 
-    ret = getQueryCondExpr(pCmd, &(*pExpr)->pRight, pCondExpr, &rightType, (*pExpr)->nSQLOptr);
+    ret = getQueryCondExpr(pQueryInfo, &(*pExpr)->pRight, pCondExpr, &rightType, (*pExpr)->nSQLOptr);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
     }
@@ -3756,7 +3246,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr,
      */
     if (leftType != rightType) {
       if ((*pExpr)->nSQLOptr == TK_OR && (leftType + rightType != TSQL_EXPR_TBNAME + TSQL_EXPR_TAG)) {
-        return invalidSqlErrMsg(pCmd, msg1);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg1);
       }
     }
 
@@ -3766,7 +3256,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr,
 
   exchangeExpr(*pExpr);
 
-  return handleExprInQueryCond(pCmd, pExpr, pCondExpr, type, parentOptr);
+  return handleExprInQueryCond(pQueryInfo, pExpr, pCondExpr, type, parentOptr);
 }
 
 static void doCompactQueryExpr(tSQLExpr** pExpr) {
@@ -3800,12 +3290,12 @@ static void doCompactQueryExpr(tSQLExpr** pExpr) {
   }
 }
 
-static void doExtractExprForSTable(tSQLExpr** pExpr, SSqlCmd* pCmd, tSQLExpr** pOut, int32_t tableIndex) {
+static void doExtractExprForSTable(tSQLExpr** pExpr, SQueryInfo* pQueryInfo, tSQLExpr** pOut, int32_t tableIndex) {
   if (isExprDirectParentOfLeaftNode(*pExpr)) {
     tSQLExpr* pLeft = (*pExpr)->pLeft;
 
     SColumnIndex index = COLUMN_INDEX_INITIALIZER;
-    if (getColumnIndexByNameEx(&pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) {
+    if (getColumnIndexByName(&pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
       return;
     }
 
@@ -3822,16 +3312,16 @@ static void doExtractExprForSTable(tSQLExpr** pExpr, SSqlCmd* pCmd, tSQLExpr** p
   } else {
     *pOut = tSQLExprCreate(NULL, NULL, (*pExpr)->nSQLOptr);
 
-    doExtractExprForSTable(&(*pExpr)->pLeft, pCmd, &((*pOut)->pLeft), tableIndex);
-    doExtractExprForSTable(&(*pExpr)->pRight, pCmd, &((*pOut)->pRight), tableIndex);
+    doExtractExprForSTable(&(*pExpr)->pLeft, pQueryInfo, &((*pOut)->pLeft), tableIndex);
+    doExtractExprForSTable(&(*pExpr)->pRight, pQueryInfo, &((*pOut)->pRight), tableIndex);
   }
 }
 
-static tSQLExpr* extractExprForSTable(tSQLExpr** pExpr, SSqlCmd*
pCmd, int32_t tableIndex) { +static tSQLExpr* extractExprForSTable(tSQLExpr** pExpr, SQueryInfo* pQueryInfo, int32_t tableIndex) { tSQLExpr* pResExpr = NULL; if (*pExpr != NULL) { - doExtractExprForSTable(pExpr, pCmd, &pResExpr, tableIndex); + doExtractExprForSTable(pExpr, pQueryInfo, &pResExpr, tableIndex); doCompactQueryExpr(&pResExpr); } @@ -3851,24 +3341,24 @@ int tableNameCompar(const void* lhs, const void* rhs) { return ret > 0 ? 1 : -1; } -static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_t tableCondIndex, SStringBuilder* sb) { - SSqlCmd* pCmd = &pSql->cmd; - const char* msg = "meter name too long"; +static int32_t setTableCondForMetricQuery(SQueryInfo* pQueryInfo, const char* account, tSQLExpr* pExpr, + int16_t tableCondIndex, SStringBuilder* sb) { + const char* msg = "table name too long"; if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableCondIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableCondIndex); - STagCond* pTagCond = &pSql->cmd.tagCond; + STagCond* pTagCond = &pQueryInfo->tagCond; pTagCond->tbnameCond.uid = pMeterMetaInfo->pMeterMeta->uid; assert(pExpr->nSQLOptr == TK_LIKE || pExpr->nSQLOptr == TK_IN); if (pExpr->nSQLOptr == TK_LIKE) { char* str = taosStringBuilderGetResult(sb, NULL); - pCmd->tagCond.tbnameCond.cond = strdup(str); + pQueryInfo->tagCond.tbnameCond.cond = strdup(str); return TSDB_CODE_SUCCESS; } @@ -3893,7 +3383,6 @@ static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_ num = j; SSQLToken dbToken = extractDBName(pMeterMetaInfo->name, db); - char* acc = getAccountId(pSql); for (int32_t i = 0; i < num; ++i) { if (i >= 1) { @@ -3904,12 +3393,12 @@ static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_ int32_t xlen = strlen(segments[i]); SSQLToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; - int32_t ret = setObjFullName(idBuf, acc, &dbToken, &t, &xlen); + int32_t ret = setObjFullName(idBuf, account, &dbToken, &t, &xlen); if (ret != TSDB_CODE_SUCCESS) { taosStringBuilderDestroy(&sb1); tfree(segments); - invalidSqlErrMsg(pCmd, msg); + invalidSqlErrMsg(pQueryInfo->msg, msg); return ret; } @@ -3917,16 +3406,16 @@ static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_ } char* str = taosStringBuilderGetResult(&sb1, NULL); - pCmd->tagCond.tbnameCond.cond = strdup(str); + pQueryInfo->tagCond.tbnameCond.cond = strdup(str); taosStringBuilderDestroy(&sb1); tfree(segments); return TSDB_CODE_SUCCESS; } -static bool validateFilterExpr(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) { - SColumnBase* pColBase = &pCmd->colList.pColList[i]; +static bool validateFilterExpr(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->colList.numOfCols; ++i) { + SColumnBase* pColBase = &pQueryInfo->colList.pColList[i]; for (int32_t j = 0; j < pColBase->numOfFilters; ++j) { SColumnFilterInfo* pColFilter = &pColBase->filterInfo[j]; @@ -3948,7 +3437,7 @@ static bool validateFilterExpr(SSqlCmd* pCmd) { return true; } -static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, tSQLExpr* pExpr) { +static int32_t getTimeRangeFromExpr(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) { const char* msg0 = "invalid timestamp"; const char* msg1 = "only one time stamp window allowed"; @@ -3958,19 +3447,19 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, tSQLExpr* pExpr) { if (!isExprDirectParentOfLeaftNode(pExpr)) { if (pExpr->nSQLOptr == TK_OR) { - 
return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } - getTimeRangeFromExpr(pCmd, pExpr->pLeft); + getTimeRangeFromExpr(pQueryInfo, pExpr->pLeft); - return getTimeRangeFromExpr(pCmd, pExpr->pRight); + return getTimeRangeFromExpr(pQueryInfo, pExpr->pRight); } else { SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pExpr->pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; tSQLExpr* pRight = pExpr->pRight; @@ -3979,45 +3468,45 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, tSQLExpr* pExpr) { TSKEY etime = INT64_MAX; if (getTimeRange(&stime, &etime, pRight, pExpr->nSQLOptr, pMeterMeta->precision) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg0); + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } // update the timestamp query range - if (pCmd->stime < stime) { - pCmd->stime = stime; + if (pQueryInfo->stime < stime) { + pQueryInfo->stime = stime; } - if (pCmd->etime > etime) { - pCmd->etime = etime; + if (pQueryInfo->etime > etime) { + pQueryInfo->etime = etime; } } return TSDB_CODE_SUCCESS; } -static int32_t validateJoinExpr(SSqlCmd* pCmd, SCondExpr* pCondExpr) { +static int32_t validateJoinExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) { const char* msg1 = "super table join requires tags column"; const char* msg2 = "timestamp join condition missing"; const char* msg3 = "condition missing for join query"; - if (!QUERY_IS_JOIN_QUERY(pCmd->type)) { - if (pCmd->numOfTables == 1) { + if (!QUERY_IS_JOIN_QUERY(pQueryInfo->type)) { + if (pQueryInfo->numOfTables == 1) { return TSDB_CODE_SUCCESS; } else { - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // for stable join, tag columns - // must be present for join + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { // for stable join, tag columns + // must be present for join if (pCondExpr->pJoinExpr == NULL) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } if (!pCondExpr->tsJoin) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } return TSDB_CODE_SUCCESS; @@ -4045,42 +3534,42 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) { } } -static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SCondExpr* pCondExpr) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - if (QUERY_IS_JOIN_QUERY(pCmd->type) && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { +static void doAddJoinTagsColumnsIntoTagList(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { SColumnIndex index = {0}; - getColumnIndexByNameEx(&pCondExpr->pJoinExpr->pLeft->colInfo, pCmd, &index); - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + getColumnIndexByName(&pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index); + pMeterMetaInfo = 
tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); int32_t columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - addRequiredTagColumn(pCmd, columnInfo, index.tableIndex); + addRequiredTagColumn(pQueryInfo, columnInfo, index.tableIndex); - getColumnIndexByNameEx(&pCondExpr->pJoinExpr->pRight->colInfo, pCmd, &index); - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + getColumnIndexByName(&pCondExpr->pJoinExpr->pRight->colInfo, pQueryInfo, &index); + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - addRequiredTagColumn(pCmd, columnInfo, index.tableIndex); + addRequiredTagColumn(pQueryInfo, columnInfo, index.tableIndex); } } -static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SCondExpr* pCondExpr, tSQLExpr** pExpr) { +static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr, tSQLExpr** pExpr) { int32_t ret = TSDB_CODE_SUCCESS; if (pCondExpr->pTagCond != NULL) { - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - tSQLExpr* p1 = extractExprForSTable(pExpr, pCmd, i); + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + tSQLExpr* p1 = extractExprForSTable(pExpr, pQueryInfo, i); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); char c[TSDB_MAX_TAGS_LEN] = {0}; char* str = c; - if ((ret = getTagCondString(pCmd, p1, &str)) != TSDB_CODE_SUCCESS) { + if ((ret = getTagCondString(p1, &str)) != TSDB_CODE_SUCCESS) { return ret; } - tsSetMetricQueryCond(&pCmd->tagCond, pMeterMetaInfo->pMeterMeta->uid, c); + tsSetMetricQueryCond(&pQueryInfo->tagCond, pMeterMetaInfo->pMeterMeta->uid, c); doCompactQueryExpr(pExpr); tSQLExprDestroy(p1); @@ -4091,7 +3580,7 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SCondExpr* pCondExpr, tSQLExpr return ret; } -int32_t parseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr) { +int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } @@ -4101,20 +3590,19 @@ int32_t parseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr) { int32_t ret = TSDB_CODE_SUCCESS; - SSqlCmd* pCmd = &pSql->cmd; - pCmd->stime = 0; - pCmd->etime = INT64_MAX; + pQueryInfo->stime = 0; + pQueryInfo->etime = INT64_MAX; // tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space SStringBuilder sb = {0}; SCondExpr condExpr = {0}; if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } int32_t type = 0; - if ((ret = getQueryCondExpr(pCmd, pExpr, &condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) { + if ((ret = getQueryCondExpr(pQueryInfo, pExpr, &condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) { return ret; } @@ -4124,46 +3612,46 @@ int32_t parseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr) { condExpr.pTagCond = (*pExpr); // 1. check if it is a join query - if ((ret = validateJoinExpr(pCmd, &condExpr)) != TSDB_CODE_SUCCESS) { + if ((ret = validateJoinExpr(pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) { return ret; } // 2. get the query time range - if ((ret = getTimeRangeFromExpr(pCmd, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { + if ((ret = getTimeRangeFromExpr(pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { return ret; } // 3. 
get the tag query condition - if ((ret = getTagQueryCondExpr(pCmd, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) { + if ((ret = getTagQueryCondExpr(pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) { return ret; } // 4. get the table name query condition - if ((ret = getTablenameCond(pCmd, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) { + if ((ret = getTablenameCond(pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) { return ret; } // 5. other column query condition - if ((ret = getColumnQueryCondInfo(pCmd, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { + if ((ret = getColumnQueryCondInfo(pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { return ret; } // 6. join condition - if ((ret = getJoinCondInfo(pSql, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) { + if ((ret = getJoinCondInfo(pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) { return ret; } // 7. query condition for table name - pCmd->tagCond.relType = (condExpr.relType == TK_AND) ? TSDB_RELATION_AND : TSDB_RELATION_OR; + pQueryInfo->tagCond.relType = (condExpr.relType == TK_AND) ? TSDB_RELATION_AND : TSDB_RELATION_OR; - ret = setTableCondForMetricQuery(pSql, condExpr.pTableCond, condExpr.tableCondIndex, &sb); + ret = setTableCondForMetricQuery(pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb); taosStringBuilderDestroy(&sb); - if (!validateFilterExpr(pCmd)) { - return invalidSqlErrMsg(pCmd, msg); + if (!validateFilterExpr(pQueryInfo)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg); } - doAddJoinTagsColumnsIntoTagList(pCmd, &condExpr); + doAddJoinTagsColumnsIntoTagList(pQueryInfo, &condExpr); cleanQueryExpr(&condExpr); return ret; @@ -4258,11 +3746,11 @@ int32_t getTimeRange(int64_t* stime, int64_t* etime, tSQLExpr* pRight, int32_t o return TSDB_CODE_SUCCESS; } -int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd) { +int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo) { const char rep[] = {'(', ')', '*', ',', '.', '/', '\\', '+', '-', '%', ' '}; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - char* fieldName = tscFieldInfoGetField(pCmd, i)->name; + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + char* fieldName = tscFieldInfoGetField(pQueryInfo, i)->name; for (int32_t j = 0; j < TSDB_COL_NAME_LEN && fieldName[j] != 0; ++j) { for (int32_t k = 0; k < tListLen(rep); ++k) { if (fieldName[j] == rep[k]) { @@ -4276,12 +3764,12 @@ int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd) { } // the column name may be identical, here check again - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - char* fieldName = tscFieldInfoGetField(pCmd, i)->name; - for (int32_t j = i + 1; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { - if (strncasecmp(fieldName, tscFieldInfoGetField(pCmd, j)->name, TSDB_COL_NAME_LEN) == 0) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + char* fieldName = tscFieldInfoGetField(pQueryInfo, i)->name; + for (int32_t j = i + 1; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) { + if (strncasecmp(fieldName, tscFieldInfoGetField(pQueryInfo, j)->name, TSDB_COL_NAME_LEN) == 0) { const char* msg = "duplicated column name in new table"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg); } } } @@ -4289,115 +3777,124 @@ int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd) { return TSDB_CODE_SUCCESS; } -int32_t parseFillClause(SSqlCmd* pCmd, SQuerySQL* pQuerySQL) { +int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) 
{ tVariantList* pFillToken = pQuerySQL->fillType; tVariantListItem* pItem = &pFillToken->a[0]; const int32_t START_INTERPO_COL_IDX = 1; - const char* msg = "illegal value or data overflow"; - const char* msg1 = "value is expected"; - const char* msg2 = "invalid fill option"; + + const char* msg = "illegal value or data overflow"; + const char* msg1 = "value is expected"; + const char* msg2 = "invalid fill option"; if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); + } + + if (pQueryInfo->defaultVal == NULL) { + pQueryInfo->defaultVal = calloc(pQueryInfo->fieldsInfo.numOfOutputCols, sizeof(int64_t)); + if (pQueryInfo->defaultVal == NULL) { + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } } if (strncasecmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) { - pCmd->interpoType = TSDB_INTERPO_NONE; + pQueryInfo->interpoType = TSDB_INTERPO_NONE; } else if (strncasecmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4) { - pCmd->interpoType = TSDB_INTERPO_NULL; - for (int32_t i = START_INTERPO_COL_IDX; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD* pFields = tscFieldInfoGetField(pCmd, i); - setNull((char*)&pCmd->defaultVal[i], pFields->type, pFields->bytes); + pQueryInfo->interpoType = TSDB_INTERPO_NULL; + for (int32_t i = START_INTERPO_COL_IDX; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD* pFields = tscFieldInfoGetField(pQueryInfo, i); + setNull((char*)&pQueryInfo->defaultVal[i], pFields->type, pFields->bytes); } } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { - pCmd->interpoType = TSDB_INTERPO_PREV; + pQueryInfo->interpoType = TSDB_INTERPO_PREV; } else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) { - // not support yet - pCmd->interpoType = TSDB_INTERPO_LINEAR; + pQueryInfo->interpoType = TSDB_INTERPO_LINEAR; } else if (strncasecmp(pItem->pVar.pz, "value", 5) == 0 && pItem->pVar.nLen == 5) { - pCmd->interpoType = TSDB_INTERPO_SET_VALUE; + pQueryInfo->interpoType = TSDB_INTERPO_SET_VALUE; if (pFillToken->nExpr == 1) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } int32_t startPos = 1; int32_t numOfFillVal = pFillToken->nExpr - 1; /* for point interpolation query, we do not have the timestamp column */ - if (tscIsPointInterpQuery(pCmd)) { + if (tscIsPointInterpQuery(pQueryInfo)) { startPos = 0; - if (numOfFillVal > pCmd->fieldsInfo.numOfOutputCols) { - numOfFillVal = pCmd->fieldsInfo.numOfOutputCols; + if (numOfFillVal > pQueryInfo->fieldsInfo.numOfOutputCols) { + numOfFillVal = pQueryInfo->fieldsInfo.numOfOutputCols; } } else { - numOfFillVal = - (pFillToken->nExpr > pCmd->fieldsInfo.numOfOutputCols) ? pCmd->fieldsInfo.numOfOutputCols : pFillToken->nExpr; + numOfFillVal = (pFillToken->nExpr > pQueryInfo->fieldsInfo.numOfOutputCols) + ? 
pQueryInfo->fieldsInfo.numOfOutputCols + : pFillToken->nExpr; } int32_t j = 1; for (int32_t i = startPos; i < numOfFillVal; ++i, ++j) { - TAOS_FIELD* pFields = tscFieldInfoGetField(pCmd, i); - - int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pCmd->defaultVal[i], pFields->type); - if (ret != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); - } - + TAOS_FIELD* pFields = tscFieldInfoGetField(pQueryInfo, i); + if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { - setNull((char*)(&pCmd->defaultVal[i]), pFields->type, pFields->bytes); + setNull((char*)(&pQueryInfo->defaultVal[i]), pFields->type, pFields->bytes); + continue; + } + + int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pQueryInfo->defaultVal[i], pFields->type); + if (ret != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg); } } - if ((pFillToken->nExpr < pCmd->fieldsInfo.numOfOutputCols) || - ((pFillToken->nExpr - 1 < pCmd->fieldsInfo.numOfOutputCols) && (tscIsPointInterpQuery(pCmd)))) { + if ((pFillToken->nExpr < pQueryInfo->fieldsInfo.numOfOutputCols) || + ((pFillToken->nExpr - 1 < pQueryInfo->fieldsInfo.numOfOutputCols) && (tscIsPointInterpQuery(pQueryInfo)))) { tVariantListItem* lastItem = &pFillToken->a[pFillToken->nExpr - 1]; - for (int32_t i = numOfFillVal; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD* pFields = tscFieldInfoGetField(pCmd, i); - tVariantDump(&lastItem->pVar, (char*)&pCmd->defaultVal[i], pFields->type); + for (int32_t i = numOfFillVal; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD* pFields = tscFieldInfoGetField(pQueryInfo, i); + tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->defaultVal[i], pFields->type); if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { - setNull((char*)(&pCmd->defaultVal[i]), pFields->type, pFields->bytes); + setNull((char*)(&pQueryInfo->defaultVal[i]), pFields->type, pFields->bytes); } } } } else { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } return TSDB_CODE_SUCCESS; } -static void setDefaultOrderInfo(SSqlCmd* pCmd) { +static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { /* set default timestamp order information for all queries */ - pCmd->order.order = TSQL_SO_ASC; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + pQueryInfo->order.order = TSQL_SO_ASC; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); - if (isTopBottomQuery(pCmd)) { - pCmd->order.order = TSQL_SO_ASC; - pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + if (isTopBottomQuery(pQueryInfo)) { + pQueryInfo->order.order = TSQL_SO_ASC; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } else { - pCmd->order.orderColId = -1; + pQueryInfo->order.orderColId = -1; } /* for metric query, set default ascending order for group output */ - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - pCmd->groupbyExpr.orderType = TSQL_SO_ASC; + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { + pQueryInfo->groupbyExpr.orderType = TSQL_SO_ASC; } } -int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema, int32_t numOfCols) { +int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema) { const char* msg0 = "only support order by primary timestamp"; const char* msg1 = "invalid column name"; const char* msg2 = "only support order by primary timestamp and queried column"; const char* msg3 = "only support order by primary 
timestamp and first tag in groupby clause"; - setDefaultOrderInfo(pCmd); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + setDefaultOrderInfo(pQueryInfo); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); if (pQuerySql->pSortOrder == NULL) { return TSDB_CODE_SUCCESS; @@ -4413,11 +3910,11 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema */ if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { if (pSortorder->nExpr > 1) { - return invalidSqlErrMsg(pCmd, msg0); + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } } else { if (pSortorder->nExpr > 2) { - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } } @@ -4432,9 +3929,9 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema SSQLToken columnName = {pVar->nLen, pVar->nType, pVar->pz}; SColumnIndex index = {0}; - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // metric query - if (getColumnIndexByNameEx(&columnName, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { // metric query + if (getColumnIndexByName(&columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } bool orderByTags = false; @@ -4442,7 +3939,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) { int32_t relTagIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - if (relTagIndex == pCmd->groupbyExpr.columnInfo[0].colIdx) { + if (relTagIndex == pQueryInfo->groupbyExpr.columnInfo[0].colIdx) { orderByTags = true; } } else if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { @@ -4453,83 +3950,83 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema orderByTS = true; } - if (!(orderByTags || orderByTS) && !isTopBottomQuery(pCmd)) { - return invalidSqlErrMsg(pCmd, msg3); + if (!(orderByTags || orderByTS) && !isTopBottomQuery(pQueryInfo)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } else { assert(!(orderByTags && orderByTS)); } if (pSortorder->nExpr == 1) { if (orderByTags) { - pCmd->groupbyExpr.orderIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - pCmd->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; - } else if (isTopBottomQuery(pCmd)) { + pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; + pQueryInfo->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; + } else if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ - SSqlExpr* pExpr = tscSqlExprGet(pCmd, 0); + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); assert(pExpr->functionId == TSDB_FUNC_TS); - pExpr = tscSqlExprGet(pCmd, 1); + pExpr = tscSqlExprGet(pQueryInfo, 1); if (pExpr->colInfo.colIdx != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - pCmd->order.order = pQuerySql->pSortOrder->a[0].sortOrder; - pCmd->order.orderColId = pSchema[index.columnIndex].colId; + pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } else { - pCmd->order.order = pSortorder->a[0].sortOrder; - pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + 
pQueryInfo->order.order = pSortorder->a[0].sortOrder; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } if (pSortorder->nExpr == 2) { if (orderByTags) { - pCmd->groupbyExpr.orderIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - pCmd->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; + pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; + pQueryInfo->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; } else { - pCmd->order.order = pSortorder->a[0].sortOrder; - pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + pQueryInfo->order.order = pSortorder->a[0].sortOrder; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } tVariant* pVar2 = &pSortorder->a[1].pVar; SSQLToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; - if (getColumnIndexByNameEx(&cname, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + if (getColumnIndexByName(&cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } else { - pCmd->order.order = pSortorder->a[1].sortOrder; - pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + pQueryInfo->order.order = pSortorder->a[1].sortOrder; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } } else { // meter query - if (getColumnIndexByNameEx(&columnName, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + if (getColumnIndexByName(&columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } - if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pCmd)) { - return invalidSqlErrMsg(pCmd, msg2); + if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - if (isTopBottomQuery(pCmd)) { + if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ - SSqlExpr* pExpr = tscSqlExprGet(pCmd, 0); + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); assert(pExpr->functionId == TSDB_FUNC_TS); - pExpr = tscSqlExprGet(pCmd, 1); + pExpr = tscSqlExprGet(pQueryInfo, 1); if (pExpr->colInfo.colIdx != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - pCmd->order.order = pQuerySql->pSortOrder->a[0].sortOrder; - pCmd->order.orderColId = pSchema[index.columnIndex].colId; + pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } - pCmd->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; } return TSDB_CODE_SUCCESS; @@ -4538,64 +4035,58 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const int32_t DEFAULT_TABLE_INDEX = 0; - SSqlCmd* pCmd = &pSql->cmd; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, DEFAULT_TABLE_INDEX); + const char* msg1 = "invalid table name"; + const char* msg2 = "table name too long"; + const char* msg3 = "manipulation of tag available for super table"; + const char* msg4 = "set tag value only available for 
table"; + const char* msg5 = "only support add one tag"; + const char* msg6 = "column can only be modified by super table"; + SSqlCmd* pCmd = &pSql->cmd; SAlterTableSQL* pAlterSQL = pInfo->pAlterInfo; - pCmd->command = TSDB_SQL_ALTER_TABLE; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, DEFAULT_TABLE_INDEX); if (tscValidateName(&(pAlterSQL->name)) != TSDB_CODE_SUCCESS) { - const char* msg = "invalid table name"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } - if (setMeterID(pSql, &(pAlterSQL->name), 0) != TSDB_CODE_SUCCESS) { - const char* msg = "table name too long"; - return invalidSqlErrMsg(pCmd, msg); + if (setMeterID(pMeterMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo->name, DEFAULT_TABLE_INDEX); + int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo); if (ret != TSDB_CODE_SUCCESS) { return ret; } SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; - SSchema* pSchema = tsGetSchema(pMeterMeta); - if (pInfo->sqlType == ALTER_TABLE_TAGS_ADD || pInfo->sqlType == ALTER_TABLE_TAGS_DROP || - pInfo->sqlType == ALTER_TABLE_TAGS_CHG) { + if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN || + pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { - const char* msg = "manipulation of tag available for metric"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - } else if ((pInfo->sqlType == ALTER_TABLE_TAGS_SET) && (UTIL_METER_IS_METRIC(pMeterMetaInfo))) { - const char* msg = "set tag value only available for table"; - return invalidSqlErrMsg(pCmd, msg); - } else if ((pInfo->sqlType == ALTER_TABLE_ADD_COLUMN || pInfo->sqlType == ALTER_TABLE_DROP_COLUMN) && + } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo))) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); + } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) && UTIL_METER_IS_CREATE_FROM_METRIC(pMeterMetaInfo)) { - const char* msg = "column can only be modified by metric"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg6); } - if (pInfo->sqlType == ALTER_TABLE_TAGS_ADD) { - pCmd->count = TSDB_ALTER_TABLE_ADD_TAG_COLUMN; - + if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) { tFieldList* pFieldList = pAlterSQL->pAddColumns; if (pFieldList->nField > 1) { - const char* msg = "only support add one tag"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg5); } if (!validateOneTags(pCmd, &pFieldList->p[0])) { return TSDB_CODE_INVALID_SQL; } - tscFieldInfoSetValFromField(&pCmd->fieldsInfo, 0, &pFieldList->p[0]); - pCmd->numOfCols = 1; // only one column - - } else if (pInfo->sqlType == ALTER_TABLE_TAGS_DROP) { - pCmd->count = TSDB_ALTER_TABLE_DROP_TAG_COLUMN; - + tscFieldInfoSetValFromField(&pQueryInfo->fieldsInfo, 0, &pFieldList->p[0]); + } else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) { const char* msg1 = "no tags can be dropped"; const char* msg2 = "only support one tag"; const char* msg3 = "tag name too long"; @@ -4603,48 +4094,40 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg5 = "primary tag cannot 
be dropped"; if (pMeterMeta->numOfTags == 1) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } // numOfTags == 1 if (pAlterSQL->varList->nExpr > 1) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } tVariantListItem* pItem = &pAlterSQL->varList->a[0]; if (pItem->pVar.nLen > TSDB_COL_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - int32_t idx = -1; - for (int32_t i = 0; i < pMeterMeta->numOfTags; ++i) { - int32_t tagIdx = i + pMeterMeta->numOfColumns; - char* tagName = pSchema[tagIdx].name; - size_t nLen = strlen(tagName); + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SSQLToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen, .type = TK_STRING}; - if ((strncasecmp(tagName, pItem->pVar.pz, nLen) == 0) && (pItem->pVar.nLen == nLen)) { - idx = i; - break; - } + if (getColumnIndexByName(&name, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; } - if (idx == -1) { - return invalidSqlErrMsg(pCmd, msg4); - } else if (idx == 0) { - return invalidSqlErrMsg(pCmd, msg5); + if (index.columnIndex < pMeterMeta->numOfColumns) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); + } else if (index.columnIndex == 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg5); } - char name[128] = {0}; - strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); - - pCmd->numOfCols = 1; // only one column - - } else if (pInfo->sqlType == ALTER_TABLE_TAGS_CHG) { + char name1[128] = {0}; + strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name1, + tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { const char* msg1 = "tag name too long"; const char* msg2 = "invalid tag name"; - pCmd->count = TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN; tVariantList* pVarList = pAlterSQL->varList; if (pVarList->nExpr > 2) { return TSDB_CODE_INVALID_SQL; @@ -4654,177 +4137,148 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pDstItem = &pAlterSQL->varList->a[1]; if (pSrcItem->pVar.nLen >= TSDB_COL_NAME_LEN || pDstItem->pVar.nLen >= TSDB_COL_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } if (pSrcItem->pVar.nType != TSDB_DATA_TYPE_BINARY || pDstItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } SColumnIndex srcIndex = COLUMN_INDEX_INITIALIZER; SColumnIndex destIndex = COLUMN_INDEX_INITIALIZER; SSQLToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING}; - if (getColumnIndexByNameEx(&srcToken, pCmd, &srcIndex) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } SSQLToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING}; - if (getColumnIndexByNameEx(&destToken, pCmd, &destIndex) == TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } char name[128] = {0}; strncpy(name, pVarList->a[0].pVar.pz, pVarList->a[0].pVar.nLen); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + 
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); memset(name, 0, tListLen(name)); strncpy(name, pVarList->a[1].pVar.pz, pVarList->a[1].pVar.nLen); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 1, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); - - pCmd->numOfCols = 2; - } else if (pInfo->sqlType == ALTER_TABLE_TAGS_SET) { - const char* msg0 = "tag name too long"; + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 1, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + } else if (pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) { const char* msg1 = "invalid tag value"; - const char* msg2 = "invalid tag name"; + const char* msg2 = "update normal column not supported"; const char* msg3 = "tag value too long"; - pCmd->count = TSDB_ALTER_TABLE_UPDATE_TAG_VAL; - - // Note: update can only be applied to meter not metric. - // the following is handle display tags value for meters created according to metric - + // Note: update can only be applied to a table, not a super table. + // the following handles displaying tag values for tables created from a super table tVariantList* pVarList = pAlterSQL->varList; tVariant* pTagName = &pVarList->a[0].pVar; - if (pTagName->nLen > TSDB_COL_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg0); - } - - int32_t tagsIndex = -1; - SSchema* pTagsSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); - - for (int32_t i = 0; i < pMeterMetaInfo->pMeterMeta->numOfTags; ++i) { - if (strcmp(pTagName->pz, pTagsSchema[i].name) == 0 && strlen(pTagsSchema[i].name) == pTagName->nLen) { - tagsIndex = i; - break; - } + SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; + SSQLToken name = {.type = TK_STRING, .z = pTagName->pz, .n = pTagName->nLen}; + if (getColumnIndexByName(&name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; } - if (tagsIndex == -1) { - return invalidSqlErrMsg(pCmd, msg2); + if (columnIndex.columnIndex < pMeterMeta->numOfColumns) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - if (tVariantDump(&pVarList->a[1].pVar, pCmd->payload, pTagsSchema[tagsIndex].type) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + SSchema* pTagsSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, columnIndex.columnIndex); + if (tVariantDump(&pVarList->a[1].pVar, pAlterSQL->tagData.data /*pCmd->payload*/, pTagsSchema->type) != + TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } // validate the length of binary - if ((pTagsSchema[tagsIndex].type == TSDB_DATA_TYPE_BINARY || pTagsSchema[tagsIndex].type == TSDB_DATA_TYPE_NCHAR) && - pVarList->a[1].pVar.nLen > pTagsSchema[tagsIndex].bytes) { - return invalidSqlErrMsg(pCmd, msg3); + if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) && + pVarList->a[1].pVar.nLen > pTagsSchema->bytes) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - char name[128] = {0}; - strncpy(name, pTagName->pz, pTagName->nLen); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); - - pCmd->numOfCols = 1; - } else if (pInfo->sqlType == ALTER_TABLE_ADD_COLUMN) { - pCmd->count = TSDB_ALTER_TABLE_ADD_COLUMN; + char name1[128] = {0}; + strncpy(name1, pTagName->pz, pTagName->nLen); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name1, + tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + } else if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN) { tFieldList* pFieldList 
= pAlterSQL->pAddColumns; if (pFieldList->nField > 1) { const char* msg = "only support add one column"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg); } if (!validateOneColumn(pCmd, &pFieldList->p[0])) { return TSDB_CODE_INVALID_SQL; } - tscFieldInfoSetValFromField(&pCmd->fieldsInfo, 0, &pFieldList->p[0]); - pCmd->numOfCols = 1; // only one column - } else if (pInfo->sqlType == ALTER_TABLE_DROP_COLUMN) { - pCmd->count = TSDB_ALTER_TABLE_DROP_COLUMN; - + tscFieldInfoSetValFromField(&pQueryInfo->fieldsInfo, 0, &pFieldList->p[0]); + } else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) { const char* msg1 = "no columns can be dropped"; const char* msg2 = "only support one column"; - const char* msg3 = "column name too long"; const char* msg4 = "illegal column name"; - const char* msg5 = "primary timestamp column cannot be dropped"; + const char* msg3 = "primary timestamp column cannot be dropped"; if (pMeterMeta->numOfColumns == TSDB_MIN_COLUMNS) { // - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } if (pAlterSQL->varList->nExpr > 1) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } tVariantListItem* pItem = &pAlterSQL->varList->a[0]; - if (pItem->pVar.nLen > TSDB_COL_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg3); - } - int32_t idx = -1; - for (int32_t i = 0; i < pMeterMeta->numOfColumns; ++i) { - char* colName = pSchema[i].name; - size_t len = strlen(colName); - - if ((strncasecmp(colName, pItem->pVar.pz, len) == 0) && (len == pItem->pVar.nLen)) { - idx = i; - break; - } + SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; + SSQLToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen}; + if (getColumnIndexByName(&name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); } - if (idx == -1) { - return invalidSqlErrMsg(pCmd, msg4); - } else if (idx == 0) { - return invalidSqlErrMsg(pCmd, msg5); + if (columnIndex.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - char name[128] = {0}; - strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); - - pCmd->numOfCols = 1; // only one column + char name1[128] = {0}; + strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name1, + tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); } return TSDB_CODE_SUCCESS; } -int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd) { +int32_t validateSqlFunctionInStreamSql(SQueryInfo* pQueryInfo) { const char* msg0 = "sample interval can not be less than 10ms."; const char* msg1 = "functions not allowed in select clause"; - if (pCmd->nAggTimeInterval != 0 && pCmd->nAggTimeInterval < 10) { - return invalidSqlErrMsg(pCmd, msg0); + if (pQueryInfo->nAggTimeInterval != 0 && pQueryInfo->nAggTimeInterval < 10) { + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int32_t functId = tscSqlExprGet(pCmd, i)->functionId; + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int32_t functId = tscSqlExprGet(pQueryInfo, i)->functionId; if (!IS_STREAM_QUERY_VALID(aAggs[functId].nStatus)) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } return TSDB_CODE_SUCCESS; } 
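The recurring shape of this refactor, for reference: routines that previously took the whole `SSqlCmd` now take the `SQueryInfo` of one (sub)clause, table metadata is resolved per clause via `tscGetMeterMetaInfoFromQueryInfo`, and error text is written into the clause-local `msg` buffer. A minimal sketch of that calling convention, using only accessor names introduced by this patch; `someValidation` and `checkFailed` are hypothetical placeholders:

```c
// Sketch only; not part of the patch. Illustrates the post-refactor shape
// shared by the validators above.
static int32_t someValidation(SSqlObj* pSql, int32_t clauseIndex) {
  // resolve the per-clause query descriptor from the command object
  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, clauseIndex);

  // table metadata now hangs off the clause, not the command
  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);

  if (checkFailed(pMeterMetaInfo)) {  // hypothetical predicate
    // error text goes into the clause-local buffer instead of pCmd
    return invalidSqlErrMsg(pQueryInfo->msg, "error message");
  }

  return TSDB_CODE_SUCCESS;
}
```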
-int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd) { +int32_t validateFunctionsInIntervalOrGroupbyQuery(SQueryInfo* pQueryInfo) { bool isProjectionFunction = false; const char* msg1 = "column projection is not compatible with interval"; // multi-output set/ todo refactor - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, k); + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); // projection query on primary timestamp, the selectivity function needs to be present. if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { bool hasSelectivity = false; - for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { - SSqlExpr* pEx = tscSqlExprGet(pCmd, j); + for (int32_t j = 0; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) { + SSqlExpr* pEx = tscSqlExprGet(pQueryInfo, j); if ((aAggs[pEx->functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) { hasSelectivity = true; break; @@ -4843,7 +4297,7 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd) { } if (isProjectionFunction) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(pQueryInfo->msg, msg1); } return isProjectionFunction == true ? TSDB_CODE_INVALID_SQL : TSDB_CODE_SUCCESS; @@ -4976,64 +4430,80 @@ int32_t validateColumnName(char* name) { return TSDB_CODE_SUCCESS; } -bool hasTimestampForPointInterpQuery(SSqlCmd* pCmd) { - if (!tscIsPointInterpQuery(pCmd)) { +bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo) { + if (!tscIsPointInterpQuery(pQueryInfo)) { return true; } - return (pCmd->stime == pCmd->etime) && (pCmd->stime != 0); + return (pQueryInfo->stime == pQueryInfo->etime) && (pQueryInfo->stime != 0); } -int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { - SSqlCmd* pCmd = &pSql->cmd; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL* pQuerySql, SSqlObj* pSql) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); const char* msg0 = "soffset/offset can not be less than 0"; const char* msg1 = "slimit/soffset only available for STable query"; const char* msg2 = "function not supported on table"; const char* msg3 = "slimit/soffset can not apply to projection query"; - + // handle the limit offset value, validate the limit - pCmd->limit = pQuerySql->limit; - pCmd->globalLimit = pCmd->limit.limit; + pQueryInfo->limit = pQuerySql->limit; + pQueryInfo->clauseLimit = pQueryInfo->limit.limit; - pCmd->slimit = pQuerySql->slimit; + pQueryInfo->slimit = pQuerySql->slimit; - if (pCmd->slimit.offset < 0 || pCmd->limit.offset < 0) { - return invalidSqlErrMsg(pCmd, msg0); + if (pQueryInfo->slimit.offset < 0 || pQueryInfo->limit.offset < 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } - if (pCmd->limit.limit == 0) { + if (pQueryInfo->limit.limit == 0) { tscTrace("%p limit 0, no output result", pSql); - pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + return TSDB_CODE_SUCCESS; } - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { bool queryOnTags = false; - if (tscQueryOnlyMetricTags(pCmd, &queryOnTags) != TSDB_CODE_SUCCESS) { + if (tscQueryOnlyMetricTags(pQueryInfo, &queryOnTags) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } if (queryOnTags == true) { // local 
handle the metric tag query - pCmd->command = TSDB_SQL_RETRIEVE_TAGS; + pQueryInfo->command = TSDB_SQL_RETRIEVE_TAGS; } else { - if (tscProjectionQueryOnMetric(pCmd) && (pCmd->slimit.limit > 0 || pCmd->slimit.offset > 0)) { - return invalidSqlErrMsg(pCmd, msg3); + if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) { + if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); + } + + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { +// if (pQueryInfo->order.orderColId >= 0) { +// if (pQueryInfo->limit.limit == -1) { +// return invalidSqlErrMsg(pQueryInfo->msg, msg4); +// } else if (pQueryInfo->limit.limit > 10000) { // the result set can not be larger than 10000 +// //todo use global config parameter +// return invalidSqlErrMsg(pQueryInfo->msg, msg5); +// } +// } + + pQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY; // for projection query on super table, all queries are subqueries + } } } - if (pCmd->slimit.limit == 0) { + if (pQueryInfo->slimit.limit == 0) { tscTrace("%p limit 0, no output result", pSql); - pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; return TSDB_CODE_SUCCESS; } /* - * get the distribution of all tables among available virtual nodes that satisfy query condition and - * created according to this super table from management node. - * And then launching multiple async-queries on required virtual nodes, which is the first-stage query operation. + * Get the distribution of all tables among all available virtual nodes that are qualified for the query condition + * and created according to this super table, from the management node. + * Then launch multiple async queries against all qualified virtual nodes; this is the first-stage + * query operation. */ - int32_t code = tscGetMetricMeta(pSql); + int32_t code = tscGetMetricMeta(pSql, clauseIndex); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -5042,21 +4512,35 @@ int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta; if (pMeterMetaInfo->pMeterMeta == NULL || pMetricMeta == NULL || pMetricMeta->numOfMeters == 0) { tscTrace("%p no table in metricmeta, no output result", pSql); - pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; } // keep original limitation value in globalLimit - pCmd->globalLimit = pCmd->limit.limit; + pQueryInfo->clauseLimit = pQueryInfo->limit.limit; + pQueryInfo->prjOffset = pQueryInfo->limit.offset; + + if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + /* + * the limit/offset values should be removed while retrieving data from the virtual nodes, + * since the global ordering is done on the client side; the limit must therefore also + * be applied on the client side. + */ + if (pQueryInfo->limit.limit > 0) { + pQueryInfo->limit.limit = -1; + } + + pQueryInfo->limit.offset = 0; + } } else { - if (pCmd->slimit.limit != -1 || pCmd->slimit.offset != 0) { - return invalidSqlErrMsg(pCmd, msg1); + if (pQueryInfo->slimit.limit != -1 || pQueryInfo->slimit.offset != 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } // filter the query functions operating on "tbname" column that are not supported by normal columns. 
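// Illustration (not part of the patch; the example query is hypothetical): on a
// normal table, a statement such as
//   SELECT count(tbname) FROM some_table;
// yields an expression whose colInfo.colIdx is TSDB_TBNAME_COLUMN_INDEX, and
// the loop below rejects it with msg2 ("function not supported on table").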
-    for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-      SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+    for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+      SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
       if (pExpr->colInfo.colIdx == TSDB_TBNAME_COLUMN_INDEX) {
-        return invalidSqlErrMsg(pCmd, msg2);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg2);
       }
     }
   }
@@ -5088,7 +4572,7 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* p
         pMsg->daysToKeep2 = htonl(pKeep->a[2].pVar.i64Key);
         break;
       }
-      default: { return invalidSqlErrMsg(pCmd, msg); }
+      default: { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); }
     }
   }
@@ -5112,7 +4596,7 @@ static int32_t setTimePrecisionOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreate
                strlen(TSDB_TIME_PRECISION_MICRO_STR) == pToken->n) {
       pMsg->precision = TSDB_TIME_PRECISION_MICRO;
     } else {
-      return invalidSqlErrMsg(pCmd, msg);
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
     }
   }
@@ -5152,35 +4636,38 @@ int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) {
   return TSDB_CODE_SUCCESS;
 }
 
-void tscAddTimestampColumn(SSqlCmd* pCmd, int16_t functionId, int16_t tableIndex) {
+void tscAddTimestampColumn(SQueryInfo* pQueryInfo, int16_t functionId, int16_t tableIndex) {
   // if the first column is not the timestamp column, add it
   SSqlExpr* pExpr = NULL;
-  if (pCmd->exprsInfo.numOfExprs > 0) {
-    pExpr = tscSqlExprGet(pCmd, 0);
+  if (pQueryInfo->exprsInfo.numOfExprs > 0) {
+    pExpr = tscSqlExprGet(pQueryInfo, 0);
   }
 
   if (pExpr == NULL || pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX || pExpr->functionId != functionId) {
     SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
 
-    pExpr = tscSqlExprInsert(pCmd, 0, functionId, &index, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE);
+    pExpr = tscSqlExprInsert(pQueryInfo, 0, functionId, &index, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE);
     pExpr->colInfo.flag = TSDB_COL_NORMAL;
 
    // NOTE: the tag column is not added to the source column list
     SColumnList ids = getColumnList(1, tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX);
-    insertResultField(pCmd, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, "ts");
+    insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, "ts");
   }
 }
 
-void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex) {
-  if (pParentObj->cmd.groupbyExpr.numOfGroupCols > 0) {
-    int32_t   num = pSql->cmd.exprsInfo.numOfExprs;
-    SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, num - 1);
-    SSqlCmd*  pCmd = &pSql->cmd;
+void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex) {
+  SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentObj->cmd, subClauseIndex);
+
+  if (pParentQueryInfo->groupbyExpr.numOfGroupCols > 0) {
+    SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, subClauseIndex);
+    int32_t     num = pQueryInfo->exprsInfo.numOfExprs;
+
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, num - 1);
 
     if (pExpr->functionId != TSDB_FUNC_TAG) {
-      SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
-      int16_t         columnInfo = tscGetJoinTagColIndexByUid(&pCmd->tagCond, pMeterMetaInfo->pMeterMeta->uid);
+      SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex);
+      int16_t         columnInfo = tscGetJoinTagColIndexByUid(&pQueryInfo->tagCond, pMeterMetaInfo->pMeterMeta->uid);
       SColumnIndex    index = {.tableIndex = 0, .columnIndex = columnInfo};
       SSchema*        pSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta);
@@ -5188,51 +4675,59 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIn
       int16_t bytes = pSchema[index.columnIndex].bytes;
       char*   name = pSchema[index.columnIndex].name;
 
-      pExpr = tscSqlExprInsert(pCmd, pCmd->fieldsInfo.numOfOutputCols, TSDB_FUNC_TAG, &index, type, bytes, bytes);
+      pExpr = tscSqlExprInsert(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, TSDB_FUNC_TAG, &index, type, bytes,
+                               bytes);
       pExpr->colInfo.flag = TSDB_COL_TAG;
 
       // NOTE: the tag column is not added to the source column list
       SColumnList ids = {0};
-      insertResultField(pCmd, pCmd->fieldsInfo.numOfOutputCols, &ids, bytes, type, name);
+      insertResultField(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, &ids, bytes, type, name);
 
       int32_t relIndex = index.columnIndex;
       pExpr->colInfo.colIdx = relIndex;
-      pCmd->groupbyExpr.columnInfo[0].colIdx = relIndex;
+      pQueryInfo->groupbyExpr.columnInfo[0].colIdx = relIndex;
 
-      addRequiredTagColumn(pCmd, pCmd->groupbyExpr.columnInfo[0].colIdx, 0);
+      addRequiredTagColumn(pQueryInfo, pQueryInfo->groupbyExpr.columnInfo[0].colIdx, 0);
     }
   }
 }
 
-void doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex) {
-  int32_t index = pCmd->groupbyExpr.columnInfo[tagIndex].colIdx;
+// limit the output to one row for each state value
+static void doLimitOutputNormalColOfGroupby(SSqlExpr* pExpr) {
+  int32_t outputRow = 1;
+  tVariantCreateFromBinary(&pExpr->param[0], (char*) &outputRow, sizeof(int32_t), TSDB_DATA_TYPE_INT);
+  pExpr->numOfParams = 1;
+}
+
+void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex) {
+  int32_t index = pQueryInfo->groupbyExpr.columnInfo[tagIndex].colIdx;
 
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
 
   SSchema*     pSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, index);
   SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = index};
 
-  SSqlExpr* pExpr = tscSqlExprInsert(pCmd, pCmd->fieldsInfo.numOfOutputCols, TSDB_FUNC_PRJ, &colIndex, pSchema->type,
-                                     pSchema->bytes, pSchema->bytes);
+  SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, TSDB_FUNC_PRJ, &colIndex,
+                                     pSchema->type, pSchema->bytes, pSchema->bytes);
 
   pExpr->colInfo.flag = TSDB_COL_NORMAL;
-  pExpr->param[0].i64Key = 1;
-  pExpr->numOfParams = 1;
-
+  doLimitOutputNormalColOfGroupby(pExpr);
+
   // NOTE: the tag column is not added to the source column list
   SColumnList list = {0};
   list.num = 1;
   list.ids[0] = colIndex;
 
-  insertResultField(pCmd, pCmd->fieldsInfo.numOfOutputCols, &list, pSchema->bytes, pSchema->type, pSchema->name);
-  tscFieldInfoUpdateVisible(&pCmd->fieldsInfo, pCmd->fieldsInfo.numOfOutputCols - 1, false);
+  insertResultField(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, &list, pSchema->bytes, pSchema->type,
+                    pSchema->name);
+  tscFieldInfoUpdateVisible(&pQueryInfo->fieldsInfo, pQueryInfo->fieldsInfo.numOfOutputCols - 1, false);
 }
 
-static void doUpdateSqlFunctionForTagPrj(SSqlCmd* pCmd) {
+static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) {
   int32_t tagLength = 0;
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
     if (pExpr->functionId == TSDB_FUNC_TAGPRJ || pExpr->functionId == TSDB_FUNC_TAG) {
       pExpr->functionId = TSDB_FUNC_TAG_DUMMY;
       tagLength += pExpr->resBytes;
@@ -5242,11 +4737,11 @@ static void doUpdateSqlFunctionForTagPrj(SSqlCmd* pCmd) {
     }
   }
 
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
   SSchema*        pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta);
 
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
     if (pExpr->functionId != TSDB_FUNC_TAG_DUMMY && pExpr->functionId != TSDB_FUNC_TS_DUMMY) {
       SSchema* pColSchema = &pSchema[pExpr->colInfo.colIdx];
       getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->functionId, pExpr->param[0].i64Key, &pExpr->resType,
@@ -5255,16 +4750,15 @@ static void doUpdateSqlFunctionForTagPrj(SSqlCmd* pCmd) {
   }
 }
 
-static void doUpdateSqlFunctionForColPrj(SSqlCmd* pCmd) {
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+static void doUpdateSqlFunctionForColPrj(SQueryInfo* pQueryInfo) {
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
     if (pExpr->functionId == TSDB_FUNC_PRJ) {
       bool qualifiedCol = false;
-      for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) {
-        if (pExpr->colInfo.colId == pCmd->groupbyExpr.columnInfo[j].colId) {
+      for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
+        if (pExpr->colInfo.colId == pQueryInfo->groupbyExpr.columnInfo[j].colId) {
           qualifiedCol = true;
-
-          pExpr->param[0].i64Key = 1;  // limit the output to one row for each state value
+          doLimitOutputNormalColOfGroupby(pExpr);
           pExpr->numOfParams = 1;
           break;
         }
@@ -5285,12 +4779,12 @@ static bool tagColumnInGroupby(SSqlGroupbyExpr* pGroupbyExpr, int16_t columnId)
   return false;
 }
 
-static bool onlyTagPrjFunction(SSqlCmd* pCmd) {
+static bool onlyTagPrjFunction(SQueryInfo* pQueryInfo) {
   bool hasTagPrj = false;
   bool hasColumnPrj = false;
 
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
     if (pExpr->functionId == TSDB_FUNC_PRJ) {
       hasColumnPrj = true;
     } else if (pExpr->functionId == TSDB_FUNC_TAGPRJ) {
@@ -5302,16 +4796,16 @@ static bool onlyTagPrjFunction(SSqlCmd* pCmd) {
 }
 
 // check if all the tag projection columns belong to the group by columns
-static bool allTagPrjInGroupby(SSqlCmd* pCmd) {
+static bool allTagPrjInGroupby(SQueryInfo* pQueryInfo) {
   bool allInGroupby = true;
 
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
     if (pExpr->functionId != TSDB_FUNC_TAGPRJ) {
       continue;
     }
 
-    if (!tagColumnInGroupby(&pCmd->groupbyExpr, pExpr->colInfo.colId)) {
+    if (!tagColumnInGroupby(&pQueryInfo->groupbyExpr, pExpr->colInfo.colId)) {
       allInGroupby = false;
       break;
     }
@@ -5321,9 +4815,9 @@ static bool allTagPrjInGroupby(SSqlCmd* pCmd) {
   return allInGroupby;
 }
 
-static void updateTagPrjFunction(SSqlCmd* pCmd) {
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+static void updateTagPrjFunction(SQueryInfo* pQueryInfo) {
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
     if (pExpr->functionId == TSDB_FUNC_TAGPRJ) {
       pExpr->functionId = TSDB_FUNC_TAG;
     }
@@ -5336,7 +4830,7 @@ static void updateTagPrjFunction(SSqlCmd* pCmd) {
  * 2. if both selectivity functions and tag projection functions exist, only one
  *    selectivity function is allowed.
  */
-static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) {
+static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
   const char* msg1 = "only one selectivity function allowed in presence of tags function";
   const char* msg3 = "aggregation function should not be mixed up with projection";
 
@@ -5344,8 +4838,8 @@ static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) {
   int16_t numOfSelectivity = 0;
   int16_t numOfAggregation = 0;
 
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
     if (pExpr->functionId == TSDB_FUNC_TAGPRJ ||
         (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX)) {
       tagColExists = true;
@@ -5353,9 +4847,9 @@
     }
   }
 
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    int16_t functionId = tscSqlExprGet(pCmd, i)->functionId;
-    if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS ||
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    int16_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
+    if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS ||
         functionId == TSDB_FUNC_ARITHM) {
       continue;
     }
@@ -5371,44 +4865,44 @@
    // When a tag projection function is applied to a tag column that is not in the group by clause,
    // mixing aggregation and selectivity functions into the select clause is not allowed.
    if (numOfAggregation > 0) {
-      return invalidSqlErrMsg(pCmd, msg1);
+      return invalidSqlErrMsg(pQueryInfo->msg, msg1);
    }
 
    /*
    * if numOfSelectivity equals 0, it is a super table projection query
    */
    if (numOfSelectivity == 1) {
-      doUpdateSqlFunctionForTagPrj(pCmd);
-      doUpdateSqlFunctionForColPrj(pCmd);
+      doUpdateSqlFunctionForTagPrj(pQueryInfo);
+      doUpdateSqlFunctionForColPrj(pQueryInfo);
    } else if (numOfSelectivity > 1) {
      /*
      * If more than one selectivity function exists, all of the selectivity functions must be last_row;
      * otherwise, return an error code.
      */
-      for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-        int16_t functionId = tscSqlExprGet(pCmd, i)->functionId;
+      for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+        int16_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
        if (functionId == TSDB_FUNC_TAGPRJ) {
          continue;
        }
 
        if (((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) && (functionId != TSDB_FUNC_LAST_ROW)) {
-          return invalidSqlErrMsg(pCmd, msg1);
+          return invalidSqlErrMsg(pQueryInfo->msg, msg1);
        }
      }
 
-      doUpdateSqlFunctionForTagPrj(pCmd);
-      doUpdateSqlFunctionForColPrj(pCmd);
+      doUpdateSqlFunctionForTagPrj(pQueryInfo);
+      doUpdateSqlFunctionForColPrj(pQueryInfo);
    }
  } else {
-    if ((pCmd->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) == TSDB_QUERY_TYPE_PROJECTION_QUERY) {
-      if (numOfAggregation > 0 && pCmd->groupbyExpr.numOfGroupCols == 0) {
-        return invalidSqlErrMsg(pCmd, msg3);
+    if ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) == TSDB_QUERY_TYPE_PROJECTION_QUERY) {
+      if (numOfAggregation > 0 && pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
+        return invalidSqlErrMsg(pQueryInfo->msg, msg3);
      }
 
      if (numOfAggregation > 0 || numOfSelectivity > 0) {
        // clear the projection type flag
-        pCmd->type &= (~TSDB_QUERY_TYPE_PROJECTION_QUERY);
-        doUpdateSqlFunctionForColPrj(pCmd);
+        pQueryInfo->type &= (~TSDB_QUERY_TYPE_PROJECTION_QUERY);
+        doUpdateSqlFunctionForColPrj(pQueryInfo);
      }
    }
  }
@@ -5416,18 +4910,18 @@ static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) {
   return TSDB_CODE_SUCCESS;
 }
 
-static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd) {
+static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) {
   const char* msg2 = "interval not allowed in group by normal column";
 
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
   SSchema*        pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta);
 
   int16_t bytes = 0;
   int16_t type = 0;
   char*   name = NULL;
 
-  for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) {
-    SColIndexEx* pColIndex = &pCmd->groupbyExpr.columnInfo[i];
+  for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
+    SColIndexEx* pColIndex = &pQueryInfo->groupbyExpr.columnInfo[i];
 
     int16_t colIndex = pColIndex->colIdx;
     if (pColIndex->colIdx == TSDB_TBNAME_COLUMN_INDEX) {
@@ -5444,25 +4938,25 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd) {
     }
 
     if (TSDB_COL_IS_TAG(pColIndex->flag)) {
-      SColumnIndex index = {.tableIndex = pCmd->groupbyExpr.tableIndex, .columnIndex = colIndex};
+      SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
 
-      SSqlExpr* pExpr =
-          tscSqlExprInsert(pCmd, pCmd->fieldsInfo.numOfOutputCols, TSDB_FUNC_TAG, &index, type, bytes, bytes);
+      SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, TSDB_FUNC_TAG, &index,
+                                         type, bytes, bytes);
 
       pExpr->colInfo.flag = TSDB_COL_TAG;
 
       // NOTE: the tag column is not added to the source column list
       SColumnList ids = {0};
-      insertResultField(pCmd, pCmd->fieldsInfo.numOfOutputCols, &ids, bytes, type, name);
+      insertResultField(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, &ids, bytes, type, name);
     } else {
       // if this query is a "group by" on a normal column, interval is not allowed
-      if (pCmd->nAggTimeInterval > 0) {
-        return invalidSqlErrMsg(pCmd, msg2);
+      if (pQueryInfo->nAggTimeInterval > 0) {
+        return invalidSqlErrMsg(pQueryInfo->msg, msg2);
      }
 
      bool hasGroupColumn = false;
-      for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) {
-        SSqlExpr* pExpr = tscSqlExprGet(pCmd, j);
+      for (int32_t j = 0; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) {
+        SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, j);
        if (pExpr->colInfo.colId == pColIndex->colId) {
          break;
        }
@@ -5473,7 +4967,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd) {
      * but invisible to user
      */
      if (!hasGroupColumn) {
-        doAddGroupColumnForSubquery(pCmd, i);
+        doAddGroupColumnForSubquery(pQueryInfo, i);
      }
    }
  }
@@ -5481,33 +4975,31 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd) {
   return TSDB_CODE_SUCCESS;
 }
 
-int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) {
+int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
   const char* msg1 = "functions/columns not allowed in group by query";
   const char* msg2 = "projection query on columns not allowed";
   const char* msg3 = "group by not allowed on projection query";
   const char* msg4 = "retrieve tags not compatible with group by or interval query";
 
-  SSqlCmd* pCmd = &pSql->cmd;
-
   // only retrieve tags, group by is not supported
   if (pCmd->command == TSDB_SQL_RETRIEVE_TAGS) {
-    if (pCmd->groupbyExpr.numOfGroupCols > 0 || pCmd->nAggTimeInterval > 0) {
-      return invalidSqlErrMsg(pCmd, msg4);
+    if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || pQueryInfo->nAggTimeInterval > 0) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
    } else {
      return TSDB_CODE_SUCCESS;
    }
  }
 
-  if (pCmd->groupbyExpr.numOfGroupCols > 0) {
+  if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
    // check if all the tag projection columns belong to the group by columns
-    if (onlyTagPrjFunction(pCmd) && allTagPrjInGroupby(pCmd)) {
-      updateTagPrjFunction(pCmd);
-      return doAddGroupbyColumnsOnDemand(pCmd);
+    if (onlyTagPrjFunction(pQueryInfo) && allTagPrjInGroupby(pQueryInfo)) {
+      updateTagPrjFunction(pQueryInfo);
+      return doAddGroupbyColumnsOnDemand(pQueryInfo);
    }
 
    // check all query functions in the selection clause; multi-output functions are not allowed
-    for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-      SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+    for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+      SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
      int32_t   functId = pExpr->functionId;
 
      /*
@@ -5516,8 +5008,8 @@ int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) {
      */
      if (functId == TSDB_FUNC_PRJ && pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
        bool qualified = false;
-        for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) {
-          SColIndexEx* pColIndex = &pCmd->groupbyExpr.columnInfo[j];
+        for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
+          SColIndexEx* pColIndex = &pQueryInfo->groupbyExpr.columnInfo[j];
          if (pColIndex->colId == pExpr->colInfo.colId) {
            qualified = true;
            break;
@@ -5525,21 +5017,21 @@ int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) {
        }
 
        if (!qualified) {
-          return invalidSqlErrMsg(pCmd, msg2);
+          return invalidSqlErrMsg(pQueryInfo->msg, msg2);
        }
      }
 
      if (IS_MULTIOUTPUT(aAggs[functId].nStatus) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM &&
          functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) {
-        return invalidSqlErrMsg(pCmd, msg1);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg1);
      }
 
      if (functId == TSDB_FUNC_COUNT && pExpr->colInfo.colIdx == TSDB_TBNAME_COLUMN_INDEX) {
-        return invalidSqlErrMsg(pCmd, msg1);
+        return invalidSqlErrMsg(pQueryInfo->msg, msg1);
      }
    }
 
-    if (checkUpdateTagPrjFunctions(pCmd) != TSDB_CODE_SUCCESS) {
+    if (checkUpdateTagPrjFunctions(pQueryInfo) != TSDB_CODE_SUCCESS) {
      return TSDB_CODE_INVALID_SQL;
    }
 
@@ -5547,34 +5039,34 @@ int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) {
    * the group by tag function must not change the function name; otherwise, the group operation may fail to
    * divide the subsets of the final result.
    */
-    if (doAddGroupbyColumnsOnDemand(pCmd) != TSDB_CODE_SUCCESS) {
+    if (doAddGroupbyColumnsOnDemand(pQueryInfo) != TSDB_CODE_SUCCESS) {
      return TSDB_CODE_INVALID_SQL;
    }
 
    // a projection query on a metric is not compatible with the "group by" syntax
-    if (tscProjectionQueryOnMetric(pCmd)) {
-      return invalidSqlErrMsg(pCmd, msg3);
+    if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
    }
 
    return TSDB_CODE_SUCCESS;
  } else {
-    return checkUpdateTagPrjFunctions(pCmd);
+    return checkUpdateTagPrjFunctions(pQueryInfo);
  }
 }
 
-int32_t doLocalQueryProcess(SQuerySQL* pQuerySql, SSqlCmd* pCmd) {
+int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
   const char* msg1 = "only one expression allowed";
   const char* msg2 = "invalid expression in select clause";
   const char* msg3 = "invalid function";
 
   tSQLExprList* pExprList = pQuerySql->pSelection;
   if (pExprList->nExpr != 1) {
-    return invalidSqlErrMsg(pCmd, msg1);
+    return invalidSqlErrMsg(pQueryInfo->msg, msg1);
   }
 
   tSQLExpr* pExpr = pExprList->a[0].pNode;
   if (pExpr->operand.z == NULL) {
-    return invalidSqlErrMsg(pCmd, msg2);
+    return invalidSqlErrMsg(pQueryInfo->msg, msg2);
   }
 
   // TODO redefine the function
@@ -5593,7 +5085,7 @@ int32_t doLocalQueryProcess(SQuerySQL* pQuerySql, SSqlCmd* pCmd) {
     }
   }
 
-  SSqlExpr* pExpr1 = tscSqlExprInsertEmpty(pCmd, 0, TSDB_FUNC_TAG_DUMMY);
+  SSqlExpr* pExpr1 = tscSqlExprInsertEmpty(pQueryInfo, 0, TSDB_FUNC_TAG_DUMMY);
   if (pExprList->a[0].aliasName != NULL) {
     strncpy(pExpr1->aliasName, pExprList->a[0].aliasName, tListLen(pExpr1->aliasName));
   } else {
@@ -5602,21 +5094,21 @@ int32_t doLocalQueryProcess(SQuerySQL* pQuerySql, SSqlCmd* pCmd) {
 
   switch (index) {
     case 0:
-      pCmd->command = TSDB_SQL_CURRENT_DB;
+      pQueryInfo->command = TSDB_SQL_CURRENT_DB;
       return TSDB_CODE_SUCCESS;
     case 1:
-      pCmd->command = TSDB_SQL_SERV_VERSION;
+      pQueryInfo->command = TSDB_SQL_SERV_VERSION;
       return TSDB_CODE_SUCCESS;
     case 2:
-      pCmd->command = TSDB_SQL_SERV_STATUS;
+      pQueryInfo->command = TSDB_SQL_SERV_STATUS;
       return TSDB_CODE_SUCCESS;
     case 3:
-      pCmd->command = TSDB_SQL_CLI_VERSION;
+      pQueryInfo->command = TSDB_SQL_CLI_VERSION;
       return TSDB_CODE_SUCCESS;
    case 4:
-      pCmd->command = TSDB_SQL_CURRENT_USER;
+      pQueryInfo->command = TSDB_SQL_CURRENT_USER;
       return TSDB_CODE_SUCCESS;
-    default: { return invalidSqlErrMsg(pCmd, msg3); }
+    default: { return invalidSqlErrMsg(pQueryInfo->msg, msg3); }
   }
 }
 
@@ -5626,97 +5118,550 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) {
 
   if (pCreate->commitLog != -1 && (pCreate->commitLog < 0 || pCreate->commitLog > 1)) {
     snprintf(msg, tListLen(msg), "invalid db option commitLog: %d, only 0 or 1 allowed", pCreate->commitLog);
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
 
   if (pCreate->replications != -1 &&
       (pCreate->replications < TSDB_REPLICA_MIN_NUM || pCreate->replications > TSDB_REPLICA_MAX_NUM)) {
     snprintf(msg, tListLen(msg), "invalid db option replications: %d valid range: [%d, %d]", pCreate->replications,
              TSDB_REPLICA_MIN_NUM, TSDB_REPLICA_MAX_NUM);
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
 
   int32_t val = htonl(pCreate->daysPerFile);
   if (val != -1 && (val < TSDB_FILE_MIN_PARTITION_RANGE || val > TSDB_FILE_MAX_PARTITION_RANGE)) {
     snprintf(msg, tListLen(msg), "invalid db option daysPerFile: %d valid range: [%d, %d]", val,
              TSDB_FILE_MIN_PARTITION_RANGE, TSDB_FILE_MAX_PARTITION_RANGE);
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
 
   val = htonl(pCreate->rowsInFileBlock);
   if (val != -1 && (val < TSDB_MIN_ROWS_IN_FILEBLOCK || val > TSDB_MAX_ROWS_IN_FILEBLOCK)) {
     snprintf(msg, tListLen(msg), "invalid db option rowsInFileBlock: %d valid range: [%d, %d]", val,
              TSDB_MIN_ROWS_IN_FILEBLOCK, TSDB_MAX_ROWS_IN_FILEBLOCK);
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
 
   val = htonl(pCreate->cacheBlockSize);
   if (val != -1 && (val < TSDB_MIN_CACHE_BLOCK_SIZE || val > TSDB_MAX_CACHE_BLOCK_SIZE)) {
     snprintf(msg, tListLen(msg), "invalid db option cacheBlockSize: %d valid range: [%d, %d]", val,
             TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MAX_CACHE_BLOCK_SIZE);
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
 
   val = htonl(pCreate->maxSessions);
   if (val != -1 && (val < TSDB_MIN_TABLES_PER_VNODE || val > TSDB_MAX_TABLES_PER_VNODE)) {
     snprintf(msg, tListLen(msg), "invalid db option maxSessions: %d valid range: [%d, %d]", val,
             TSDB_MIN_TABLES_PER_VNODE, TSDB_MAX_TABLES_PER_VNODE);
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
 
   if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO) {
     snprintf(msg, tListLen(msg), "invalid db option timePrecision: %d valid value: [%d, %d]", pCreate->precision,
             TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO);
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
 
   if (pCreate->cacheNumOfBlocks.fraction != -1 && (pCreate->cacheNumOfBlocks.fraction < TSDB_MIN_AVG_BLOCKS ||
                                                    pCreate->cacheNumOfBlocks.fraction > TSDB_MAX_AVG_BLOCKS)) {
     snprintf(msg, tListLen(msg), "invalid db option ablocks: %f valid value: [%d, %d]",
             pCreate->cacheNumOfBlocks.fraction, TSDB_MIN_AVG_BLOCKS, TSDB_MAX_AVG_BLOCKS);
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
 
   val = htonl(pCreate->commitTime);
   if (val != -1 && (val < TSDB_MIN_COMMIT_TIME_INTERVAL || val > TSDB_MAX_COMMIT_TIME_INTERVAL)) {
     snprintf(msg, tListLen(msg), "invalid db option commitTime: %d valid range: [%d, %d]", val,
             TSDB_MIN_COMMIT_TIME_INTERVAL, TSDB_MAX_COMMIT_TIME_INTERVAL);
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
 
   if (pCreate->compression != -1 &&
       (pCreate->compression < TSDB_MIN_COMPRESSION_LEVEL || pCreate->compression > TSDB_MAX_COMPRESSION_LEVEL)) {
     snprintf(msg, tListLen(msg), "invalid db option compression: %d valid range: [%d, %d]", pCreate->compression,
             TSDB_MIN_COMPRESSION_LEVEL, TSDB_MAX_COMPRESSION_LEVEL);
-    return invalidSqlErrMsg(pCmd, msg);
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
 
   return TSDB_CODE_SUCCESS;
 }
 
 // for debug purposes
-void tscPrintSelectClause(SSqlCmd* pCmd) {
-  if (pCmd == NULL || pCmd->exprsInfo.numOfExprs == 0) {
+void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, subClauseIndex);
+
+  if (pQueryInfo->exprsInfo.numOfExprs == 0) {
     return;
   }
 
-  char* str = calloc(1, 10240);
+  int32_t totalBufSize = 1024;
+
+  char    str[1024] = {0};
   int32_t offset = 0;
 
-  offset += sprintf(str, "%d [", pCmd->exprsInfo.numOfExprs);
-  for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+  offset += sprintf(str, "num:%d [", pQueryInfo->exprsInfo.numOfExprs);
+  for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
 
-    int32_t size = sprintf(str + offset, "%s(%d)", aAggs[pExpr->functionId].aName, pExpr->colInfo.colId);
-    offset += size;
+    char    tmpBuf[1024] = {0};
+    int32_t tmpLen = 0;
+    tmpLen = sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].aName, pExpr->uid, pExpr->colInfo.colId);
+    if (tmpLen + offset > totalBufSize) break;
 
-    if (i < pCmd->exprsInfo.numOfExprs - 1) {
+    offset += sprintf(str + offset, "%s", tmpBuf);
+
+    if (i < pQueryInfo->exprsInfo.numOfExprs - 1) {
       str[offset++] = ',';
     }
   }
 
   str[offset] = ']';
-  printf("%s\n", str);
+  tscTrace("%p select clause:%s", pSql, str);
+}
+
+int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo) {
+  const char* msg1 = "invalid table name";
+  const char* msg2 = "table name too long";
+
+  SSqlCmd*        pCmd = &pSql->cmd;
+  SQueryInfo*     pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
+
+  SCreateTableSQL* pCreateTable = pInfo->pCreateTableInfo;
+
+  tFieldList* pFieldList = pCreateTable->colInfo.pColumns;
+  tFieldList* pTagList = pCreateTable->colInfo.pTagColumns;
+
+  assert(pFieldList != NULL);
+
+  // if the sql specifies the db, use it; otherwise use the default db
+  SSQLToken* pzTableName = &(pCreateTable->name);
+
+  if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+  }
+
+  if (setMeterID(pMeterMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+  }
+
+  if (!validateTableColumnInfo(pFieldList, pCmd) ||
+      (pTagList != NULL && !validateTagParams(pTagList, pFieldList, pCmd))) {
+    return TSDB_CODE_INVALID_SQL;
+  }
+
+  int32_t col = 0;
+  for (; col < pFieldList->nField; ++col) {
+    tscFieldInfoSetValFromField(&pQueryInfo->fieldsInfo, col, &pFieldList->p[col]);
+  }
+
+  pCmd->numOfCols = (int16_t)pFieldList->nField;
+
+  if (pTagList != NULL) {  // create metric [optional]
+    for (int32_t i = 0; i < pTagList->nField; ++i) {
+      tscFieldInfoSetValFromField(&pQueryInfo->fieldsInfo, col++, &pTagList->p[i]);
+    }
+
+    pCmd->count = pTagList->nField;
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
+  const char* msg1 = "invalid table name";
+  const char* msg3 = "tag value too long";
+  const char* msg4 = "illegal value or data overflow";
+  const char* msg5 = "tags number not matched";
+
+  SSqlCmd* pCmd = &pSql->cmd;
+
+  SCreateTableSQL* pCreateTable = pInfo->pCreateTableInfo;
+  SQueryInfo*      pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+
+  // two tables: the first is the current (child) table, and the second is the super table.
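As a brief aside, the two-slot arrangement described in the comment above can be pictured with this illustrative sketch; the struct and macro names here are invented for the example, while the real code uses the TABLE_INDEX/STABLE_INDEX constants defined immediately below:

```c
/* Illustrative sketch only: for CREATE TABLE t1 USING st1 TAGS (...), the
 * clause-level query info carries two meter-meta slots. Slot 0 names the child
 * table being created; slot 1 names the super table whose schema and tag
 * layout are consulted. */
#include <assert.h>

#define SKETCH_TABLE_INDEX  0  /* the table to create                   */
#define SKETCH_STABLE_INDEX 1  /* the super table from the USING clause */

typedef struct SketchQueryInfo {
  const char *tableName[2];
  int         numOfTables;
} SketchQueryInfo;

static void layoutCreateFromStable(SketchQueryInfo *q) {
  q->tableName[SKETCH_TABLE_INDEX]  = "t1";   /* set via setMeterID() on the table token  */
  q->tableName[SKETCH_STABLE_INDEX] = "st1";  /* set via setMeterID() on the stable token */
  q->numOfTables = 2;
  assert(q->numOfTables == 2);  /* mirrors the assert in the code right below */
}
```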
+  tscAddEmptyMeterMetaInfo(pQueryInfo);
+  assert(pQueryInfo->numOfTables == 2);
+
+  const int32_t TABLE_INDEX = 0;
+  const int32_t STABLE_INDEX = 1;
+
+  SMeterMetaInfo* pStableMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, STABLE_INDEX);
+
+  // the super table name; the new table is created by using it
+  SSQLToken* pToken = &(pCreateTable->usingInfo.stableName);
+
+  if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+  }
+
+  if (setMeterID(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+  }
+
+  // get the meter meta from the mnode
+  strncpy(pCreateTable->usingInfo.tagdata.name, pStableMeterMetaInfo->name, TSDB_METER_ID_LEN);
+  tVariantList* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals;
+
+  int32_t code = tscGetMeterMeta(pSql, pStableMeterMetaInfo);
+  if (code != TSDB_CODE_SUCCESS) {
+    return code;
+  }
+
+  if (pStableMeterMetaInfo->pMeterMeta->numOfTags != pList->nExpr) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+  }
+
+  // overlong tag values produce an invalid-sql error; they are not truncated automatically
+  SSchema* pTagSchema = tsGetTagSchema(pStableMeterMetaInfo->pMeterMeta);
+
+  char* tagVal = pCreateTable->usingInfo.tagdata.data;
+  for (int32_t i = 0; i < pList->nExpr; ++i) {
+    int32_t ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type);
+    if (ret != TSDB_CODE_SUCCESS) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+    }
+
+    // validate the length of binary/nchar values
+    if ((pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) &&
+        pList->a[i].pVar.nLen > pTagSchema[i].bytes) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+    }
+
+    tagVal += pTagSchema[i].bytes;
+  }
+
+  // the table name
+  if (tscValidateName(&pInfo->pCreateTableInfo->name) != TSDB_CODE_SUCCESS) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+  }
+
+  SMeterMetaInfo* pTableMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, TABLE_INDEX);
+  int32_t ret = setMeterID(pTableMeterMetaInfo, &pInfo->pCreateTableInfo->name, pSql);
+  if (ret != TSDB_CODE_SUCCESS) {
+    return ret;
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
+  const char* msg1 = "invalid table name";
+  const char* msg2 = "table name too long";
+  const char* msg3 = "fill only available for interval query";
+  const char* msg4 = "fill option not supported in stream computing";
+  const char* msg5 = "sql too long";  // todo ADD support
+
+  SSqlCmd*    pCmd = &pSql->cmd;
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+  assert(pQueryInfo->numOfTables == 1);
+
+  SCreateTableSQL* pCreateTable = pInfo->pCreateTableInfo;
+  SMeterMetaInfo*  pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
+
+  // if the sql specifies the db, use it; otherwise use the default db
+  SSQLToken* pzTableName = &(pCreateTable->name);
+  SQuerySQL* pQuerySql = pCreateTable->pSelect;
+
+  if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+  }
+
+  tVariantList* pSrcMeterName = pInfo->pCreateTableInfo->pSelect->from;
+  tVariant*     pVar = &pSrcMeterName->a[0].pVar;
+
+  SSQLToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING};
+  if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) {
+    return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+  }
+
+  if (setMeterID(pMeterMetaInfo, &srcToken, pSql) != TSDB_CODE_SUCCESS) {
+    return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+  }
+
+  int32_t code = tscGetMeterMeta(pSql, pMeterMetaInfo);
+  if (code != TSDB_CODE_SUCCESS) {
+    return code;
+  }
+
+  bool isSTable = UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo);
+  if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_INVALID_SQL;
+  }
+
+  if (pQuerySql->pWhere != NULL) {  // the query condition in stream computing
+    if (parseWhereClause(pQueryInfo, &pQuerySql->pWhere, pSql) != TSDB_CODE_SUCCESS) {
+      return TSDB_CODE_INVALID_SQL;
+    }
+  }
+
+  // set the interval value
+  if (parseIntervalClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_INVALID_SQL;
+  } else {
+    if ((pQueryInfo->nAggTimeInterval > 0) &&
+        (validateFunctionsInIntervalOrGroupbyQuery(pQueryInfo) != TSDB_CODE_SUCCESS)) {
+      return TSDB_CODE_INVALID_SQL;
+    }
+  }
+
+  // set the created table [stream] name
+  if (setMeterID(pMeterMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
+    return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+  }
+
+  if (pQuerySql->selectToken.n > TSDB_MAX_SAVED_SQL_LEN) {
+    return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+  }
+
+  if (tsRewriteFieldNameIfNecessary(pQueryInfo) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_INVALID_SQL;
+  }
+
+  pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutputCols;
+
+  if (validateSqlFunctionInStreamSql(pQueryInfo) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_INVALID_SQL;
+  }
+
+  /*
+   * check whether the fill operation is available; the fill operation itself is parsed and
+   * executed during query execution, not here.
+   */
+  if (pQuerySql->fillType != NULL) {
+    if (pQueryInfo->nAggTimeInterval == 0) {
+      return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+    }
+
+    tVariantListItem* pItem = &pQuerySql->fillType->a[0];
+    if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) {
+      if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) ||
+            (strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) {
+        return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+      }
+    }
+  }
+
+  // set the number of stream table columns
+  pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutputCols;
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
+  assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0));
+
+  const char* msg0 = "invalid table name";
+  const char* msg1 = "table name too long";
+  const char* msg2 = "point interpolation query needs timestamp";
+  const char* msg3 = "sliding value too small";
+  const char* msg4 = "sliding value no larger than the interval value";
+  const char* msg5 = "fill only available for interval query";
+  const char* msg6 = "start(end) time of query range required or time range too large";
+  const char* msg7 = "illegal number of tables in from clause";
+  const char* msg8 = "too many columns in selection clause";
+  const char* msg9 = "TWA query requires both the start and end time";
+
+  int32_t code = TSDB_CODE_SUCCESS;
+
+  SSqlCmd* pCmd = &pSql->cmd;
+
+  SQueryInfo*     pQueryInfo = tscGetQueryInfoDetail(pCmd, index);
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
+  if (pMeterMetaInfo == NULL) {
+    pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo);
+  }
+
+  // reject queries with too many columns in the selection clause
+  if (pQuerySql->pSelection->nExpr > TSDB_MAX_COLUMNS) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
+  }
+
+  /*
+   * handle the sql expressions without a from subclause:
+   *    select current_database();
+   *    select server_version();
+   *    select client_version();
+   *    select server_state();
+   */
+  if (pQuerySql->from == NULL) {
+    assert(pQuerySql->fillType == NULL && pQuerySql->pGroupby == NULL && pQuerySql->pWhere == NULL &&
+           pQuerySql->pSortOrder == NULL);
+    return doLocalQueryProcess(pQueryInfo, pQuerySql);
+  }
+
+  if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
+  }
+
+  pQueryInfo->command = TSDB_SQL_SELECT;
+
+  // set all query tables; there may be more than one.
+  for (int32_t i = 0; i < pQuerySql->from->nExpr; ++i) {
+    tVariant* pTableItem = &pQuerySql->from->a[i].pVar;
+
+    if (pTableItem->nType != TSDB_DATA_TYPE_BINARY) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+    }
+
+    pTableItem->nLen = strdequote(pTableItem->pz);
+
+    SSQLToken tableName = {.z = pTableItem->pz, .n = pTableItem->nLen, .type = TK_STRING};
+    if (tscValidateName(&tableName) != TSDB_CODE_SUCCESS) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+    }
+
+    if (pQueryInfo->numOfTables <= i) {  // more than one table
+      tscAddEmptyMeterMetaInfo(pQueryInfo);
+    }
+
+    SMeterMetaInfo* pMeterInfo1 = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i);
+
+    SSQLToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz};
+    if (setMeterID(pMeterInfo1, &t, pSql) != TSDB_CODE_SUCCESS) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+    }
+
+    code = tscGetMeterMeta(pSql, pMeterInfo1);
+    if (code != TSDB_CODE_SUCCESS) {
+      return code;
+    }
+  }
+
+  assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr);
+
+  // parse the group by clause in the first place
+  if (parseGroupbyClause(pQueryInfo, pQuerySql->pGroupby, pCmd) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_INVALID_SQL;
+  }
+
+  bool isSTable = UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo);
+  if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_INVALID_SQL;
+  }
+
+  // set the interval value
+  if (parseIntervalClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_INVALID_SQL;
+  } else {
+    if ((pQueryInfo->nAggTimeInterval > 0) &&
+        (validateFunctionsInIntervalOrGroupbyQuery(pQueryInfo) != TSDB_CODE_SUCCESS)) {
+      return TSDB_CODE_INVALID_SQL;
+    }
+  }
+
+  // set the order by info
+  if (parseOrderbyClause(pQueryInfo, pQuerySql, tsGetSchema(pMeterMetaInfo->pMeterMeta)) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_INVALID_SQL;
+  }
+
+  // set the where info
+  if (pQuerySql->pWhere != NULL) {
+    if (parseWhereClause(pQueryInfo, &pQuerySql->pWhere, pSql) != TSDB_CODE_SUCCESS) {
+      return TSDB_CODE_INVALID_SQL;
+    }
+
+    pQuerySql->pWhere = NULL;
+
+    if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) {
+      pQueryInfo->stime = pQueryInfo->stime / 1000;
+      pQueryInfo->etime = pQueryInfo->etime / 1000;
+    }
+  } else {  // set the default time range
+    pQueryInfo->stime = 0;
+    pQueryInfo->etime = INT64_MAX;
+  }
+
+  // if the user did not specify the query time window, TWA is not allowed in that case.
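To spell out the guard that follows, here is a hedged sketch (simplified types and invented names, not part of the patch) of why a TWA query is rejected while the time window still holds its defaults; it has the same shape as the hasDefaultQueryTimeRange() helper added at the end of this file:

```c
/* Hedged sketch, not part of the patch: TWA (time-weighted average) weights
 * each data point using the query window bounds, so both ends of the window
 * must be supplied explicitly. A range left at its defaults is rejected. */
#include <stdbool.h>
#include <stdint.h>

typedef struct SketchRange { int64_t stime, etime; } SketchRange;

static bool sketchHasDefaultRange(const SketchRange *r) {
  return (r->stime == 0 && r->etime == INT64_MAX) ||
         (r->stime == INT64_MAX && r->etime == 0);
}

static bool sketchTwaAllowed(const SketchRange *r) {
  /* otherwise the parser returns msg9: "TWA query requires both the start and end time" */
  return !sketchHasDefaultRange(r);
}
```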
+  if ((pQueryInfo->stime == 0 || pQueryInfo->etime == INT64_MAX ||
+       (pQueryInfo->etime == INT64_MAX / 1000 && pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI)) &&
+      tscIsTWAQuery(pQueryInfo)) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
+  }
+
+  // no result due to an invalid query time range
+  if (pQueryInfo->stime > pQueryInfo->etime) {
+    pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
+    return TSDB_CODE_SUCCESS;
+  }
+
+  if (!hasTimestampForPointInterpQuery(pQueryInfo)) {
+    return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+  }
+
+//  // set the sliding value; the query time range needs to be decided in the first place
+//  SSQLToken* pSliding = &pQuerySql->sliding;
+//  if (pSliding->n != 0) {
+//    if (!tscEmbedded && pCmd->inStream == 0 && hasDefaultQueryTimeRange(pQueryInfo)) {  // sliding only allowed in stream
+//      const char* msg = "time range expected for sliding window query";
+//      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+//    }
+//
+//    getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->nSlidingTime);
+//    if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) {
+//      pQueryInfo->nSlidingTime /= 1000;
+//    }
+//
+//    if (pQueryInfo->nSlidingTime < tsMinSlidingTime) {
+//      return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+//    }
+//
+//    if (pQueryInfo->nSlidingTime > pQueryInfo->nAggTimeInterval) {
+//      return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+//    }
+//  } else {
+//    pQueryInfo->nSlidingTime = -1;
+//  }
+
+  // in the case of a join query, the time range is required.
+  if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
+    int64_t timeRange = labs(pQueryInfo->stime - pQueryInfo->etime);
+
+    if (timeRange == 0 && pQueryInfo->stime == 0) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+    }
+  }
+
+  if ((code = parseLimitClause(pQueryInfo, index, pQuerySql, pSql)) != TSDB_CODE_SUCCESS) {
+    return code;
+  }
-  free(str);
+  if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo)) != TSDB_CODE_SUCCESS) {
+    return code;
+  }
+
+  setColumnOffsetValueInResultset(pQueryInfo);
+
+  for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
+    updateTagColumnIndex(pQueryInfo, i);
+  }
+
+  /*
+   * fill options are set at the last position, after all columns have been set properly;
+   * the number of columns may have increased due to the group by operation
+   */
+  if (pQuerySql->fillType != NULL) {
+    if (pQueryInfo->nAggTimeInterval == 0 && (!tscIsPointInterpQuery(pQueryInfo))) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+    }
+
+    if (pQueryInfo->nAggTimeInterval > 0) {
+      int64_t timeRange = labs(pQueryInfo->stime - pQueryInfo->etime);
+      // the number of results must not be greater than 10,000,000
+      if ((timeRange == 0) || (timeRange / pQueryInfo->nAggTimeInterval) > MAX_RETRIEVE_ROWS_IN_INTERVAL_QUERY) {
+        return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+      }
+    }
+
+    int32_t ret = parseFillClause(pQueryInfo, pQuerySql);
+    if (ret != TSDB_CODE_SUCCESS) {
+      return ret;
+    }
+  }
+
+  return TSDB_CODE_SUCCESS;  // does not build the query message here
 }
+
+bool hasDefaultQueryTimeRange(SQueryInfo *pQueryInfo) {
+  return (pQueryInfo->stime == 0 && pQueryInfo->etime == INT64_MAX) ||
+         (pQueryInfo->stime == INT64_MAX && pQueryInfo->etime == 0);
+}
\ No newline at end of file
diff --git a/src/client/src/tscSQLParserImpl.c b/src/client/src/tscSQLParserImpl.c
index cc4375fb03896b240bc57fdabcf2728ade996329..17e1c6f45790acc36c35111aec75e00be227044f 100644
--- a/src/client/src/tscSQLParserImpl.c
+++ b/src/client/src/tscSQLParserImpl.c
@@ -24,7 +24,7 @@ int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) {
   void *pParser = ParseAlloc(malloc);
 
-  pSQLInfo->validSql = true;
+  pSQLInfo->valid = true;
 
   int32_t i = 0;
   while (1) {
@@ -50,12 +50,12 @@ int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) {
       }
       case TK_ILLEGAL: {
         snprintf(pSQLInfo->pzErrMsg, tListLen(pSQLInfo->pzErrMsg), "unrecognized token: \"%s\"", t0.z);
-        pSQLInfo->validSql = false;
+        pSQLInfo->valid = false;
         goto abort_parse;
       }
       default:
         Parse(pParser, t0.type, t0, pSQLInfo);
-        if (pSQLInfo->validSql == false) {
+        if (pSQLInfo->valid == false) {
           goto abort_parse;
         }
     }
@@ -554,58 +554,64 @@ tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExp
   return pList;
 }
 
-void tSetInsertSQLElems(SSqlInfo *pInfo, SSQLToken *pName, tSQLExprListList *pList) {
-  SInsertSQL *pInsert = calloc(1, sizeof(SInsertSQL));
-
-  pInsert->name = *pName;
-  pInsert->pValue = pList;
-
-  pInfo->pInsertInfo = pInsert;
-  pInfo->sqlType = TSQL_INSERT;
-}
-
-void destroyQuerySql(SQuerySQL *pSql) {
-  if (pSql == NULL) return;
-
-  tSQLExprListDestroy(pSql->pSelection);
-  pSql->pSelection = NULL;
-
-  tSQLExprDestroy(pSql->pWhere);
-  pSql->pWhere = NULL;
-
-  tVariantListDestroy(pSql->pSortOrder);
-  pSql->pSortOrder = NULL;
-
-  tVariantListDestroy(pSql->pGroupby);
-  pSql->pGroupby = NULL;
-
-  tVariantListDestroy(pSql->from);
-  pSql->from = NULL;
-
-  tVariantListDestroy(pSql->fillType);
+void doDestroyQuerySql(SQuerySQL *pQuerySql) {
+  if (pQuerySql == NULL) {
+    return;
+  }
+
+  tSQLExprListDestroy(pQuerySql->pSelection);
+
+  pQuerySql->pSelection = NULL;
+
+  tSQLExprDestroy(pQuerySql->pWhere);
+  pQuerySql->pWhere = NULL;
+
+  tVariantListDestroy(pQuerySql->pSortOrder);
+  pQuerySql->pSortOrder = NULL;
+
+  tVariantListDestroy(pQuerySql->pGroupby);
+  pQuerySql->pGroupby = NULL;
+
+  tVariantListDestroy(pQuerySql->from);
+  pQuerySql->from = NULL;
+
+  tVariantListDestroy(pQuerySql->fillType);
+
+  free(pQuerySql);
+}
+
+void destroyAllSelectClause(SSubclauseInfo *pClause) {
+  if (pClause == NULL || pClause->numOfClause == 0) {
+    return;
+  }
 
-  free(pSql);
+  for(int32_t i = 0; i < pClause->numOfClause; ++i) {
+    SQuerySQL *pQuerySql = pClause->pClause[i];
+    doDestroyQuerySql(pQuerySql);
+  }
+
+  tfree(pClause->pClause);
 }
 
-SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pMetricName,
+SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pStableName,
                                     tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type) {
   SCreateTableSQL *pCreate = calloc(1, sizeof(SCreateTableSQL));
 
   switch (type) {
-    case TSQL_CREATE_NORMAL_METER: {
+    case TSQL_CREATE_TABLE: {
       pCreate->colInfo.pColumns = pCols;
       assert(pTagVals == NULL && pTags == NULL);
       break;
     }
-    case TSQL_CREATE_NORMAL_METRIC: {
+    case TSQL_CREATE_STABLE: {
       pCreate->colInfo.pColumns = pCols;
       pCreate->colInfo.pTagColumns = pTags;
       assert(pTagVals == NULL && pTags != NULL && pCols != NULL);
       break;
     }
-    case TSQL_CREATE_METER_FROM_METRIC: {
+    case TSQL_CREATE_TABLE_FROM_STABLE: {
       pCreate->usingInfo.pTagVals = pTagVals;
-      pCreate->usingInfo.metricName = *pMetricName;
+      pCreate->usingInfo.stableName = *pStableName;
       break;
     }
     case TSQL_CREATE_STREAM: {
@@ -616,19 +622,24 @@ SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLTo
       assert(false);
   }
 
+  pCreate->type = type;
   return pCreate;
 }
 
 SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type) {
   SAlterTableSQL *pAlterTable = calloc(1, sizeof(SAlterTableSQL));
+
+  pAlterTable->name = *pMeterName;
+  pAlterTable->type = type;
 
-  if (type == ALTER_TABLE_ADD_COLUMN || type == ALTER_TABLE_TAGS_ADD) {
+  if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
     pAlterTable->pAddColumns = pCols;
     assert(pVals == NULL);
   } else {
-    /* ALTER_TABLE_TAGS_CHG, ALTER_TABLE_TAGS_SET, ALTER_TABLE_TAGS_DROP,
-     * ALTER_TABLE_DROP_COLUMN */
+    /*
+     * ALTER_TABLE_TAGS_CHG, ALTER_TABLE_TAGS_SET, ALTER_TABLE_TAGS_DROP,
+     * ALTER_TABLE_DROP_COLUMN
+     */
     pAlterTable->varList = pVals;
     assert(pCols == NULL);
   }
@@ -639,27 +650,28 @@ SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tV
 void SQLInfoDestroy(SSqlInfo *pInfo) {
   if (pInfo == NULL) return;
 
-  if (pInfo->sqlType == TSQL_QUERY_METER) {
-    destroyQuerySql(pInfo->pQueryInfo);
-  } else if (pInfo->sqlType >= TSQL_CREATE_NORMAL_METER && pInfo->sqlType <= TSQL_CREATE_STREAM) {
+  if (pInfo->type == TSDB_SQL_SELECT) {
+    destroyAllSelectClause(&pInfo->subclauseInfo);
+  } else if (pInfo->type == TSDB_SQL_CREATE_TABLE) {
     SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo;
-    destroyQuerySql(pCreateTableInfo->pSelect);
+    doDestroyQuerySql(pCreateTableInfo->pSelect);
 
     tFieldListDestroy(pCreateTableInfo->colInfo.pColumns);
     tFieldListDestroy(pCreateTableInfo->colInfo.pTagColumns);
 
     tVariantListDestroy(pCreateTableInfo->usingInfo.pTagVals);
     tfree(pInfo->pCreateTableInfo);
-  } else if (pInfo->sqlType >= ALTER_TABLE_TAGS_ADD && pInfo->sqlType <= ALTER_TABLE_DROP_COLUMN) {
+  } else if (pInfo->type == TSDB_SQL_ALTER_TABLE) {
     tVariantListDestroy(pInfo->pAlterInfo->varList);
     tFieldListDestroy(pInfo->pAlterInfo->pAddColumns);
+
     tfree(pInfo->pAlterInfo);
   } else {
     if (pInfo->pDCLInfo != NULL && pInfo->pDCLInfo->nAlloc > 0) {
       free(pInfo->pDCLInfo->a);
     }
 
-    if (pInfo->sqlType == CREATE_DATABASE) {
+    if (pInfo->type == TSDB_SQL_CREATE_DB) {
       tVariantListDestroy(pInfo->pDCLInfo->dbOpt.keep);
     }
 
@@ -667,13 +679,52 @@ void SQLInfoDestroy(SSqlInfo *pInfo) {
   }
 }
 
-void setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type) {
-  pInfo->sqlType = type;
-  pInfo->pCreateTableInfo = pSqlExprInfo;
+SSubclauseInfo* setSubclause(SSubclauseInfo* pSubclause, void *pSqlExprInfo) {
+  if (pSubclause == NULL) {
+    pSubclause = calloc(1, sizeof(SSubclauseInfo));
+  }
+
+  int32_t newSize = pSubclause->numOfClause + 1;
+  char*   tmp = realloc(pSubclause->pClause, newSize * POINTER_BYTES);
+  if (tmp == NULL) {
+    return pSubclause;
+  }
+
+  pSubclause->pClause = (SQuerySQL**) tmp;
+
+  pSubclause->pClause[newSize - 1] = pSqlExprInfo;
+  pSubclause->numOfClause++;
+
+  return pSubclause;
+}
 
+SSqlInfo* setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type) {
+  pInfo->type = type;
+
+  if (type == TSDB_SQL_SELECT) {
+    pInfo->subclauseInfo = *(SSubclauseInfo*) pSqlExprInfo;
+    free(pSqlExprInfo);
+  } else {
+    pInfo->pCreateTableInfo = pSqlExprInfo;
+  }
+
   if (pMeterName != NULL) {
     pInfo->pCreateTableInfo->name = *pMeterName;
   }
+
+  return pInfo;
+}
+
+SSubclauseInfo* appendSelectClause(SSubclauseInfo *pQueryInfo, void *pSubclause) {
+  char* tmp = realloc(pQueryInfo->pClause, (pQueryInfo->numOfClause + 1) * POINTER_BYTES);
+  if (tmp == NULL) {  // out of memory
+    return pQueryInfo;
+  }
+
+  pQueryInfo->pClause = (SQuerySQL**) tmp;
+  pQueryInfo->pClause[pQueryInfo->numOfClause++] = pSubclause;
+
+  return pQueryInfo;
 }
 
 void setCreatedMeterName(SSqlInfo *pInfo, SSQLToken *pMeterName, SSQLToken *pIfNotExists) {
@@ -703,23 +754,57 @@ tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken) {
   return pTokenList;
 }
 
 void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) {
-  pInfo->sqlType = type;
+  pInfo->type = type;
   if (nParam == 0) return;
-  if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
+  if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = (tDCLSQL *)calloc(1, sizeof(tDCLSQL));
 
   va_list va;
   va_start(va, nParam);
 
   while (nParam-- > 0) {
     SSQLToken *pToken = va_arg(va, SSQLToken *);
-    tTokenListAppend(pInfo->pDCLInfo, pToken);
+    (void)tTokenListAppend(pInfo->pDCLInfo, pToken);
   }
   va_end(va);
 }
 
+void setDropDBTableInfo(SSqlInfo *pInfo, int32_t type, SSQLToken* pToken, SSQLToken* existsCheck) {
+  pInfo->type = type;
+
+  if (pInfo->pDCLInfo == NULL) {
+    pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
+  }
+
+  tTokenListAppend(pInfo->pDCLInfo, pToken);
+  pInfo->pDCLInfo->existsCheck = (existsCheck->n == 1);
+}
+
+void setShowOptions(SSqlInfo *pInfo, int32_t type, SSQLToken* prefix, SSQLToken* pPatterns) {
+  if (pInfo->pDCLInfo == NULL) {
+    pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
+  }
+
+  pInfo->type = TSDB_SQL_SHOW;
+
+  SShowInfo* pShowInfo = &pInfo->pDCLInfo->showOpt;
+  pShowInfo->showType = type;
+
+  if (prefix != NULL && prefix->type != 0) {
+    pShowInfo->prefix = *prefix;
+  } else {
+    pShowInfo->prefix.type = 0;
+  }
+
+  if (pPatterns != NULL && pPatterns->type != 0) {
+    pShowInfo->pattern = *pPatterns;
+  } else {
+    pShowInfo->pattern.type = 0;
+  }
+}
+
 void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBInfo *pDB, SSQLToken *pIgExists) {
-  pInfo->sqlType = type;
+  pInfo->type = type;
   if (pInfo->pDCLInfo == NULL) {
     pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
   }
@@ -731,18 +816,67 @@ void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBI
 }
 
 void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo) {
-  pInfo->sqlType = type;
+  pInfo->type = type;
   if (pInfo->pDCLInfo == NULL) {
     pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
   }
 
   pInfo->pDCLInfo->acctOpt = *pAcctInfo;
+
+  assert(pName != NULL);
+  pInfo->pDCLInfo->user.user = *pName;
+
+  if (pPwd != NULL) {
+    pInfo->pDCLInfo->user.passwd = *pPwd;
+  }
+}
+
+void setCreateUserSQL(SSqlInfo *pInfo, SSQLToken *pName, SSQLToken *pPasswd) {
+  pInfo->type = TSDB_SQL_CREATE_USER;
+  if (pInfo->pDCLInfo == NULL) {
+    pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
+  }
+
+  assert(pName != NULL && pPasswd != NULL);
+
+  pInfo->pDCLInfo->user.user = *pName;
+  pInfo->pDCLInfo->user.passwd = *pPasswd;
+}
 
-  tTokenListAppend(pInfo->pDCLInfo, pName);
+void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SSQLToken *pName, SSQLToken* pPwd, SSQLToken *pPrivilege) {
+  pInfo->type = TSDB_SQL_ALTER_USER;
+  if (pInfo->pDCLInfo == NULL) {
+    pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
+  }
+
+  assert(pName != NULL);
+
+  SUserInfo* pUser = &pInfo->pDCLInfo->user;
+  pUser->type = type;
+  pUser->user = *pName;
+
+  if (pPwd != NULL) {
+    pUser->passwd = *pPwd;
+  } else {
+    pUser->passwd.type = TSDB_DATA_TYPE_NULL;
+  }
+
+  if (pPrivilege != NULL) {
+    pUser->privilege = *pPrivilege;
+  } else {
+    pUser->privilege.type = TSDB_DATA_TYPE_NULL;
+  }
+}
 
-  if (pPwd->n > 0) {
-    tTokenListAppend(pInfo->pDCLInfo, pPwd);
+void setKillSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *ip) {
+  pInfo->type = type;
+  if (pInfo->pDCLInfo == NULL) {
+    pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
   }
+
+  assert(ip != NULL);
+
+  pInfo->pDCLInfo->ip = *ip;
 }
 
 void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo) {
diff --git a/src/client/src/tscSecondaryMerge.c b/src/client/src/tscSecondaryMerge.c
index 737c1342d8b8d852a9fa66fd8c5635be15b4d911..30f1dfad773304f98b76df152cb90085bf95c55f 100644
--- a/src/client/src/tscSecondaryMerge.c
+++ b/src/client/src/tscSecondaryMerge.c
@@ -58,12 +58,13 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
    * the fields and offset attributes in pCmd and pModel may be different due to the
    * merge requirement. So, the final result in the pRes structure is formatted in accordance with the pCmd object.
    */
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
     SQLFunctionCtx *pCtx = &pReducer->pCtx[i];
 
-    pCtx->aOutputBuf = pReducer->pResultBuf->data + tscFieldInfoGetOffset(pCmd, i) * pReducer->resColModel->maxCapacity;
-    pCtx->order = pCmd->order.order;
-    pCtx->functionId = pCmd->exprsInfo.pExprs[i].functionId;
+    pCtx->aOutputBuf = pReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pReducer->resColModel->maxCapacity;
+    pCtx->order = pQueryInfo->order.order;
+    pCtx->functionId = pQueryInfo->exprsInfo.pExprs[i].functionId;
 
     // the input buffer holds only one data point
     pCtx->aInputElemBuf = pReducer->pTempBuffer->data + pDesc->pSchema->colOffset[i];
@@ -72,7 +73,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
     pCtx->inputType = pDesc->pSchema->pFields[i].type;
     pCtx->inputBytes = pDesc->pSchema->pFields[i].bytes;
 
-    TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
+    TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
     // the output data format still comes from pCmd
     pCtx->outputBytes = pField->bytes;
     pCtx->outputType = pField->type;
@@ -84,15 +85,15 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
 
     pRes->bytes[i] = pField->bytes;
 
-    SSqlExpr *pExpr = tscSqlExprGet(pCmd, i);
+    SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
 
     // for the top/bottom functions, the timestamp output is the first column
     int32_t functionId = pExpr->functionId;
     if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
       pCtx->ptsOutputBuf = pReducer->pCtx[0].aOutputBuf;
-      pCtx->param[2].i64Key = pCmd->order.order;
+      pCtx->param[2].i64Key = pQueryInfo->order.order;
       pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
-      pCtx->param[1].i64Key = pCmd->order.orderColId;
+      pCtx->param[1].i64Key = pQueryInfo->order.orderColId;
     }
 
     SResultInfo *pResInfo = &pReducer->pResInfo[i];
@@ -105,11 +106,11 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
   int16_t n = 0;
   int16_t tagLen = 0;
-  SQLFunctionCtx** pTagCtx = calloc(pCmd->fieldsInfo.numOfOutputCols, POINTER_BYTES);
+  SQLFunctionCtx** pTagCtx = calloc(pQueryInfo->fieldsInfo.numOfOutputCols, POINTER_BYTES);
 
   SQLFunctionCtx* pCtx = NULL;
-  for(int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    SSqlExpr *pExpr = tscSqlExprGet(pCmd, i);
+  for(int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
     if (pExpr->functionId == TSDB_FUNC_TAG_DUMMY || pExpr->functionId == TSDB_FUNC_TS_DUMMY) {
       tagLen += pExpr->resBytes;
       pTagCtx[n++] = &pReducer->pCtx[i];
@@ -212,9 +213,11 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
       tscTrace("%p load data from disk into memory, orderOfVnode:%d, total:%d", pSqlObjAddr, i + 1, idx + 1);
       tExtMemBufferLoadData(pMemBuffer[i], &(pDS->filePage), j, 0);
 #ifdef _DEBUG_VIEW
-      printf("load data page into mem for build loser tree: %ld rows\n", pDS->filePage.numOfElems);
+      printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", pDS->filePage.numOfElems);
       SSrcColumnInfo colInfo[256] = {0};
-      tscGetSrcColumnInfo(colInfo, pCmd);
+      SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
+      tscGetSrcColumnInfo(colInfo, pQueryInfo);
       tColModelDisplayEx(pDesc->pSchema, pDS->filePage.data, pDS->filePage.numOfElems, pMemBuffer[0]->numOfElemsPerPage,
                          colInfo);
@@ -238,7 +241,9 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   param->pLocalData = pReducer->pLocalDataSrc;
   param->pDesc = pReducer->pDesc;
   param->numOfElems = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage;
-  param->groupOrderType = pCmd->groupbyExpr.orderType;
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
+  param->groupOrderType = pQueryInfo->groupbyExpr.orderType;
 
   pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
   if (pReducer->pLoserTree == NULL || pRes->code != 0) {
@@ -247,12 +252,12 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
 
   // the input data follows the old format, but the output is in a new format;
   // so all of the input must be parsed using the old format
-  pReducer->pCtx = (SQLFunctionCtx *)calloc(pCmd->fieldsInfo.numOfOutputCols, sizeof(SQLFunctionCtx));
+  pReducer->pCtx = (SQLFunctionCtx *)calloc(pQueryInfo->fieldsInfo.numOfOutputCols, sizeof(SQLFunctionCtx));
 
   pReducer->rowSize = pMemBuffer[0]->nElemSize;
 
-  tscRestoreSQLFunctionForMetricQuery(pCmd);
-  tscFieldInfoCalOffset(pCmd);
+  tscRestoreSQLFunctionForMetricQuery(pQueryInfo);
+  tscFieldInfoCalOffset(pQueryInfo);
 
   if (pReducer->rowSize > pMemBuffer[0]->nPageSize) {
     assert(false);  // todo: the fixed row size is larger than the minimum page size
@@ -272,7 +277,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   pReducer->nResultBufSize = pMemBuffer[0]->nPageSize * 16;
   pReducer->pResultBuf = (tFilePage *)calloc(1, pReducer->nResultBufSize + sizeof(tFilePage));
 
-  int32_t finalRowLength = tscGetResRowLength(pCmd);
+  int32_t finalRowLength = tscGetResRowLength(pQueryInfo);
   pReducer->resColModel = finalmodel;
   pReducer->resColModel->maxCapacity = pReducer->nResultBufSize / finalRowLength;
   assert(finalRowLength <= pReducer->rowSize);
@@ -294,33 +299,40 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   }
   pReducer->pTempBuffer->numOfElems = 0;
-  pReducer->pResInfo = calloc((size_t)pCmd->fieldsInfo.numOfOutputCols, sizeof(SResultInfo));
+  pReducer->pResInfo = calloc((size_t)pQueryInfo->fieldsInfo.numOfOutputCols, sizeof(SResultInfo));
 
-  tscCreateResPointerInfo(pCmd, pRes);
+  tscCreateResPointerInfo(pRes, pQueryInfo);
   tscInitSqlContext(pCmd, pRes, pReducer, pDesc);
 
   // we change the maxCapacity of the schema to denote that there is only one row in the temp buffer
   pReducer->pDesc->pSchema->maxCapacity = 1;
-  pReducer->offset = pCmd->limit.offset;
-
+
+  // restore the limit value at the last stage
+  if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
+    pQueryInfo->limit.limit = pQueryInfo->clauseLimit;
+    pQueryInfo->limit.offset = pQueryInfo->prjOffset;
+  }
+
+  pReducer->offset = pQueryInfo->limit.offset;
+
   pRes->pLocalReducer = pReducer;
   pRes->numOfGroups = 0;
 
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
   int16_t prec = pMeterMetaInfo->pMeterMeta->precision;
 
-  int64_t stime = (pCmd->stime < pCmd->etime) ? pCmd->stime : pCmd->etime;
-  int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, prec);
+  int64_t stime = (pQueryInfo->stime < pQueryInfo->etime) ? pQueryInfo->stime : pQueryInfo->etime;
+  int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, prec);
 
   SInterpolationInfo *pInterpoInfo = &pReducer->interpolationInfo;
-  taosInitInterpoInfo(pInterpoInfo, pCmd->order.order, revisedSTime, pCmd->groupbyExpr.numOfGroupCols,
+  taosInitInterpoInfo(pInterpoInfo, pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
                       pReducer->rowSize);
 
-  int32_t startIndex = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupCols;
+  int32_t startIndex = pQueryInfo->fieldsInfo.numOfOutputCols - pQueryInfo->groupbyExpr.numOfGroupCols;
 
-  if (pCmd->groupbyExpr.numOfGroupCols > 0) {
-    pInterpoInfo->pTags[0] = (char *)pInterpoInfo->pTags + POINTER_BYTES * pCmd->groupbyExpr.numOfGroupCols;
-    for (int32_t i = 1; i < pCmd->groupbyExpr.numOfGroupCols; ++i) {
+  if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
+    pInterpoInfo->pTags[0] = (char *)pInterpoInfo->pTags + POINTER_BYTES * pQueryInfo->groupbyExpr.numOfGroupCols;
+    for (int32_t i = 1; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
       pInterpoInfo->pTags[i] = pReducer->resColModel->pFields[startIndex + i - 1].bytes + pInterpoInfo->pTags[i - 1];
     }
   } else {
@@ -342,7 +354,7 @@ static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor
   }
 
 #ifdef _DEBUG_VIEW
-  printf("%ld rows data flushed to disk after been sorted:\n", pPage->numOfElems);
+  printf("%" PRIu64 " rows of data flushed to disk after being sorted:\n", pPage->numOfElems);
   tColModelDisplay(pDesc->pSchema, pPage->data, pPage->numOfElems, pPage->numOfElems);
 #endif
@@ -429,7 +441,8 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
   }
 
   SSqlCmd *pCmd = &pSql->cmd;
-
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
   // there are no more results, so we release all allocated resources
   SLocalReducer *pLocalReducer = (SLocalReducer*)atomic_exchange_ptr(&pRes->pLocalReducer, NULL);
   if (pLocalReducer != NULL) {
@@ -440,15 +453,18 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
       tscTrace("%p waiting for delete procedure, status: %d", pSql, status);
     }
 
-    tfree(pLocalReducer->interpolationInfo.prevValues);
-    tfree(pLocalReducer->interpolationInfo.pTags);
+    taosDestoryInterpoInfo(&pLocalReducer->interpolationInfo);
 
     if (pLocalReducer->pCtx != NULL) {
-      for(int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
+      for(int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
         SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
+
         tVariantDestroy(&pCtx->tag);
+        if (pCtx->tagInfo.pTagCtxList != NULL) {
+          tfree(pCtx->tagInfo.pTagCtxList);
+        }
       }
-
+
       tfree(pLocalReducer->pCtx);
     }
 
@@ -459,7 +475,7 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
     tfree(pLocalReducer->pResultBuf);
 
     if (pLocalReducer->pResInfo != NULL) {
-      for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
+      for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
         tfree(pLocalReducer->pResInfo[i].interResultBuf);
       }
 
@@ -494,12 +510,14 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
 static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCmd, tColModel *pModel) {
   int32_t numOfGroupByCols = 0;
-  if (pCmd->groupbyExpr.numOfGroupCols > 0) {
-    numOfGroupByCols = pCmd->groupbyExpr.numOfGroupCols;
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
+  if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
+    numOfGroupByCols = pQueryInfo->groupbyExpr.numOfGroupCols;
   }
 
   // the primary timestamp column is involved in the final result
-  if (pCmd->nAggTimeInterval != 0) {
+  if (pQueryInfo->nAggTimeInterval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
     numOfGroupByCols++;
   }
 
@@ -509,20 +527,20 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
   }
 
   if (numOfGroupByCols > 0) {
-    int32_t startCols = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupCols;
+    int32_t startCols = pQueryInfo->fieldsInfo.numOfOutputCols - pQueryInfo->groupbyExpr.numOfGroupCols;
 
     // the tag values are located in the last columns
-    for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) {
+    for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
       orderIdx[i] = startCols++;
     }
 
-    if (pCmd->nAggTimeInterval != 0) {
+    if (pQueryInfo->nAggTimeInterval != 0) {
       // the first column is the timestamp; this handles queries like "interval(10m) group by tags"
       orderIdx[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX;
     }
   }
 
-  *pOrderDesc = tOrderDesCreate(orderIdx, numOfGroupByCols, pModel, pCmd->order.order);
+  *pOrderDesc = tOrderDesCreate(orderIdx, numOfGroupByCols, pModel, pQueryInfo->order.order);
   tfree(orderIdx);
 
   if (*pOrderDesc == NULL) {
@@ -533,9 +551,17 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
 }
 
 bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpBuffer) {
-  int16_t functionId = tscSqlExprGet(pCmd, 0)->functionId;
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
+  int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId;
 
   // disable the merge procedure for column projection queries
+  assert(functionId != TSDB_FUNC_ARITHM);
+
+  if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
+    return true;
+  }
+
   if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) {
     return false;
   }
@@ -550,10 +576,10 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
 
   if (pOrderDesc->orderIdx.pData[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) {  //<= 0
     // super table interval query
-    assert(pCmd->nAggTimeInterval > 0);
+    assert(pQueryInfo->nAggTimeInterval > 0);
     pOrderDesc->orderIdx.numOfOrderedCols -= 1;
   } else {  // simple group by query
-    assert(pCmd->nAggTimeInterval == 0);
+    assert(pQueryInfo->nAggTimeInterval == 0);
   }
 
   // only one row exists
@@ -572,7 +598,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
   tColModel *pModel = NULL;
   *pFinalModel = NULL;
 
-  SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
+  SQueryInfo*     pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+  SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
 
   (*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pMeterMetaInfo->pMetricMeta->numOfVnodes);
   if (*pMemBuffer == NULL) {
@@ -581,7 +608,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
     return pRes->code;
  }
 
-  pSchema = (SSchema *)calloc(1, sizeof(SSchema) * pCmd->fieldsInfo.numOfOutputCols);
+  pSchema = (SSchema *)calloc(1, sizeof(SSchema) * pQueryInfo->fieldsInfo.numOfOutputCols);
   if (pSchema == NULL) {
     tscError("%p failed to allocate memory", pSql);
     pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
@@ -589,8 +616,8 @@ int32_t
tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr } int32_t rlen = 0; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); pSchema[i].bytes = pExpr->resBytes; pSchema[i].type = pExpr->resType; @@ -598,8 +625,12 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr rlen += pExpr->resBytes; } - int32_t capacity = nBufferSizes / rlen; - pModel = tColModelCreate(pSchema, pCmd->fieldsInfo.numOfOutputCols, capacity); + int32_t capacity = 0; + if (rlen != 0) { + capacity = nBufferSizes / rlen; + } + + pModel = tColModelCreate(pSchema, pQueryInfo->fieldsInfo.numOfOutputCols, capacity); for (int32_t i = 0; i < pMeterMetaInfo->pMetricMeta->numOfVnodes; ++i) { char tmpPath[512] = {0}; @@ -615,16 +646,16 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr return pRes->code; } - memset(pSchema, 0, sizeof(SSchema) * pCmd->fieldsInfo.numOfOutputCols); - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + memset(pSchema, 0, sizeof(SSchema) * pQueryInfo->fieldsInfo.numOfOutputCols); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); pSchema[i].type = pField->type; pSchema[i].bytes = pField->bytes; strcpy(pSchema[i].name, pField->name); } - *pFinalModel = tColModelCreate(pSchema, pCmd->fieldsInfo.numOfOutputCols, capacity); + *pFinalModel = tColModelCreate(pSchema, pQueryInfo->fieldsInfo.numOfOutputCols, capacity); tfree(pSchema); return TSDB_CODE_SUCCESS; @@ -714,15 +745,15 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource * } } -void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SSqlCmd *pCmd, SInterpolationInfo *pInterpoInfo) { +void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo, SInterpolationInfo *pInterpoInfo) { // discard following dataset in the same group and reset the interpolation information - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); int16_t prec = pMeterMetaInfo->pMeterMeta->precision; - int64_t stime = (pCmd->stime < pCmd->etime) ? pCmd->stime : pCmd->etime; - int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, prec); + int64_t stime = (pQueryInfo->stime < pQueryInfo->etime) ? 
pQueryInfo->stime : pQueryInfo->etime; + int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, prec); - taosInitInterpoInfo(pInterpoInfo, pCmd->order.order, revisedSTime, pCmd->groupbyExpr.numOfGroupCols, + taosInitInterpoInfo(pInterpoInfo, pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, pLocalReducer->rowSize); pLocalReducer->discard = true; @@ -733,11 +764,12 @@ void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SSqlCmd *pC } // todo merge with following function -static void reversedCopyResultToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage *pFinalDataPage) { - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); +static void reversedCopyResultToDstBuf(SQueryInfo* pQueryInfo, SSqlRes *pRes, tFilePage *pFinalDataPage) { + + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); - int32_t offset = tscFieldInfoGetOffset(pCmd, i); + int32_t offset = tscFieldInfoGetOffset(pQueryInfo, i); char * src = pFinalDataPage->data + (pRes->numOfRows - 1) * pField->bytes + pRes->numOfRows * offset; char * dst = pRes->data + pRes->numOfRows * offset; @@ -749,12 +781,11 @@ static void reversedCopyResultToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage * } } -static void reversedCopyFromInterpolationToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage **pResPages, - SLocalReducer *pLocalReducer) { - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); +static void reversedCopyFromInterpolationToDstBuf(SQueryInfo* pQueryInfo, SSqlRes *pRes, tFilePage **pResPages, SLocalReducer *pLocalReducer) { + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); - int32_t offset = tscFieldInfoGetOffset(pCmd, i); + int32_t offset = tscFieldInfoGetOffset(pQueryInfo, i); assert(offset == pLocalReducer->resColModel->colOffset[i]); char *src = pResPages[i]->data + (pRes->numOfRows - 1) * pField->bytes; @@ -776,7 +807,8 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo SSqlCmd * pCmd = &pSql->cmd; SSqlRes * pRes = &pSql->res; tFilePage *pFinalDataPage = pLocalReducer->pResultBuf; - + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (pRes->pLocalReducer != pLocalReducer) { /* * Release the SSqlObj is called, and it is int destroying function invoked by other thread. 
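Every hunk above applies the same refactor: per-clause state (fields, exprs, order, limit, interval) moves off SSqlCmd onto an SQueryInfo that is always resolved through the command's clauseIndex. A minimal sketch of that accessor pattern, using illustrative struct layouts rather than the real TDengine definitions:

```c
#include <assert.h>
#include <stdint.h>

/* Illustrative stand-ins; the real SSqlCmd/SQueryInfo carry far more state. */
typedef struct SQueryInfo {
  int16_t command;     /* per-clause command, e.g. TSDB_SQL_HB           */
  int32_t numOfTables; /* 0 is legal only for the heartbeat clause       */
} SQueryInfo;

typedef struct SSqlCmd {
  int16_t     clauseIndex;    /* active clause of a multi-clause statement */
  int16_t     numOfClause;
  SQueryInfo *pQueryInfo[16]; /* hypothetical fixed bound for this sketch  */
} SSqlCmd;

/* Mirrors how tscGetQueryInfoDetail() is used throughout this patch:
 * fetch the clause-local state once, then replace every former
 * pCmd->field access with pQueryInfo->field. */
static SQueryInfo *getQueryInfoDetail(SSqlCmd *pCmd, int32_t clauseIndex) {
  assert(clauseIndex >= 0 && clauseIndex < pCmd->numOfClause);
  return pCmd->pQueryInfo[clauseIndex];
}
```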
@@ -786,111 +818,112 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo assert(pRes->pLocalReducer == NULL); } - if (pCmd->nAggTimeInterval == 0 || pCmd->interpoType == TSDB_INTERPO_NONE) { + if (pQueryInfo->nAggTimeInterval == 0 || pQueryInfo->interpoType == TSDB_INTERPO_NONE) { // no interval query, no interpolation pRes->data = pLocalReducer->pFinalRes; pRes->numOfRows = pFinalDataPage->numOfElems; - pRes->numOfTotal += pRes->numOfRows; + pRes->numOfTotalInCurrentClause += pRes->numOfRows; - if (pCmd->limit.offset > 0) { - if (pCmd->limit.offset < pRes->numOfRows) { + if (pQueryInfo->limit.offset > 0) { + if (pQueryInfo->limit.offset < pRes->numOfRows) { int32_t prevSize = pFinalDataPage->numOfElems; - tColModelErase(pLocalReducer->resColModel, pFinalDataPage, prevSize, 0, pCmd->limit.offset - 1); + tColModelErase(pLocalReducer->resColModel, pFinalDataPage, prevSize, 0, pQueryInfo->limit.offset - 1); /* remove the hole in column model */ tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize); - pRes->numOfRows -= pCmd->limit.offset; - pRes->numOfTotal -= pCmd->limit.offset; - pCmd->limit.offset = 0; + pRes->numOfRows -= pQueryInfo->limit.offset; + pRes->numOfTotalInCurrentClause -= pQueryInfo->limit.offset; + pQueryInfo->limit.offset = 0; } else { - pCmd->limit.offset -= pRes->numOfRows; + pQueryInfo->limit.offset -= pRes->numOfRows; pRes->numOfRows = 0; - pRes->numOfTotal = 0; + pRes->numOfTotalInCurrentClause = 0; } } - if (pCmd->limit.limit >= 0 && pRes->numOfTotal > pCmd->limit.limit) { + if (pQueryInfo->limit.limit >= 0 && pRes->numOfTotalInCurrentClause > pQueryInfo->limit.limit) { /* impose the limitation of output rows on the final result */ int32_t prevSize = pFinalDataPage->numOfElems; - int32_t overFlow = pRes->numOfTotal - pCmd->limit.limit; + int32_t overFlow = pRes->numOfTotalInCurrentClause - pQueryInfo->limit.limit; assert(overFlow < pRes->numOfRows); - pRes->numOfTotal = pCmd->limit.limit; + pRes->numOfTotalInCurrentClause = pQueryInfo->limit.limit; pRes->numOfRows -= overFlow; pFinalDataPage->numOfElems -= overFlow; tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize); /* set remain data to be discarded, and reset the interpolation information */ - savePrevRecordAndSetupInterpoInfo(pLocalReducer, pCmd, &pLocalReducer->interpolationInfo); + savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, &pLocalReducer->interpolationInfo); } - int32_t rowSize = tscGetResRowLength(pCmd); + int32_t rowSize = tscGetResRowLength(pQueryInfo); // handle the descend order output - if (pCmd->order.order == TSQL_SO_ASC) { +// if (pQueryInfo->order.order == TSQL_SO_ASC) { memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * rowSize); - } else { - reversedCopyResultToDstBuf(pCmd, pRes, pFinalDataPage); - } +// } else { +// reversedCopyResultToDstBuf(pQueryInfo, pRes, pFinalDataPage); +// } pFinalDataPage->numOfElems = 0; return; } - int64_t * pPrimaryKeys = (int64_t *)pLocalReducer->pBufForInterpo; + int64_t *pPrimaryKeys = (int64_t *)pLocalReducer->pBufForInterpo; + SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo; - int64_t actualETime = (pCmd->stime < pCmd->etime) ? pCmd->etime : pCmd->stime; + int64_t actualETime = (pQueryInfo->stime < pQueryInfo->etime) ? 
pQueryInfo->etime : pQueryInfo->stime; - tFilePage **pResPages = malloc(POINTER_BYTES * pCmd->fieldsInfo.numOfOutputCols); - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutputCols); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->maxCapacity); } - char ** srcData = (char **)malloc((POINTER_BYTES + sizeof(int32_t)) * pCmd->fieldsInfo.numOfOutputCols); - int32_t *functions = (int32_t *)((char *)srcData + pCmd->fieldsInfo.numOfOutputCols * sizeof(void *)); + char ** srcData = (char **)malloc((POINTER_BYTES + sizeof(int32_t)) * pQueryInfo->fieldsInfo.numOfOutputCols); + int32_t *functions = (int32_t *)((char *)srcData + pQueryInfo->fieldsInfo.numOfOutputCols * sizeof(void *)); - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - srcData[i] = pLocalReducer->pBufForInterpo + tscFieldInfoGetOffset(pCmd, i) * pInterpoInfo->numOfRawDataInRows; - functions[i] = tscSqlExprGet(pCmd, i)->functionId; + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + srcData[i] = pLocalReducer->pBufForInterpo + tscFieldInfoGetOffset(pQueryInfo, i) * pInterpoInfo->numOfRawDataInRows; + functions[i] = tscSqlExprGet(pQueryInfo, i)->functionId; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); int8_t precision = pMeterMetaInfo->pMeterMeta->precision; while (1) { int32_t remains = taosNumOfRemainPoints(pInterpoInfo); - TSKEY etime = taosGetRevisedEndKey(actualETime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, + TSKEY etime = taosGetRevisedEndKey(actualETime, pQueryInfo->order.order, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, precision); - int32_t nrows = taosGetNumOfResultWithInterpo(pInterpoInfo, pPrimaryKeys, remains, pCmd->nAggTimeInterval, etime, + int32_t nrows = taosGetNumOfResultWithInterpo(pInterpoInfo, pPrimaryKeys, remains, pQueryInfo->nAggTimeInterval, etime, pLocalReducer->resColModel->maxCapacity); - int32_t newRows = taosDoInterpoResult(pInterpoInfo, pCmd->interpoType, pResPages, remains, nrows, - pCmd->nAggTimeInterval, pPrimaryKeys, pLocalReducer->resColModel, srcData, - pCmd->defaultVal, functions, pLocalReducer->resColModel->maxCapacity); + int32_t newRows = taosDoInterpoResult(pInterpoInfo, pQueryInfo->interpoType, pResPages, remains, nrows, + pQueryInfo->nAggTimeInterval, pPrimaryKeys, pLocalReducer->resColModel, srcData, + pQueryInfo->defaultVal, functions, pLocalReducer->resColModel->maxCapacity); assert(newRows <= nrows); - if (pCmd->limit.offset < newRows) { - newRows -= pCmd->limit.offset; + if (pQueryInfo->limit.offset < newRows) { + newRows -= pQueryInfo->limit.offset; - if (pCmd->limit.offset > 0) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); - memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pCmd->limit.offset, newRows * pField->bytes); + if (pQueryInfo->limit.offset > 0) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); + memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pQueryInfo->limit.offset, newRows * 
pField->bytes); } } pRes->data = pLocalReducer->pFinalRes; pRes->numOfRows = newRows; - pRes->numOfTotal += newRows; + pRes->numOfTotalInCurrentClause += newRows; - pCmd->limit.offset = 0; + pQueryInfo->limit.offset = 0; break; } else { - pCmd->limit.offset -= newRows; + pQueryInfo->limit.offset -= newRows; pRes->numOfRows = 0; int32_t rpoints = taosNumOfRemainPoints(pInterpoInfo); @@ -902,7 +935,7 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo /* all output for current group are completed */ int32_t totalRemainRows = - taosGetNumOfResWithoutLimit(pInterpoInfo, pPrimaryKeys, rpoints, pCmd->nAggTimeInterval, actualETime); + taosGetNumOfResWithoutLimit(pInterpoInfo, pPrimaryKeys, rpoints, pQueryInfo->nAggTimeInterval, actualETime); if (totalRemainRows <= 0) { break; } @@ -911,33 +944,33 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo } if (pRes->numOfRows > 0) { - if (pCmd->limit.limit >= 0 && pRes->numOfTotal > pCmd->limit.limit) { - int32_t overFlow = pRes->numOfTotal - pCmd->limit.limit; + if (pQueryInfo->limit.limit >= 0 && pRes->numOfTotalInCurrentClause > pQueryInfo->limit.limit) { + int32_t overFlow = pRes->numOfTotalInCurrentClause - pQueryInfo->limit.limit; pRes->numOfRows -= overFlow; assert(pRes->numOfRows >= 0); - pRes->numOfTotal = pCmd->limit.limit; + pRes->numOfTotalInCurrentClause = pQueryInfo->limit.limit; pFinalDataPage->numOfElems -= overFlow; /* set remain data to be discarded, and reset the interpolation information */ - savePrevRecordAndSetupInterpoInfo(pLocalReducer, pCmd, pInterpoInfo); + savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pInterpoInfo); } - if (pCmd->order.order == TSQL_SO_ASC) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + if (pQueryInfo->order.order == TSQL_SO_ASC) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); memcpy(pRes->data + pLocalReducer->resColModel->colOffset[i] * pRes->numOfRows, pResPages[i]->data, pField->bytes * pRes->numOfRows); } } else { - reversedCopyFromInterpolationToDstBuf(pCmd, pRes, pResPages, pLocalReducer); + reversedCopyFromInterpolationToDstBuf(pQueryInfo, pRes, pResPages, pLocalReducer); } } pFinalDataPage->numOfElems = 0; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { tfree(pResPages[i]); } tfree(pResPages); @@ -961,8 +994,10 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) static void doExecuteSecondaryMerge(SSqlCmd* pCmd, SLocalReducer *pLocalReducer, bool needInit) { // the tag columns need to be set before all functions execution - for(int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { - SSqlExpr * pExpr = tscSqlExprGet(pCmd, j); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + for(int32_t j = 0; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) { + SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, j); SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j]; tVariantAssign(&pCtx->param[0], &pExpr->param[0]); @@ -981,8 +1016,8 @@ static void doExecuteSecondaryMerge(SSqlCmd* pCmd, SLocalReducer *pLocalReducer, } } - for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { - int32_t functionId = tscSqlExprGet(pCmd, j)->functionId; + for (int32_t j = 0; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) { + int32_t 
functionId = tscSqlExprGet(pQueryInfo, j)->functionId; if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { continue; } @@ -999,11 +1034,11 @@ static void handleUnprocessedRow(SSqlCmd* pCmd, SLocalReducer *pLocalReducer, tF } } -static int64_t getNumOfResultLocal(SSqlCmd *pCmd, SQLFunctionCtx *pCtx) { +static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx) { int64_t maxOutput = 0; - - for (int32_t j = 0; j < pCmd->exprsInfo.numOfExprs; ++j) { - int32_t functionId = tscSqlExprGet(pCmd, j)->functionId; + + for (int32_t j = 0; j < pQueryInfo->exprsInfo.numOfExprs; ++j) { + int32_t functionId = tscSqlExprGet(pQueryInfo, j)->functionId; /* * ts, tag, tagprj function can not decide the output number of current query @@ -1026,10 +1061,10 @@ static int64_t getNumOfResultLocal(SSqlCmd *pCmd, SQLFunctionCtx *pCtx) { * filled with the same result, which is the tags, specified in group by clause * */ -static void fillMultiRowsOfTagsVal(SSqlCmd *pCmd, int32_t numOfRes, SLocalReducer *pLocalReducer) { +static void fillMultiRowsOfTagsVal(SQueryInfo* pQueryInfo, int32_t numOfRes, SLocalReducer *pLocalReducer) { int32_t maxBufSize = 0; // find the max tags column length to prepare the buffer - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k); if (maxBufSize < pExpr->resBytes && pExpr->functionId == TSDB_FUNC_TAG) { maxBufSize = pExpr->resBytes; } @@ -1038,8 +1073,8 @@ static void fillMultiRowsOfTagsVal(SSqlCmd *pCmd, int32_t numOfRes, SLocalReduce assert(maxBufSize >= 0); char *buf = malloc((size_t) maxBufSize); - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k); if (pExpr->functionId != TSDB_FUNC_TAG) { continue; } @@ -1059,9 +1094,9 @@ static void fillMultiRowsOfTagsVal(SSqlCmd *pCmd, int32_t numOfRes, SLocalReduce free(buf); } -int32_t finalizeRes(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); +int32_t finalizeRes(SQueryInfo* pQueryInfo, SLocalReducer *pLocalReducer) { + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k); aAggs[pExpr->functionId].xFinalize(&pLocalReducer->pCtx[k]); // allow to re-initialize for the next round @@ -1070,10 +1105,10 @@ int32_t finalizeRes(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { pLocalReducer->hasPrevRow = false; - int32_t numOfRes = (int32_t)getNumOfResultLocal(pCmd, pLocalReducer->pCtx); + int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalReducer->pCtx); pLocalReducer->pResultBuf->numOfElems += numOfRes; - fillMultiRowsOfTagsVal(pCmd, numOfRes, pLocalReducer); + fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalReducer); return numOfRes; } @@ -1084,9 +1119,9 @@ int32_t finalizeRes(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { * results generated by simple aggregation function, we merge them all into one points * *Exception*: column projection query, required no merge procedure */ -bool needToMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) { +bool needToMerge(SQueryInfo* pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) { 
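/* A hedged summary of the decision below: rows produced by column projection
 * (TSDB_FUNC_PRJ) or arithmetic expressions are already final output rows and
 * bypass merging, while aggregate intermediates coming from different vnodes
 * are merged into one result row per group (group equality itself is checked
 * by isSameGroup above). */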
int32_t ret = 0; // merge all result by default - int16_t functionId = tscSqlExprGet(pCmd, 0)->functionId; + int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId; if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) { // column projection query ret = 1; // disable merge procedure @@ -1106,24 +1141,25 @@ bool needToMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpBuff return (ret == 0); } -static bool reachGroupResultLimit(SSqlCmd *pCmd, SSqlRes *pRes) { - return (pRes->numOfGroups >= pCmd->slimit.limit && pCmd->slimit.limit >= 0); +static bool reachGroupResultLimit(SQueryInfo* pQueryInfo, SSqlRes *pRes) { + return (pRes->numOfGroups >= pQueryInfo->slimit.limit && pQueryInfo->slimit.limit >= 0); } static bool saveGroupResultInfo(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); pRes->numOfGroups += 1; // the output group is limited by the slimit clause - if (reachGroupResultLimit(pCmd, pRes)) { + if (reachGroupResultLimit(pQueryInfo, pRes)) { return true; } // pRes->pGroupRec = realloc(pRes->pGroupRec, pRes->numOfGroups*sizeof(SResRec)); // pRes->pGroupRec[pRes->numOfGroups-1].numOfRows = pRes->numOfRows; - // pRes->pGroupRec[pRes->numOfGroups-1].numOfTotal = pRes->numOfTotal; + // pRes->pGroupRec[pRes->numOfGroups-1].numOfTotalInCurrentClause = pRes->numOfTotalInCurrentClause; return false; } @@ -1138,6 +1174,8 @@ static bool saveGroupResultInfo(SSqlObj *pSql) { bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) { SSqlCmd * pCmd = &pSql->cmd; SSqlRes * pRes = &pSql->res; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); tFilePage *pResBuf = pLocalReducer->pResultBuf; tColModel *pModel = pLocalReducer->resColModel; @@ -1147,9 +1185,9 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no * ignore the output of the current group since this group is skipped by user * We set the numOfRows to be 0 and discard the possible remain results. 
*/ - if (pCmd->slimit.offset > 0) { + if (pQueryInfo->slimit.offset > 0) { pRes->numOfRows = 0; - pCmd->slimit.offset -= 1; + pQueryInfo->slimit.offset -= 1; pLocalReducer->discard = !noMoreCurrentGroupRes; return false; } @@ -1163,24 +1201,24 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no #endif SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo; - int32_t startIndex = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupCols; + int32_t startIndex = pQueryInfo->fieldsInfo.numOfOutputCols - pQueryInfo->groupbyExpr.numOfGroupCols; - for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) { + for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { memcpy(pInterpoInfo->pTags[i], pLocalReducer->pBufForInterpo + pModel->colOffset[startIndex + i] * pResBuf->numOfElems, pModel->pFields[startIndex + i].bytes); } - taosInterpoSetStartInfo(&pLocalReducer->interpolationInfo, pResBuf->numOfElems, pCmd->interpoType); + taosInterpoSetStartInfo(&pLocalReducer->interpolationInfo, pResBuf->numOfElems, pQueryInfo->interpoType); doInterpolateResult(pSql, pLocalReducer, noMoreCurrentGroupRes); return true; } -void resetOutputBuf(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { +void resetOutputBuf(SQueryInfo* pQueryInfo, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { pLocalReducer->pCtx[i].aOutputBuf = - pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pCmd, i) * pLocalReducer->resColModel->maxCapacity; + pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pLocalReducer->resColModel->maxCapacity; } memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage)); @@ -1189,18 +1227,21 @@ void resetOutputBuf(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // reset out static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // In handling data in other groups, we need to reset the interpolation information for a new group data pRes->numOfRows = 0; - pRes->numOfTotal = 0; - pCmd->limit.offset = pLocalReducer->offset; + pRes->numOfTotalInCurrentClause = 0; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + pQueryInfo->limit.offset = pLocalReducer->offset; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); int16_t precision = pMeterMetaInfo->pMeterMeta->precision; // for group result interpolation, do not return if not data is generated - if (pCmd->interpoType != TSDB_INTERPO_NONE) { - int64_t stime = (pCmd->stime < pCmd->etime) ? pCmd->stime : pCmd->etime; - int64_t newTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, precision); + if (pQueryInfo->interpoType != TSDB_INTERPO_NONE) { + int64_t stime = (pQueryInfo->stime < pQueryInfo->etime) ? 
pQueryInfo->stime : pQueryInfo->etime; + int64_t newTime = taosGetIntervalStartTimestamp(stime, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, precision); - taosInitInterpoInfo(&pLocalReducer->interpolationInfo, pCmd->order.order, newTime, pCmd->groupbyExpr.numOfGroupCols, + taosInitInterpoInfo(&pLocalReducer->interpolationInfo, pQueryInfo->order.order, newTime, pQueryInfo->groupbyExpr.numOfGroupCols, pLocalReducer->rowSize); } } @@ -1213,22 +1254,23 @@ static bool doInterpolationForCurrentGroup(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); SLocalReducer * pLocalReducer = pRes->pLocalReducer; SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); int8_t p = pMeterMetaInfo->pMeterMeta->precision; if (taosHasRemainsDataForInterpolation(pInterpoInfo)) { - assert(pCmd->interpoType != TSDB_INTERPO_NONE); + assert(pQueryInfo->interpoType != TSDB_INTERPO_NONE); tFilePage *pFinalDataBuf = pLocalReducer->pResultBuf; int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pInterpoInfo->numOfRawDataInRows - 1)); int32_t remain = taosNumOfRemainPoints(pInterpoInfo); - TSKEY ekey = taosGetRevisedEndKey(etime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, p); + TSKEY ekey = taosGetRevisedEndKey(etime, pQueryInfo->order.order, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, p); int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, (TSKEY *)pLocalReducer->pBufForInterpo, remain, - pCmd->nAggTimeInterval, ekey, pLocalReducer->resColModel->maxCapacity); + pQueryInfo->nAggTimeInterval, ekey, pLocalReducer->resColModel->maxCapacity); if (rows > 0) { // do interpo doInterpolateResult(pSql, pLocalReducer, false); } @@ -1248,17 +1290,18 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { bool prevGroupCompleted = (!pLocalReducer->discard) && pLocalReducer->hasUnprocessedRow; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); int8_t precision = pMeterMetaInfo->pMeterMeta->precision; if ((isAllSourcesCompleted(pLocalReducer) && !pLocalReducer->hasPrevRow) || pLocalReducer->pLocalDataSrc[0] == NULL || prevGroupCompleted) { // if interpoType == TSDB_INTERPO_NONE, return directly - if (pCmd->interpoType != TSDB_INTERPO_NONE) { - int64_t etime = (pCmd->stime < pCmd->etime) ? pCmd->etime : pCmd->stime; + if (pQueryInfo->interpoType != TSDB_INTERPO_NONE) { + int64_t etime = (pQueryInfo->stime < pQueryInfo->etime) ? 
pQueryInfo->etime : pQueryInfo->stime; - etime = taosGetRevisedEndKey(etime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, precision); - int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, NULL, 0, pCmd->nAggTimeInterval, etime, + etime = taosGetRevisedEndKey(etime, pQueryInfo->order.order, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, precision); + int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, NULL, 0, pQueryInfo->nAggTimeInterval, etime, pLocalReducer->resColModel->maxCapacity); if (rows > 0) { // do interpo doInterpolateResult(pSql, pLocalReducer, true); @@ -1289,10 +1332,12 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { static void doMergeWithPrevRows(SSqlObj *pSql, int32_t numOfRes) { SSqlCmd * pCmd = &pSql->cmd; SSqlRes * pRes = &pSql->res; + SLocalReducer *pLocalReducer = pRes->pLocalReducer; - - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k); SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k]; pCtx->aOutputBuf += pCtx->outputBytes * numOfRes; @@ -1306,26 +1351,22 @@ static void doMergeWithPrevRows(SSqlObj *pSql, int32_t numOfRes) { doExecuteSecondaryMerge(pCmd, pLocalReducer, true); } -int32_t tscLocalDoReduce(SSqlObj *pSql) { +int32_t tscDoLocalreduce(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - + + tscResetForNextRetrieve(pRes); + if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed tscTrace("%s call the drop local reducer", __FUNCTION__); tscDestroyLocalReducer(pSql); - if (pRes) { - pRes->numOfRows = 0; - pRes->row = 0; - } return 0; } - - pRes->row = 0; - pRes->numOfRows = 0; - + SLocalReducer *pLocalReducer = pRes->pLocalReducer; - + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + // set the data merge in progress int32_t prevStatus = atomic_val_compare_exchange_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, TSC_LOCALREDUCE_IN_PROGRESS); @@ -1372,7 +1413,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { #if defined(_DEBUG_VIEW) printf("chosen row:\t"); SSrcColumnInfo colInfo[256] = {0}; - tscGetSrcColumnInfo(colInfo, pCmd); + tscGetSrcColumnInfo(colInfo, pQueryInfo); tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->numOfElems, pModel->maxCapacity, colInfo); #endif @@ -1408,7 +1449,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { } if (pLocalReducer->hasPrevRow) { - if (needToMerge(pCmd, pLocalReducer, tmpBuffer)) { + if (needToMerge(pQueryInfo, pLocalReducer, tmpBuffer)) { // belong to the group of the previous row, continue process it doExecuteSecondaryMerge(pCmd, pLocalReducer, false); @@ -1419,7 +1460,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { * current row does not belong to the group of previous row. * so the processing of previous group is completed. 
*/ - int32_t numOfRes = finalizeRes(pCmd, pLocalReducer); + int32_t numOfRes = finalizeRes(pQueryInfo, pLocalReducer); bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer); tFilePage *pResBuf = pLocalReducer->pResultBuf; @@ -1442,7 +1483,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { pLocalReducer->hasUnprocessedRow = true; } - resetOutputBuf(pCmd, pLocalReducer); + resetOutputBuf(pQueryInfo, pLocalReducer); pOneDataSrc->rowIdx += 1; // here we do not check the return value @@ -1496,7 +1537,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { } if (pLocalReducer->hasPrevRow) { - finalizeRes(pCmd, pLocalReducer); + finalizeRes(pQueryInfo, pLocalReducer); } if (pLocalReducer->pResultBuf->numOfElems) { diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index ba80710d45688f7e381dbd1f321e4dce0bd78bae..677f728472382fe35990a4aea2c8f6e2eb4a8505 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -18,46 +18,81 @@ #include "trpc.h" #include "tscJoinProcess.h" #include "tscProfile.h" +#include "tscSQLParser.h" #include "tscSecondaryMerge.h" #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" #include "tscompression.h" #include "tsocket.h" -#include "tscSQLParser.h" #include "ttime.h" #include "ttimer.h" #include "tutil.h" #define TSC_MGMT_VNODE 999 -#ifdef CLUSTER - SIpStrList tscMgmtIpList; - int tsMasterIndex = 0; - int tsSlaveIndex = 1; -#else - int tsMasterIndex = 0; - int tsSlaveIndex = 0; // slave == master for single node edition - uint32_t tsServerIp; -#endif +SIpStrList tscMgmtIpList; +int tsMasterIndex = 0; +int tsSlaveIndex = 1; + +int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0}; -int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql); int (*tscProcessMsgRsp[TSDB_SQL_MAX])(SSqlObj *pSql); +char *doBuildMsgHeader(SSqlObj *pSql, char **pStart); void (*tscUpdateVnodeMsg[TSDB_SQL_MAX])(SSqlObj *pSql, char *buf); void tscProcessActivityTimer(void *handle, void *tmrId); int tscKeepConn[TSDB_SQL_MAX] = {0}; +TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid); +void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts); +void tscSaveSubscriptionProgress(void* sub); static int32_t minMsgSize() { return tsRpcHeadSize + sizeof(STaosDigest); } -#ifdef CLUSTER void tscPrintMgmtIp() { if (tscMgmtIpList.numOfIps <= 0) { - tscError("invalid IP list:%d", tscMgmtIpList.numOfIps); + tscError("invalid mgmt IP list:%d", tscMgmtIpList.numOfIps); } else { - for (int i = 0; i < tscMgmtIpList.numOfIps; ++i) tscTrace("mgmt index:%d ip:%s", i, tscMgmtIpList.ipstr[i]); + for (int i = 0; i < tscMgmtIpList.numOfIps; ++i) { + tscTrace("mgmt index:%d ip:%s", i, tscMgmtIpList.ipstr[i]); + } + } +} + +void tscSetMgmtIpListFromCluster(SIpList *pIpList) { + tscMgmtIpList.numOfIps = pIpList->numOfIps; + if (memcmp(tscMgmtIpList.ip, pIpList->ip, pIpList->numOfIps * 4) != 0) { + for (int i = 0; i < pIpList->numOfIps; ++i) { + tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); + tscMgmtIpList.ip[i] = pIpList->ip[i]; + } + tscTrace("cluster mgmt IP list:"); + tscPrintMgmtIp(); + } +} + +void tscSetMgmtIpListFromEdge() { + if (tscMgmtIpList.numOfIps != 2) { + tscMgmtIpList.numOfIps = 2; + strcpy(tscMgmtIpList.ipstr[0], tsMasterIp); + tscMgmtIpList.ip[0] = inet_addr(tsMasterIp); + strcpy(tscMgmtIpList.ipstr[1], tsMasterIp); + tscMgmtIpList.ip[1] = inet_addr(tsMasterIp); + tscTrace("edge mgmt IP list:"); + tscPrintMgmtIp(); + } +} + +void tscSetMgmtIpList(SIpList *pIpList) { + /* + * The iplist returned by the 
cluster edition is the current management nodes + * and the iplist returned by the edge edition is empty + */ + if (pIpList->numOfIps != 0) { + tscSetMgmtIpListFromCluster(pIpList); + } else { + tscSetMgmtIpListFromEdge(); } } -#endif /* * For each management node, try twice at least in case of poor network situation. @@ -68,11 +103,7 @@ void tscPrintMgmtIp() { */ static int32_t tscGetMgmtConnMaxRetryTimes() { int32_t factor = 2; -#ifdef CLUSTER return tscMgmtIpList.numOfIps * factor; -#else - return 1*factor; -#endif } void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { @@ -88,18 +119,9 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { if (code == 0) { SHeartBeatRsp *pRsp = (SHeartBeatRsp *)pRes->pRsp; -#ifdef CLUSTER SIpList * pIpList = &pRsp->ipList; - tscMgmtIpList.numOfIps = pIpList->numOfIps; - if (memcmp(tscMgmtIpList.ip, pIpList->ip, pIpList->numOfIps * 4) != 0) { - for (int i = 0; i < pIpList->numOfIps; ++i) { - tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); - tscMgmtIpList.ip[i] = pIpList->ip[i]; - } - tscTrace("new mgmt IP list:"); - tscPrintMgmtIp(); - } -#endif + tscSetMgmtIpList(pIpList); + if (pRsp->killConnection) { tscKillConnection(pObj); } else { @@ -125,7 +147,11 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { if (NULL == pSql) return; pSql->fp = tscProcessHeartBeatRsp; - pSql->cmd.command = TSDB_SQL_HB; + + SQueryInfo *pQueryInfo = NULL; + tscGetQueryInfoDetailSafely(&pSql->cmd, 0, &pQueryInfo); + pQueryInfo->command = TSDB_SQL_HB; + if (TSDB_CODE_SUCCESS != tscAllocPayload(&(pSql->cmd), TSDB_DEFAULT_PAYLOAD_SIZE)) { tfree(pSql); return; @@ -135,6 +161,8 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { pSql->pTscObj = pObj; pSql->signature = pSql; pObj->pHb = pSql; + tscAddSubqueryInfo(&pObj->pHb->cmd); + tscTrace("%p pHb is allocated, pObj:%p", pObj->pHb, pObj); } @@ -152,19 +180,12 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) { STscObj *pTscObj = pSql->pTscObj; -#ifdef CLUSTER if (pSql->retry < tscGetMgmtConnMaxRetryTimes()) { *pCode = 0; pSql->retry++; pSql->index = pSql->index % tscMgmtIpList.numOfIps; if (pSql->cmd.command > TSDB_SQL_READ && pSql->index == 0) pSql->index = 1; void *thandle = taosGetConnFromCache(tscConnCache, tscMgmtIpList.ip[pSql->index], TSC_MGMT_VNODE, pTscObj->user); -#else - if (pSql->retry < tscGetMgmtConnMaxRetryTimes()) { - *pCode = 0; - pSql->retry++; - void *thandle = taosGetConnFromCache(tscConnCache, tsServerIp, TSC_MGMT_VNODE, pTscObj->user); -#endif if (thandle == NULL) { SRpcConnInit connInit; @@ -180,32 +201,23 @@ void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) { connInit.encrypt = 0; connInit.secret = pSql->pTscObj->pass; -#ifdef CLUSTER connInit.peerIp = tscMgmtIpList.ipstr[pSql->index]; -#else - connInit.peerIp = tsMasterIp; -#endif thandle = taosOpenRpcConn(&connInit, pCode); } pSql->thandle = thandle; -#ifdef CLUSTER pSql->ip = tscMgmtIpList.ip[pSql->index]; pSql->vnode = TSC_MGMT_VNODE; tscTrace("%p mgmt index:%d ip:0x%x is picked up, pConn:%p", pSql, pSql->index, tscMgmtIpList.ip[pSql->index], pSql->thandle); -#else - pSql->ip = tsServerIp; - pSql->vnode = TSC_MGMT_VNODE; -#endif } - + // the pSql->res.code is the previous error(status) code. 
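/* A hedged note on the retry budget implied above: with the edge/cluster code
 * paths unified, tscGetMgmtConnMaxRetryTimes() always evaluates to
 * tscMgmtIpList.numOfIps * 2. The edge path seeds two entries (both
 * tsMasterIp), so either edition gets 2 * 2 = 4 connection attempts before
 * the branch below surfaces the previous error code. */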
if (pSql->thandle == NULL && pSql->retry >= pSql->maxRetry) { if (pSql->res.code != TSDB_CODE_SUCCESS && pSql->res.code != TSDB_CODE_ACTION_IN_PROGRESS) { *pCode = pSql->res.code; } - + tscError("%p reach the max retry:%d, code:%d", pSql, pSql->retry, *pCode); } } @@ -218,9 +230,9 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { pSql->thandle = NULL; SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // multiple vnode query + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { // multiple vnode query SVnodeSidList *vnodeList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pMeterMetaInfo->vnodeIndex); if (vnodeList != NULL) { pVPeersDesc = vnodeList->vpeerDesc; @@ -242,11 +254,13 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { while (pSql->retry < pSql->maxRetry) { (pSql->retry)++; -#ifdef CLUSTER char ipstr[40] = {0}; if (pVPeersDesc[pSql->index].ip == 0) { - (pSql->index) = (pSql->index + 1) % TSDB_VNODES_SUPPORT; - continue; + /* + * in the edge edition, ip is 0, and at this time we use masterIp instead + * in the cluster edition, ip is vnode ip + */ + pVPeersDesc[pSql->index].ip = tscMgmtIpList.ip[0]; } *pCode = TSDB_CODE_SUCCESS; @@ -276,41 +290,16 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { pSql->vnode = pVPeersDesc[pSql->index].vnode; tscTrace("%p vnode:%d ip:%p index:%d is picked up, pConn:%p", pSql, pVPeersDesc[pSql->index].vnode, pVPeersDesc[pSql->index].ip, pSql->index, pSql->thandle); -#else - *pCode = 0; - void *thandle = taosGetConnFromCache(tscConnCache, tsServerIp, pVPeersDesc[0].vnode, pTscObj->user); - - if (thandle == NULL) { - SRpcConnInit connInit; - memset(&connInit, 0, sizeof(connInit)); - connInit.cid = vidIndex; - connInit.sid = 0; - connInit.spi = 0; - connInit.encrypt = 0; - connInit.meterId = pSql->pTscObj->user; - connInit.peerId = htonl((pVPeersDesc[0].vnode << TSDB_SHELL_VNODE_BITS)); - connInit.shandle = pVnodeConn; - connInit.ahandle = pSql; - connInit.peerIp = tsMasterIp; - connInit.peerPort = tsVnodeShellPort; - thandle = taosOpenRpcConn(&connInit, pCode); - vidIndex = (vidIndex + 1) % tscNumOfThreads; - } - - pSql->thandle = thandle; - pSql->ip = tsServerIp; - pSql->vnode = pVPeersDesc[0].vnode; -#endif break; } - + // the pSql->res.code is the previous error(status) code. if (pSql->thandle == NULL && pSql->retry >= pSql->maxRetry) { if (pSql->res.code != TSDB_CODE_SUCCESS && pSql->res.code != TSDB_CODE_ACTION_IN_PROGRESS) { *pCode = pSql->res.code; } - + tscError("%p reach the max retry:%d, code:%d", pSql, pSql->retry, *pCode); } } @@ -352,14 +341,14 @@ int tscSendMsgToServer(SSqlObj *pSql) { * this SQL object may be released by other thread due to the completion of this query even before the log * is dumped to log file. So the signature needs to be kept in a local variable. 
*/ - uint64_t signature = (uint64_t) pSql->signature; + uint64_t signature = (uint64_t)pSql->signature; if (tscUpdateVnodeMsg[pSql->cmd.command]) (*tscUpdateVnodeMsg[pSql->cmd.command])(pSql, buf); - + int ret = taosSendMsgToPeerH(pSql->thandle, pStart, pSql->cmd.payloadLen, pSql); if (ret >= 0) { code = 0; } - + tscTrace("%p send msg ret:%d code:%d sig:%p", pSql, ret, code, signature); } } @@ -367,15 +356,9 @@ int tscSendMsgToServer(SSqlObj *pSql) { return code; } -#ifdef CLUSTER void tscProcessMgmtRedirect(SSqlObj *pSql, uint8_t *cont) { SIpList *pIpList = (SIpList *)(cont); - tscMgmtIpList.numOfIps = pIpList->numOfIps; - for (int i = 0; i < pIpList->numOfIps; ++i) { - tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); - tscMgmtIpList.ip[i] = pIpList->ip[i]; - tscTrace("Update mgmt Ip, index:%d ip:%s", i, tscMgmtIpList.ipstr[i]); - } + tscSetMgmtIpList(pIpList); if (pSql->cmd.command < TSDB_SQL_READ) { tsMasterIndex = 0; @@ -386,7 +369,6 @@ void tscProcessMgmtRedirect(SSqlObj *pSql, uint8_t *cont) { tscPrintMgmtIp(); } -#endif void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { if (ahandle == NULL) return NULL; @@ -418,15 +400,11 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { return ahandle; } - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); if (msg == NULL) { - tscTrace("%p no response from ip:0x%x", pSql, pSql->ip); - -#ifdef CLUSTER + tscTrace("%p no response from ip:%s", pSql, taosIpStr(pSql->ip)); + pSql->index++; -#else - // for single node situation, do NOT try next index -#endif pSql->thandle = NULL; // todo taos_stop_query() in async model /* @@ -442,12 +420,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { // renew meter meta in case it is changed if (pCmd->command < TSDB_SQL_FETCH && pRes->code != TSDB_CODE_QUERY_CANCELLED) { -#ifdef CLUSTER pSql->maxRetry = TSDB_VNODES_SUPPORT * 2; -#else - // for fetch, it shall not renew meter meta - pSql->maxRetry = 2; -#endif code = tscRenewMeterMeta(pSql, pMeterMetaInfo->name); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return pSql; @@ -460,8 +433,6 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { } else { uint16_t rspCode = pMsg->content[0]; -#ifdef CLUSTER - if (rspCode == TSDB_CODE_REDIRECT) { tscTrace("%p it shall be redirected!", pSql); taosAddConnIntoCache(tscConnCache, thandle, pSql->ip, pSql->vnode, pObj->user); @@ -469,7 +440,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { if (pCmd->command > TSDB_SQL_MGMT) { tscProcessMgmtRedirect(pSql, pMsg->content + 1); - } else if (pCmd->command == TSDB_SQL_INSERT){ + } else if (pCmd->command == TSDB_SQL_INSERT) { pSql->index++; pSql->maxRetry = TSDB_VNODES_SUPPORT * 2; } else { @@ -480,26 +451,32 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { if (code == 0) return pSql; msg = NULL; } else if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID || - rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || rspCode == TSDB_CODE_INVALID_VNODE_ID || - rspCode == TSDB_CODE_TABLE_ID_MISMATCH || rspCode == TSDB_CODE_NETWORK_UNAVAIL) { -#else - if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID || - rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || rspCode == TSDB_CODE_INVALID_VNODE_ID || - rspCode == TSDB_CODE_TABLE_ID_MISMATCH || rspCode == TSDB_CODE_NETWORK_UNAVAIL) { -#endif - 
pSql->thandle = NULL; + rspCode == TSDB_CODE_INVALID_VNODE_ID || rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || + rspCode == TSDB_CODE_NETWORK_UNAVAIL || rspCode == TSDB_CODE_NOT_ACTIVE_SESSION || + rspCode == TSDB_CODE_TABLE_ID_MISMATCH) { + /* + * not_active_table: 1. the virtual node may fail to create the table, because table creation is asynchronous; + * it may not have created the table yet, so try again using the new metermeta. + * 2. the requested table may have been removed by another client, so we need to renew the + * metermeta here. + * + * not_active_vnode: the current vnode has moved to another node due to the node-balancing procedure, or the + * virtual node has been removed. So, renew the metermeta and try again. + * not_active_session: the db has moved to another node; the vnode no longer exists on this dnode. + */ + pSql->thandle = NULL; taosAddConnIntoCache(tscConnCache, thandle, pSql->ip, pSql->vnode, pObj->user); - + if (pCmd->command == TSDB_SQL_CONNECT) { code = TSDB_CODE_NETWORK_UNAVAIL; } else if (pCmd->command == TSDB_SQL_HB) { code = TSDB_CODE_NOT_READY; } else { tscTrace("%p it shall renew meter meta, code:%d", pSql, rspCode); - + pSql->maxRetry = TSDB_VNODES_SUPPORT * 2; - pSql->res.code = (uint8_t) rspCode; // keep the previous error code - + pSql->res.code = (uint8_t)rspCode; // keep the previous error code + code = tscRenewMeterMeta(pSql, pMeterMetaInfo->name); if (code == TSDB_CODE_ACTION_IN_PROGRESS) return pSql; @@ -631,17 +608,18 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { } static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj); -static int tscLaunchMetricSubQueries(SSqlObj *pSql); +static int tscLaunchSTableSubqueries(SSqlObj *pSql); // todo merge with callback int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSubquerySupporter *pSupporter) { - SSqlCmd *pCmd = &pSql->cmd; + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); pSql->res.qhandle = 0x1; pSql->res.numOfRows = 0; if (pSql->pSubs == NULL) { - pSql->pSubs = malloc(POINTER_BYTES * pSupporter->pState->numOfTotal); + pSql->pSubs = calloc(pSupporter->pState->numOfTotal, POINTER_BYTES); if (pSql->pSubs == NULL) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } @@ -651,40 +629,45 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSubquerySu if (pNew == NULL) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } - + pSql->pSubs[pSql->numOfSubs++] = pNew; assert(pSql->numOfSubs <= pSupporter->pState->numOfTotal); - if (QUERY_IS_JOIN_QUERY(pCmd->type)) { - addGroupInfoForSubquery(pSql, pNew, tableIndex); + if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) { + addGroupInfoForSubquery(pSql, pNew, 0, tableIndex); // refactor as one method - tscColumnBaseInfoUpdateTableIndex(&pNew->cmd.colList, 0); - tscColumnBaseInfoCopy(&pSupporter->colList, &pNew->cmd.colList, 0); + SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + assert(pNewQueryInfo != NULL); - tscSqlExprCopy(&pSupporter->exprsInfo, &pNew->cmd.exprsInfo, pSupporter->uid); + tscColumnBaseInfoUpdateTableIndex(&pNewQueryInfo->colList, 0); + tscColumnBaseInfoCopy(&pSupporter->colList, &pNewQueryInfo->colList, 0); - tscFieldInfoCopyAll(&pNew->cmd.fieldsInfo, &pSupporter->fieldsInfo); - tscTagCondCopy(&pSupporter->tagCond, &pNew->cmd.tagCond); - pSupporter->groupbyExpr = pNew->cmd.groupbyExpr; + tscSqlExprCopy(&pSupporter->exprsInfo, &pNewQueryInfo->exprsInfo, pSupporter->uid); + +
tscFieldInfoCopyAll(&pSupporter->fieldsInfo, &pNewQueryInfo->fieldsInfo); + tscTagCondCopy(&pSupporter->tagCond, &pNewQueryInfo->tagCond); pNew->cmd.numOfCols = 0; - pNew->cmd.nAggTimeInterval = 0; - memset(&pNew->cmd.limit, 0, sizeof(SLimitVal)); - memset(&pNew->cmd.groupbyExpr, 0, sizeof(SSqlGroupbyExpr)); + pNewQueryInfo->nAggTimeInterval = 0; + memset(&pNewQueryInfo->limit, 0, sizeof(SLimitVal)); + + // backup the data and clear it in the sqlcmd object + pSupporter->groupbyExpr = pNewQueryInfo->groupbyExpr; + memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr)); // set the ts,tags that involved in join, as the output column of intermediate result - tscFreeSqlCmdData(&pNew->cmd); + tscClearSubqueryInfo(&pNew->cmd); SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1}; SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscAddSpecialColumnForSelect(&pNew->cmd, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL); + tscAddSpecialColumnForSelect(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL); // set the tags value for ts_comp function - SSqlExpr *pExpr = tscSqlExprGet(&pNew->cmd, 0); + SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0); - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pNewQueryInfo, 0); int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pSupporter->tagCond, pMeterMetaInfo->pMeterMeta->uid); pExpr->param->i64Key = tagColIndex; @@ -694,16 +677,31 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSubquerySu for (int32_t i = 0; i < pSupporter->colList.numOfCols; ++i) { SColumnBase *pColBase = &pSupporter->colList.pColList[i]; if (pColBase->numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered. 
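/* A hedged reading of the loop below: the ts_comp subquery outputs only the
 * compressed timestamp column, so the only columns worth carrying into its
 * column list are those with filter conditions attached; the remaining
 * columns are restored from pSupporter during the second stage of the join. */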
- tscColumnBaseCopy(&pNew->cmd.colList.pColList[pNew->cmd.colList.numOfCols], pColBase); - pNew->cmd.colList.numOfCols++; + tscColumnBaseCopy(&pNewQueryInfo->colList.pColList[pNewQueryInfo->colList.numOfCols], pColBase); + pNewQueryInfo->colList.numOfCols++; } } + + tscTrace("%p subquery:%p tableIndex:%d, vnodeIdx:%d, type:%d, transfer to ts_comp query to retrieve timestamps, " + "exprInfo:%d, colList:%d, fieldsInfo:%d, name:%s", + pSql, pNew, tableIndex, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type, + pNewQueryInfo->exprsInfo.numOfExprs, pNewQueryInfo->colList.numOfCols, + pNewQueryInfo->fieldsInfo.numOfOutputCols, pNewQueryInfo->pMeterInfo[0]->name); + tscPrintSelectClause(pNew, 0); } else { - pNew->cmd.type |= TSDB_QUERY_TYPE_SUBQUERY; + SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + pNewQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY; } - + #ifdef _DEBUG_VIEW - tscPrintSelectClause(&pNew->cmd); + tscPrintSelectClause(pNew, 0); #endif return tscProcessSql(pNew); @@ -713,30 +711,31 @@ int doProcessSql(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - int32_t code = TSDB_CODE_SUCCESS; - void *asyncFp = pSql->fp; - if (tscBuildMsg[pCmd->command](pSql) < 0) { // build msg failed - code = TSDB_CODE_APP_ERROR; - } else { - code = tscSendMsgToServer(pSql); + if (pCmd->command == TSDB_SQL_SELECT || pCmd->command == TSDB_SQL_FETCH || pCmd->command == TSDB_SQL_RETRIEVE || + pCmd->command == TSDB_SQL_INSERT || pCmd->command == TSDB_SQL_CONNECT || pCmd->command == TSDB_SQL_HB || + pCmd->command == TSDB_SQL_META || pCmd->command == TSDB_SQL_METRIC) { + tscBuildMsg[pCmd->command](pSql, NULL); } + + int32_t code = tscSendMsgToServer(pSql); + if (asyncFp) { - if (code != 0) { + if (code != TSDB_CODE_SUCCESS) { pRes->code = code; tscQueueAsyncRes(pSql); } return 0; } - if (code != 0) { + if (code != TSDB_CODE_SUCCESS) { pRes->code = code; return code; } tsem_wait(&pSql->rspSem); - if (pRes->code == 0 && tscProcessMsgRsp[pCmd->command]) (*tscProcessMsgRsp[pCmd->command])(pSql); + if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command]) (*tscProcessMsgRsp[pCmd->command])(pSql); tsem_post(&pSql->emptyRspSem); @@ -744,35 +743,43 @@ int doProcessSql(SSqlObj *pSql) { } int tscProcessSql(SSqlObj *pSql) { - char * name = NULL; - SSqlRes * pRes = &pSql->res; - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + char * name = NULL; + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo *pMeterMetaInfo = NULL; + int16_t type = 0; - if (pMeterMetaInfo != NULL) { - name = pMeterMetaInfo->name; + if (pQueryInfo != NULL) { + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + if (pMeterMetaInfo != NULL) { + name = pMeterMetaInfo->name; + } + + type = pQueryInfo->type; + + // for heartbeat, numOfTables == 0; + assert((pQueryInfo->numOfTables == 0 && pQueryInfo->command == TSDB_SQL_HB) || pQueryInfo->numOfTables > 0); } - tscTrace("%p SQL cmd:%d will be processed, name:%s, 
type:%d", pSql, pSql->cmd.command, name, pSql->cmd.type); + tscTrace("%p SQL cmd:%d will be processed, name:%s, type:%d", pSql, pCmd->command, name, type); pSql->retry = 0; if (pSql->cmd.command < TSDB_SQL_MGMT) { -#ifdef CLUSTER pSql->maxRetry = TSDB_VNODES_SUPPORT; -#else - pSql->maxRetry = 2; -#endif - + // the pMeterMetaInfo cannot be NULL if (pMeterMetaInfo == NULL) { pSql->res.code = TSDB_CODE_OTHERS; return pSql->res.code; } - + if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { pSql->index = pMeterMetaInfo->pMeterMeta->index; } else { // it must be the parent SSqlObj for super table query - if ((pSql->cmd.type & TSDB_QUERY_TYPE_SUBQUERY) != 0) { - int32_t idx = pMeterMetaInfo->vnodeIndex; + if ((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) != 0) { + int32_t idx = pMeterMetaInfo->vnodeIndex; + SVnodeSidList *pSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, idx); pSql->index = pSidList->index; } @@ -784,17 +791,18 @@ int tscProcessSql(SSqlObj *pSql) { } // todo handle async situation - if (QUERY_IS_JOIN_QUERY(pSql->cmd.type)) { - if ((pSql->cmd.type & TSDB_QUERY_TYPE_SUBQUERY) == 0) { + if (QUERY_IS_JOIN_QUERY(type)) { + if ((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) == 0) { SSubqueryState *pState = calloc(1, sizeof(SSubqueryState)); - pState->numOfTotal = pSql->cmd.numOfTables; - for (int32_t i = 0; i < pSql->cmd.numOfTables; ++i) { + pState->numOfTotal = pQueryInfo->numOfTables; + + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { SJoinSubquerySupporter *pSupporter = tscCreateJoinSupporter(pSql, pState, i); if (pSupporter == NULL) { // failed to create support struct, abort current query tscError("%p tableIndex:%d, failed to allocate join support object, abort further query", pSql, i); - pState->numOfCompleted = pSql->cmd.numOfTables - i - 1; + pState->numOfCompleted = pQueryInfo->numOfTables - i - 1; pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; return pSql->res.code; @@ -809,10 +817,10 @@ int tscProcessSql(SSqlObj *pSql) { } } - sem_post(&pSql->emptyRspSem); - sem_wait(&pSql->rspSem); + tsem_post(&pSql->emptyRspSem); + tsem_wait(&pSql->rspSem); - sem_post(&pSql->emptyRspSem); + tsem_post(&pSql->emptyRspSem); if (pSql->numOfSubs <= 0) { pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; @@ -823,16 +831,16 @@ int tscProcessSql(SSqlObj *pSql) { return TSDB_CODE_SUCCESS; } else { // for first stage sub query, iterate all vnodes to get all timestamp - if ((pSql->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { + if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { return doProcessSql(pSql); } } } - if (tscIsTwoStageMergeMetricQuery(pCmd)) { + if (tscIsTwoStageMergeMetricQuery(pQueryInfo, 0)) { /* * (ref. line: 964) - * Before this function returns from tscLaunchMetricSubQueries and continues, pSql may have been released at user + * Before this function returns from tscLaunchSTableSubqueries and continues, pSql may have been released at user * program context after retrieving all data from vnodes. User function is called at tscRetrieveFromVnodeCallBack. 
* * when pSql being released, pSql->fp == NULL, it may pass the check of pSql->fp == NULL, @@ -840,14 +848,14 @@ int tscProcessSql(SSqlObj *pSql) { */ void *fp = pSql->fp; - if (tscLaunchMetricSubQueries(pSql) != TSDB_CODE_SUCCESS) { + if (tscLaunchSTableSubqueries(pSql) != TSDB_CODE_SUCCESS) { return pRes->code; } if (fp == NULL) { - sem_post(&pSql->emptyRspSem); - sem_wait(&pSql->rspSem); - sem_post(&pSql->emptyRspSem); + tsem_post(&pSql->emptyRspSem); + tsem_wait(&pSql->rspSem); + tsem_post(&pSql->emptyRspSem); // set the command flag must be after the semaphore been correctly set. pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC; @@ -859,37 +867,36 @@ int tscProcessSql(SSqlObj *pSql) { return doProcessSql(pSql); } -static void doCleanupSubqueries(SSqlObj *pSql, int32_t vnodeIndex, int32_t numOfVnodes, SRetrieveSupport *pTrs, - tOrderDescriptor *pDesc, tColModel *pModel, tExtMemBuffer **pMemoryBuf, - SSubqueryState *pState) { - pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC; - pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; - - /* - * if i > 0, at least one sub query is issued, the allocated resource is - * freed by it when subquery completed. - */ - if (vnodeIndex == 0) { - tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfVnodes); - tfree(pState); - - if (pTrs != NULL) { - tfree(pTrs->localBuffer); - - pthread_mutex_unlock(&pTrs->queryMutex); - pthread_mutex_destroy(&pTrs->queryMutex); - tfree(pTrs); - } +static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs, SSubqueryState* pState) { + assert(numOfSubs <= pSql->numOfSubs && numOfSubs >= 0 && pState != NULL); + + for(int32_t i = 0; i < numOfSubs; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + assert(pSub != NULL); + + SRetrieveSupport* pSupport = pSub->param; + + tfree(pSupport->localBuffer); + + pthread_mutex_unlock(&pSupport->queryMutex); + pthread_mutex_destroy(&pSupport->queryMutex); + + tfree(pSupport); + + tscFreeSqlObj(pSub); } + + free(pState); } -int tscLaunchMetricSubQueries(SSqlObj *pSql) { +int tscLaunchSTableSubqueries(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; // pRes->code check only serves in launching metric sub-queries if (pRes->code == TSDB_CODE_QUERY_CANCELLED) { - pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC; // enable the abort of kill metric function. - return pSql->res.code; + pCmd->command = TSDB_SQL_RETRIEVE_METRIC; // enable the abort of kill metric function. 
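The comment in this hunk is worth dwelling on: `pSql->fp` is copied into a local `fp` before the subqueries are launched, because in async mode the user callback may free `pSql` before control returns here, while a sync caller (`fp == NULL`) blocks on the semaphores instead. Below is a minimal, self-contained sketch of that dual sync/async completion pattern; the `Request`, `deliver`, and `process` names are hypothetical stand-ins for `SSqlObj` and the tsc routines, not the tree's actual API.

```c
#include <semaphore.h>
#include <stdio.h>

/* Hypothetical stand-in for SSqlObj: fp != NULL selects async mode. */
typedef struct {
  void (*fp)(void *param);  /* user callback, or NULL for blocking mode */
  void *param;
  sem_t rspSem;             /* posted when the result is ready */
} Request;

/* Completion side: deliver the result either way. */
static void deliver(Request *req) {
  if (req->fp != NULL) {
    req->fp(req->param);    /* async: the user callback may free req */
  } else {
    sem_post(&req->rspSem); /* sync: wake the waiting caller */
  }
}

/* Caller side: capture fp BEFORE launching, because in async mode the
 * request may complete (and be freed) before control returns here. */
static void process(Request *req) {
  void (*fp)(void *) = req->fp;
  deliver(req);             /* in the real client this runs on another thread */
  if (fp == NULL) {         /* do not touch req again in async mode */
    sem_wait(&req->rspSem);
  }
}

static void onDone(void *param) { printf("done: %s\n", (const char *)param); }

int main(void) {
  Request syncReq = {NULL, NULL};
  sem_init(&syncReq.rspSem, 0, 0);
  process(&syncReq);        /* returns once the semaphore is posted */

  Request asyncReq = {onDone, "async"};
  process(&asyncReq);       /* returns after the callback has run */
  return 0;
}
```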
+ return pRes->code; } tExtMemBuffer ** pMemoryBuf = NULL; @@ -898,10 +905,12 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) { pRes->qhandle = 1; // hack the qhandle check - const uint32_t nBufferSize = (1 << 16); // 64KB - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - int32_t numOfVnodes = pMeterMetaInfo->pMetricMeta->numOfVnodes; - assert(numOfVnodes > 0); + const uint32_t nBufferSize = (1 << 16); // 64KB + + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + int32_t numOfSubQueries = pMeterMetaInfo->pMetricMeta->numOfVnodes; + assert(numOfSubQueries > 0); int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, nBufferSize); if (ret != 0) { @@ -912,36 +921,33 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) { return pRes->code; } - pSql->pSubs = malloc(POINTER_BYTES * numOfVnodes); - pSql->numOfSubs = numOfVnodes; + pSql->pSubs = calloc(numOfSubQueries, POINTER_BYTES); + pSql->numOfSubs = numOfSubQueries; - tscTrace("%p retrieved query data from %d vnode(s)", pSql, numOfVnodes); + tscTrace("%p retrieved query data from %d vnode(s)", pSql, numOfSubQueries); SSubqueryState *pState = calloc(1, sizeof(SSubqueryState)); - pState->numOfTotal = numOfVnodes; + pState->numOfTotal = numOfSubQueries; pRes->code = TSDB_CODE_SUCCESS; - for (int32_t i = 0; i < numOfVnodes; ++i) { - if (pRes->code == TSDB_CODE_QUERY_CANCELLED || pRes->code == TSDB_CODE_CLI_OUT_OF_MEMORY) { - /* - * during launch sub queries, if the master query is cancelled. the remain is ignored and set the retrieveDoneRec - * to the value of remaining not built sub-queries. So, the already issued sub queries can successfully free - * allocated resources. 
-     */
-    pState->numOfCompleted = (numOfVnodes - i);
-    doCleanupSubqueries(pSql, i, numOfVnodes, NULL, pDesc, pModel, pMemoryBuf, pState);
-
-    if (i == 0) {
-      return pSql->res.code;
-    }
-
+  int32_t i = 0;
+  for (; i < numOfSubQueries; ++i) {
+    SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport));
+    if (trs == NULL) {
+      tscError("%p failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
       break;
     }
-
-    SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport));
+
     trs->pExtMemBuffer = pMemoryBuf;
     trs->pOrderDescriptor = pDesc;
     trs->pState = pState;
+
+    trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
+    if (trs->localBuffer == NULL) {
+      tscError("%p failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
+      tfree(trs);
+      break;
+    }
+
     trs->subqueryIndex = i;
     trs->pParentSqlObj = pSql;
     trs->pFinalColModel = pModel;
@@ -952,25 +958,43 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) {
     pthread_mutexattr_destroy(&mutexattr);
 
     SSqlObj *pNew = tscCreateSqlObjForSubquery(pSql, trs, NULL);
-    if (pNew == NULL) {
-      pState->numOfCompleted = (numOfVnodes - i);
-      doCleanupSubqueries(pSql, i, numOfVnodes, trs, pDesc, pModel, pMemoryBuf, pState);
-
-      if (i == 0) {
-        return pSql->res.code;
-      }
-
+    if (pNew == NULL) {
+      tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
+      tfree(trs->localBuffer);
+      tfree(trs);
       break;
     }
 
     // todo handle multi-vnode situation
-    if (pSql->cmd.tsBuf) {
-      pNew->cmd.tsBuf = tsBufClone(pSql->cmd.tsBuf);
+    if (pQueryInfo->tsBuf) {
+      SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
+      pNewQueryInfo->tsBuf = tsBufClone(pQueryInfo->tsBuf);
     }
-
-    tscTrace("%p sub:%p launch subquery.orderOfSub:%d", pSql, pNew, trs->subqueryIndex);
-    tscProcessSql(pNew);
+
+    tscTrace("%p sub:%p create subquery success. orderOfSub:%d", pSql, pNew, trs->subqueryIndex);
+  }
+
+  if (i < numOfSubQueries) {
+    tscError("%p failed to prepare subquery structure and launch subqueries", pSql);
+    pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
+
+    tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfSubQueries);
+    doCleanupSubqueries(pSql, i, pState);
+    return pRes->code;  // free all allocated resources
+  }
+
+  if (pRes->code == TSDB_CODE_QUERY_CANCELLED) {
+    tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfSubQueries);
+    doCleanupSubqueries(pSql, i, pState);
+    return pRes->code;
+  }
+
+  for (int32_t j = 0; j < numOfSubQueries; ++j) {
+    SSqlObj *         pSub = pSql->pSubs[j];
+    SRetrieveSupport *pSupport = pSub->param;
+
+    tscTrace("%p sub:%p launch subquery, orderOfSub:%d.", pSql, pSub, pSupport->subqueryIndex);
+    tscProcessSql(pSub);
   }
 
   return TSDB_CODE_SUCCESS;
@@ -1021,10 +1045,13 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq
   int32_t subqueryIndex = trsupport->subqueryIndex;
   assert(pSql != NULL);
 
+  SSubqueryState* pState = trsupport->pState;
+  assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 &&
+         pPObj->numOfSubs == pState->numOfTotal);
+
   /* retrieved in subquery failed. OR query cancelled in retrieve phase.
*/ - if (trsupport->pState->code == TSDB_CODE_SUCCESS && pPObj->res.code != TSDB_CODE_SUCCESS) { - trsupport->pState->code = -(int)pPObj->res.code; + if (pState->code == TSDB_CODE_SUCCESS && pPObj->res.code != TSDB_CODE_SUCCESS) { + pState->code = -(int)pPObj->res.code; /* * kill current sub-query connection, which may retrieve data from vnodes; @@ -1033,15 +1060,15 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq pSql->res.numOfRows = 0; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; // disable retry efforts tscTrace("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%d", trsupport->pParentSqlObj, pSql, - subqueryIndex, trsupport->pState->code); + subqueryIndex, pState->code); } if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query. tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, subqueryIndex); tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pPObj, pSql, - subqueryIndex, trsupport->pState->code); + subqueryIndex, pState->code); } else { - if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && trsupport->pState->code == TSDB_CODE_SUCCESS) { + if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pState->code == TSDB_CODE_SUCCESS) { /* * current query failed, and the retry count is less than the available * count, retry query clear previous retrieved data, then launch a new sub query @@ -1060,7 +1087,7 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq tscError("%p sub:%p failed to create new subquery sqlobj due to out of memory, abort retry", trsupport->pParentSqlObj, pSql); - trsupport->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; return; } @@ -1068,24 +1095,27 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq tscProcessSql(pNew); return; } else { // reach the maximum retry count, abort - atomic_val_compare_exchange_32(&trsupport->pState->code, TSDB_CODE_SUCCESS, numOfRows); + atomic_val_compare_exchange_32(&pState->code, TSDB_CODE_SUCCESS, numOfRows); tscError("%p sub:%p retrieve failed,code:%d,orderOfSub:%d failed.no more retry,set global code:%d", pPObj, pSql, - numOfRows, subqueryIndex, trsupport->pState->code); + numOfRows, subqueryIndex, pState->code); } } - if (atomic_add_fetch_32(&trsupport->pState->numOfCompleted, 1) < trsupport->pState->numOfTotal) { + int32_t numOfTotal = pState->numOfTotal; + + int32_t finished = atomic_add_fetch_32(&pState->numOfCompleted, 1); + if (finished < numOfTotal) { + tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex, finished); return tscFreeSubSqlObj(trsupport, pSql); } // all subqueries are failed - tscError("%p retrieve from %d vnode(s) completed,code:%d.FAILED.", pPObj, trsupport->pState->numOfTotal, - trsupport->pState->code); - pPObj->res.code = -(trsupport->pState->code); + tscError("%p retrieve from %d vnode(s) completed,code:%d.FAILED.", pPObj, pState->numOfTotal, pState->code); + pPObj->res.code = -(pState->code); // release allocated resource tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, trsupport->pFinalColModel, - trsupport->pState->numOfTotal); + pState->numOfTotal); tfree(trsupport->pState); tscFreeSubSqlObj(trsupport, pSql); @@ -1101,7 +1131,9 @@ static void 
tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq pPObj->cmd.command = TSDB_SQL_RETRIEVE_METRIC; } else { // in case of second stage join subquery, invoke its callback function instead of regular QueueAsyncRes - if ((pPObj->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pPObj->cmd, 0); + + if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { (*pPObj->fp)(pPObj->param, pPObj, pPObj->res.code); } else { // regular super table query if (pPObj->res.code != TSDB_CODE_SUCCESS) { @@ -1118,36 +1150,50 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { tOrderDescriptor *pDesc = trsupport->pOrderDescriptor; SSqlObj *pSql = (SSqlObj *)tres; - if (pSql == NULL) { // sql object has been released in error process, return immediately + if (pSql == NULL) { // sql object has been released in error process, return immediately tscTrace("%p subquery has been released, idx:%d, abort", pPObj, idx); return; } + SSubqueryState* pState = trsupport->pState; + assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 && + pPObj->numOfSubs == pState->numOfTotal); + // query process and cancel query process may execute at the same time pthread_mutex_lock(&trsupport->queryMutex); - if (numOfRows < 0 || trsupport->pState->code < 0 || pPObj->res.code != TSDB_CODE_SUCCESS) { + if (numOfRows < 0 || pState->code < 0 || pPObj->res.code != TSDB_CODE_SUCCESS) { return tscHandleSubRetrievalError(trsupport, pSql, numOfRows); } - SSqlRes * pRes = &pSql->res; - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlRes * pRes = &pSql->res; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SVnodeSidList *vnodeInfo = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, idx); SVPeerDesc * pSvd = &vnodeInfo->vpeerDesc[vnodeInfo->index]; if (numOfRows > 0) { assert(pRes->numOfRows == numOfRows); - atomic_add_fetch_64(&trsupport->pState->numOfRetrievedRows, numOfRows); + int64_t num = atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows); tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%u,vid:%d,orderOfSub:%d", pPObj, pSql, - pRes->numOfRows, trsupport->pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx); + pRes->numOfRows, pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx); + + if (num > tsMaxNumOfOrderedResults) { + tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId64 " , current:%" PRId64, + pPObj, pSql, tsMaxNumOfOrderedResults, num); + tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_SORTED_RES_TOO_MANY); + return; + } + #ifdef _DEBUG_VIEW printf("received data from vnode: %d rows\n", pRes->numOfRows); SSrcColumnInfo colInfo[256] = {0}; - tscGetSrcColumnInfo(colInfo, &pPObj->cmd); + + tscGetSrcColumnInfo(colInfo, pQueryInfo); tColModelDisplayEx(pDesc->pSchema, pRes->data, pRes->numOfRows, pRes->numOfRows, colInfo); #endif if (tsTotalTmpDirGB != 0 && tsAvailTmpDirGB < tsMinimalTmpDirGB) { @@ -1157,7 +1203,7 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { return; } int32_t ret = saveToBuffer(trsupport->pExtMemBuffer[idx], pDesc, trsupport->localBuffer, pRes->data, - pRes->numOfRows, pCmd->groupbyExpr.orderType); + pRes->numOfRows, pQueryInfo->groupbyExpr.orderType); if (ret < 0) { // set no disk space 
error info, and abort retry
      tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_CLI_NO_DISKSPACE);
@@ -1168,20 +1214,20 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
   } else {  // all data has been retrieved to client
     /* data in from current vnode is stored in cache and disk */
-    uint32_t numOfRowsFromVnode =
-        trsupport->pExtMemBuffer[idx]->numOfAllElems + trsupport->localBuffer->numOfElems;
+    uint32_t numOfRowsFromVnode = trsupport->pExtMemBuffer[idx]->numOfAllElems + trsupport->localBuffer->numOfElems;
     tscTrace("%p sub:%p all data retrieved from ip:%u,vid:%d, numOfRows:%d, orderOfSub:%d", pPObj, pSql, pSvd->ip,
              pSvd->vnode, numOfRowsFromVnode, idx);
 
     tColModelCompact(pDesc->pSchema, trsupport->localBuffer, pDesc->pSchema->maxCapacity);
 
 #ifdef _DEBUG_VIEW
-    printf("%ld rows data flushed to disk:\n", trsupport->localBuffer->numOfElems);
+    printf("%" PRIu64 " rows data flushed to disk:\n", trsupport->localBuffer->numOfElems);
     SSrcColumnInfo colInfo[256] = {0};
-    tscGetSrcColumnInfo(colInfo, &pPObj->cmd);
+    tscGetSrcColumnInfo(colInfo, pQueryInfo);
     tColModelDisplayEx(pDesc->pSchema, trsupport->localBuffer->data, trsupport->localBuffer->numOfElems,
                        trsupport->localBuffer->numOfElems, colInfo);
 #endif
+
     if (tsTotalTmpDirGB != 0 && tsAvailTmpDirGB < tsMinimalTmpDirGB) {
       tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pPObj, pSql,
                tsAvailTmpDirGB, tsMinimalTmpDirGB);
@@ -1191,14 +1237,21 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
 
     // each result for a vnode is ordered as an independant list,
     // then used as an input of loser tree for disk-based merge routine
-    int32_t ret =
-        tscFlushTmpBuffer(trsupport->pExtMemBuffer[idx], pDesc, trsupport->localBuffer, pCmd->groupbyExpr.orderType);
+    int32_t ret = tscFlushTmpBuffer(trsupport->pExtMemBuffer[idx], pDesc, trsupport->localBuffer,
+                                    pQueryInfo->groupbyExpr.orderType);
     if (ret != 0) {
       /* set no disk space error info, and abort retry */
       return tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_CLI_NO_DISKSPACE);
     }
-
-    if (atomic_add_fetch_32(&trsupport->pState->numOfCompleted, 1) < trsupport->pState->numOfTotal) {
+
+    // Keep this value in a local variable, since pState may be released by another thread: if the atomic_add
+    // operation increases the finished value up to pState->numOfTotal, all subqueries are completed, and
+    // in that case the comparison between the finished value and the released pState->numOfTotal is not safe.
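The comment above describes the race this hunk closes: once the atomic increment can bring `numOfCompleted` up to `numOfTotal`, the last finisher may free the shared `SSubqueryState`, so `numOfTotal` must be copied out before the increment. A self-contained sketch of that fan-in pattern follows, using C11 `<stdatomic.h>` and hypothetical `SubState`/`subqueryDone` names in place of `SSubqueryState` and the tree's `atomic_add_fetch_32` wrapper.

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for SSubqueryState, shared by all subqueries. */
typedef struct {
  int        numOfTotal;      /* fixed when the subqueries are launched */
  atomic_int numOfCompleted;  /* incremented once per finished subquery */
} SubState;

/* Called from each subquery's completion path.
 * Returns 1 when this caller finished last and freed the state. */
static int subqueryDone(SubState *state) {
  /* Copy the total BEFORE the increment: as soon as the increment makes
   * numOfCompleted == numOfTotal, another finisher may free 'state', so
   * dereferencing it after the atomic operation would be unsafe. */
  int total    = state->numOfTotal;
  int finished = atomic_fetch_add(&state->numOfCompleted, 1) + 1;

  if (finished < total) {
    return 0;    /* others are still running; 'state' stays alive */
  }

  free(state);   /* the last finisher releases the shared state */
  return 1;
}

int main(void) {
  SubState *state = calloc(1, sizeof(SubState));
  state->numOfTotal = 3;
  for (int i = 0; i < 3; ++i) {
    printf("subquery %d last:%d\n", i, subqueryDone(state));
  }
  return 0;
}
```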
+ int32_t numOfTotal = pState->numOfTotal; + + int32_t finished = atomic_add_fetch_32(&pState->numOfCompleted, 1); + if (finished < numOfTotal) { + tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex, finished); return tscFreeSubSqlObj(trsupport, pSql); } @@ -1206,10 +1259,12 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { pDesc->pSchema->maxCapacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage; tscTrace("%p retrieve from %d vnodes completed.final NumOfRows:%d,start to build loser tree", pPObj, - trsupport->pState->numOfTotal, trsupport->pState->numOfCompleted); + pState->numOfTotal, pState->numOfRetrievedRows); + + SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pPObj->cmd, 0); + tscClearInterpInfo(pPQueryInfo); - tscClearInterpInfo(&pPObj->cmd); - tscCreateLocalReducer(trsupport->pExtMemBuffer, trsupport->pState->numOfTotal, pDesc, trsupport->pFinalColModel, + tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfTotal, pDesc, trsupport->pFinalColModel, &pPObj->cmd, &pPObj->res); tscTrace("%p build loser tree completed", pPObj); @@ -1218,7 +1273,8 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { pPObj->res.row = 0; // only free once - free(trsupport->pState); + tfree(trsupport->pState); + tscFreeSubSqlObj(trsupport, pSql); if (pPObj->fp == NULL) { @@ -1239,7 +1295,10 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { } void tscKillMetricQuery(SSqlObj *pSql) { - if (!tscIsTwoStageMergeMetricQuery(&pSql->cmd)) { + SSqlCmd* pCmd = &pSql->cmd; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (!tscIsTwoStageMergeMetricQuery(pQueryInfo, 0)) { return; } @@ -1282,15 +1341,19 @@ void tscKillMetricQuery(SSqlObj *pSql) { static void tscRetrieveDataRes(void *param, TAOS_RES *tres, int retCode); static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj) { - SSqlObj *pNew = createSubqueryObj(pSql, 0, tscRetrieveDataRes, trsupport, prevSqlObj); + const int32_t table_index = 0; + + SSqlObj *pNew = createSubqueryObj(pSql, table_index, tscRetrieveDataRes, trsupport, prevSqlObj); if (pNew != NULL) { // the sub query of two-stage super table query - pNew->cmd.type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY; - assert(pNew->cmd.numOfTables == 1); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY; - //launch subquery for each vnode, so the subquery index equals to the vnodeIndex. - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0); + assert(pQueryInfo->numOfTables == 1 && pNew->cmd.numOfClause == 1); + + // launch subquery for each vnode, so the subquery index equals to the vnodeIndex. 
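Taken together, the `tscLaunchSTableSubqueries` hunks above split fan-out into two phases: allocate every per-vnode subquery first, and call `tscProcessSql` only once the whole set is known to be valid, so partial-failure cleanup never races against callbacks of already-running subqueries. A hedged sketch of that create-then-launch shape, with hypothetical `Sub`/`launch`/`launchAll` names:

```c
#include <stdlib.h>

typedef struct Sub { int idx; } Sub;

/* Hypothetical launch routine standing in for tscProcessSql(). */
static void launch(Sub *s) { (void)s; }

/* Phase 1: create every subquery; on any failure, free what exists and
 * bail out before anything has started running.
 * Phase 2: launch only when the whole set is known to be valid. */
static int launchAll(Sub **subs, int n) {
  int created = 0;
  for (; created < n; ++created) {
    subs[created] = calloc(1, sizeof(Sub));
    if (subs[created] == NULL) {
      break;                       /* stop creating, fall through */
    }
    subs[created]->idx = created;
  }

  if (created < n) {               /* phase 1 failed part-way */
    for (int i = 0; i < created; ++i) {
      free(subs[i]);               /* nothing was launched, plain cleanup */
    }
    return -1;
  }

  for (int i = 0; i < n; ++i) {
    launch(subs[i]);               /* phase 2: all-or-nothing fan-out */
  }
  return 0;
}

int main(void) {
  Sub *subs[4] = {0};
  int rc = launchAll(subs, 4);
  for (int i = 0; rc == 0 && i < 4; ++i) free(subs[i]);
  return rc == 0 ? 0 : 1;
}
```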
+ SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, table_index); pMeterMetaInfo->vnodeIndex = trsupport->subqueryIndex; - + pSql->pSubs[trsupport->subqueryIndex] = pNew; } @@ -1299,10 +1362,14 @@ static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsu void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { SRetrieveSupport *trsupport = (SRetrieveSupport *)param; - - SSqlObj * pSql = (SSqlObj *)tres; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - int32_t idx = pMeterMetaInfo->vnodeIndex; + + SSqlObj* pParentSql = trsupport->pParentSqlObj; + SSqlObj* pSql = (SSqlObj *)tres; + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); + assert(pSql->cmd.numOfClause == 1 && pSql->cmd.pQueryInfo[0]->numOfTables == 1); + + int32_t idx = pMeterMetaInfo->vnodeIndex; SVnodeSidList *vnodeInfo = NULL; SVPeerDesc * pSvd = NULL; @@ -1311,15 +1378,19 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { pSvd = &vnodeInfo->vpeerDesc[vnodeInfo->index]; } - if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS || trsupport->pState->code != TSDB_CODE_SUCCESS) { + SSubqueryState* pState = trsupport->pState; + assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 && + pParentSql->numOfSubs == pState->numOfTotal); + + if (pParentSql->res.code != TSDB_CODE_SUCCESS || pState->code != TSDB_CODE_SUCCESS) { // metric query is killed, Note: code must be less than 0 trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; - if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS) { - code = -(int)(trsupport->pParentSqlObj->res.code); + if (pParentSql->res.code != TSDB_CODE_SUCCESS) { + code = -(int)(pParentSql->res.code); } else { - code = trsupport->pState->code; + code = pState->code; } - tscTrace("%p query cancelled or failed, sub:%p, orderOfSub:%d abort, code:%d", trsupport->pParentSqlObj, pSql, + tscTrace("%p query cancelled or failed, sub:%p, orderOfSub:%d abort, code:%d", pParentSql, pSql, trsupport->subqueryIndex, code); } @@ -1332,49 +1403,54 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { */ if (code != TSDB_CODE_SUCCESS) { if (trsupport->numOfRetry++ >= MAX_NUM_OF_SUBQUERY_RETRY) { - tscTrace("%p sub:%p reach the max retry count,set global code:%d", trsupport->pParentSqlObj, pSql, code); - atomic_val_compare_exchange_32(&trsupport->pState->code, 0, code); + tscTrace("%p sub:%p reach the max retry count,set global code:%d", pParentSql, pSql, code); + atomic_val_compare_exchange_32(&pState->code, 0, code); } else { // does not reach the maximum retry count, go on - tscTrace("%p sub:%p failed code:%d, retry:%d", trsupport->pParentSqlObj, pSql, code, trsupport->numOfRetry); + tscTrace("%p sub:%p failed code:%d, retry:%d", pParentSql, pSql, code, trsupport->numOfRetry); - SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql); + SSqlObj *pNew = tscCreateSqlObjForSubquery(pParentSql, trsupport, pSql); if (pNew == NULL) { tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vid:%d, orderOfSub:%d", - trsupport->pParentSqlObj, pSql, pSvd->vnode, trsupport->subqueryIndex); + trsupport->pParentSqlObj, pSql, pSvd != NULL ? 
pSvd->vnode : -1, trsupport->subqueryIndex); - trsupport->pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY; + pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; } else { - assert(pNew->cmd.pMeterInfo[0]->pMeterMeta != NULL && pNew->cmd.pMeterInfo[0]->pMetricMeta != NULL); + SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + assert(pNewQueryInfo->pMeterInfo[0]->pMeterMeta != NULL && pNewQueryInfo->pMeterInfo[0]->pMetricMeta != NULL); tscProcessSql(pNew); return; } } } - if (trsupport->pState->code != TSDB_CODE_SUCCESS) { // failed, abort + if (pState->code != TSDB_CODE_SUCCESS) { // failed, abort if (vnodeInfo != NULL) { - tscTrace("%p sub:%p query failed,ip:%u,vid:%d,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql, + tscTrace("%p sub:%p query failed,ip:%u,vid:%d,orderOfSub:%d,global code:%d", pParentSql, pSql, vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, - trsupport->subqueryIndex, trsupport->pState->code); + trsupport->subqueryIndex, pState->code); } else { - tscTrace("%p sub:%p query failed,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql, - trsupport->subqueryIndex, trsupport->pState->code); + tscTrace("%p sub:%p query failed,orderOfSub:%d,global code:%d", pParentSql, pSql, + trsupport->subqueryIndex, pState->code); } - tscRetrieveFromVnodeCallBack(param, tres, trsupport->pState->code); + tscRetrieveFromVnodeCallBack(param, tres, pState->code); } else { // success, proceed to retrieve data from dnode - tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, + if (vnodeInfo != NULL) { + tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, trsupport->subqueryIndex); + } else { + tscTrace("%p sub:%p query complete, orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, + trsupport->subqueryIndex); + } taos_fetch_rows_a(tres, tscRetrieveFromVnodeCallBack, param); } } -int tscBuildRetrieveMsg(SSqlObj *pSql) { +int tscBuildRetrieveMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char *pMsg, *pStart; - int msgLen = 0; pStart = pSql->cmd.payload + tsRpcHeadSize; pMsg = pStart; @@ -1382,20 +1458,20 @@ int tscBuildRetrieveMsg(SSqlObj *pSql) { *((uint64_t *)pMsg) = pSql->res.qhandle; pMsg += sizeof(pSql->res.qhandle); - *((uint16_t*)pMsg) = htons(pSql->cmd.type); - pMsg += sizeof(pSql->cmd.type); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + *((uint16_t *)pMsg) = htons(pQueryInfo->type); + pMsg += sizeof(pQueryInfo->type); - msgLen = pMsg - pStart; - pSql->cmd.payloadLen = msgLen; + pSql->cmd.payloadLen = pMsg - pStart; pSql->cmd.msgType = TSDB_MSG_TYPE_RETRIEVE; - return msgLen; + return TSDB_CODE_SUCCESS; } void tscUpdateVnodeInSubmitMsg(SSqlObj *pSql, char *buf) { SShellSubmitMsg *pShellMsg; char * pMsg; - SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, pSql->cmd.clauseIndex, 0); SMeterMeta *pMeterMeta = pMeterMetaInfo->pMeterMeta; @@ -1403,35 +1479,39 @@ void tscUpdateVnodeInSubmitMsg(SSqlObj *pSql, char *buf) { pShellMsg = (SShellSubmitMsg *)pMsg; pShellMsg->vnode = htons(pMeterMeta->vpeerDesc[pSql->index].vnode); - tscTrace("%p update submit msg vnode:%s:%d", pSql, taosIpStr(pMeterMeta->vpeerDesc[pSql->index].ip), htons(pShellMsg->vnode)); + tscTrace("%p update 
submit msg vnode:%s:%d", pSql, taosIpStr(pMeterMeta->vpeerDesc[pSql->index].ip), + htons(pShellMsg->vnode)); } -int tscBuildSubmitMsg(SSqlObj *pSql) { +int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SShellSubmitMsg *pShellMsg; char * pMsg, *pStart; - int msgLen = 0; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + + SMeterMeta *pMeterMeta = pMeterMetaInfo->pMeterMeta; pStart = pSql->cmd.payload + tsRpcHeadSize; pMsg = pStart; pShellMsg = (SShellSubmitMsg *)pMsg; - pShellMsg->import = pSql->cmd.import; + + pShellMsg->import = htons(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT) ? 0 : 1); pShellMsg->vnode = htons(pMeterMeta->vpeerDesc[pMeterMeta->index].vnode); - pShellMsg->numOfSid = htonl(pSql->cmd.count); // number of meters to be inserted + pShellMsg->numOfSid = htonl(pSql->cmd.numOfTablesInSubmit); // number of meters to be inserted // pSql->cmd.payloadLen is set during parse sql routine, so we do not use it here pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT; - tscTrace("%p update submit msg vnode:%s:%d", pSql, taosIpStr(pMeterMeta->vpeerDesc[pMeterMeta->index].ip), htons(pShellMsg->vnode)); - - return msgLen; + tscTrace("%p update submit msg vnode:%s:%d", pSql, taosIpStr(pMeterMeta->vpeerDesc[pMeterMeta->index].ip), + htons(pShellMsg->vnode)); + + return TSDB_CODE_SUCCESS; } void tscUpdateVnodeInQueryMsg(SSqlObj *pSql, char *buf) { SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); char * pStart = buf + tsRpcHeadSize; SQueryMeterMsg *pQueryMsg = (SQueryMeterMsg *)pStart; @@ -1450,15 +1530,17 @@ void tscUpdateVnodeInQueryMsg(SSqlObj *pSql, char *buf) { * for meter query, simply return the size <= 1k * for metric query, estimate size according to meter tags */ -static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd) { +static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) { const static int32_t MIN_QUERY_MSG_PKT_SIZE = TSDB_MAX_BYTES_PER_ROW * 5; - int32_t srcColListSize = pCmd->numOfCols * sizeof(SColumnInfo); + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); - int32_t exprSize = sizeof(SSqlFuncExprMsg) * pCmd->fieldsInfo.numOfOutputCols; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + int32_t srcColListSize = pQueryInfo->colList.numOfCols * sizeof(SColumnInfo); + + int32_t exprSize = sizeof(SSqlFuncExprMsg) * pQueryInfo->fieldsInfo.numOfOutputCols; + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); // meter query without tags values - if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (!UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryMeterMsg) + srcColListSize + exprSize; } @@ -1467,67 +1549,70 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd) { SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); int32_t meterInfoSize = (pMetricMeta->tagLen + sizeof(SMeterSidExtInfo)) * pVnodeSidList->numOfSids; - int32_t outputColumnSize = pCmd->fieldsInfo.numOfOutputCols * sizeof(SSqlFuncExprMsg); + int32_t outputColumnSize = pQueryInfo->fieldsInfo.numOfOutputCols * sizeof(SSqlFuncExprMsg); int32_t size = meterInfoSize + outputColumnSize + 
srcColListSize + exprSize + MIN_QUERY_MSG_PKT_SIZE; - if (pCmd->tsBuf != NULL) { - size += pCmd->tsBuf->fileSize; + if (pQueryInfo->tsBuf != NULL) { + size += pQueryInfo->tsBuf->fileSize; } return size; } -static char* doSerializeTableInfo(SSqlObj* pSql, int32_t numOfMeters, int32_t vnodeId, char* pMsg) { - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - +static char *doSerializeTableInfo(SSqlObj *pSql, int32_t numOfMeters, int32_t vnodeId, char *pMsg) { + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, pSql->cmd.clauseIndex, 0); + SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; tscTrace("%p vid:%d, query on %d meters", pSql, htons(vnodeId), numOfMeters); if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { #ifdef _DEBUG_VIEW - tscTrace("%p sid:%d, uid:%lld", pSql, pMeterMetaInfo->pMeterMeta->sid, pMeterMetaInfo->pMeterMeta->uid); + tscTrace("%p sid:%d, uid:%" PRIu64, pSql, pMeterMetaInfo->pMeterMeta->sid, pMeterMetaInfo->pMeterMeta->uid); #endif SMeterSidExtInfo *pMeterInfo = (SMeterSidExtInfo *)pMsg; pMeterInfo->sid = htonl(pMeterMeta->sid); pMeterInfo->uid = htobe64(pMeterMeta->uid); - + pMeterInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pMeterMeta->uid)); pMsg += sizeof(SMeterSidExtInfo); } else { SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); - + for (int32_t i = 0; i < numOfMeters; ++i) { SMeterSidExtInfo *pMeterInfo = (SMeterSidExtInfo *)pMsg; SMeterSidExtInfo *pQueryMeterInfo = tscGetMeterSidInfo(pVnodeSidList, i); - + pMeterInfo->sid = htonl(pQueryMeterInfo->sid); pMeterInfo->uid = htobe64(pQueryMeterInfo->uid); + pMeterInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pQueryMeterInfo->uid)); pMsg += sizeof(SMeterSidExtInfo); - + memcpy(pMsg, pQueryMeterInfo->tags, pMetricMeta->tagLen); pMsg += pMetricMeta->tagLen; #ifdef _DEBUG_VIEW - tscTrace("%p sid:%d, uid:%lld", pSql, pQueryMeterInfo->sid, pQueryMeterInfo->uid); + tscTrace("%p sid:%d, uid:%" PRId64, pSql, pQueryMeterInfo->sid, pQueryMeterInfo->uid); #endif } } - + return pMsg; } -int tscBuildQueryMsg(SSqlObj *pSql) { +int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - int32_t size = tscEstimateQueryMsgSize(pCmd); + int32_t size = tscEstimateQueryMsgSize(pCmd, pCmd->clauseIndex); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { tscError("%p failed to malloc for query msg", pSql); return -1; } - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + char * pStart = pCmd->payload + tsRpcHeadSize; SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; @@ -1569,26 +1654,25 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->numOfSids = htonl(numOfMeters); pQueryMsg->numOfTagsCols = htons(pMeterMetaInfo->numOfTags); - if (pCmd->order.order == TSQL_SO_ASC) { - pQueryMsg->skey = htobe64(pCmd->stime); - pQueryMsg->ekey = htobe64(pCmd->etime); + if (pQueryInfo->order.order == TSQL_SO_ASC) { + pQueryMsg->skey = htobe64(pQueryInfo->stime); + pQueryMsg->ekey = htobe64(pQueryInfo->etime); } else { - pQueryMsg->skey = htobe64(pCmd->etime); - pQueryMsg->ekey = htobe64(pCmd->stime); + pQueryMsg->skey = htobe64(pQueryInfo->etime); + pQueryMsg->ekey = htobe64(pQueryInfo->stime); } - pQueryMsg->num = htonl(0); - pQueryMsg->order = htons(pCmd->order.order); - 
pQueryMsg->orderColId = htons(pCmd->order.orderColId); + pQueryMsg->order = htons(pQueryInfo->order.order); + pQueryMsg->orderColId = htons(pQueryInfo->order.orderColId); - pQueryMsg->interpoType = htons(pCmd->interpoType); + pQueryMsg->interpoType = htons(pQueryInfo->interpoType); - pQueryMsg->limit = htobe64(pCmd->limit.limit); - pQueryMsg->offset = htobe64(pCmd->limit.offset); + pQueryMsg->limit = htobe64(pQueryInfo->limit.limit); + pQueryMsg->offset = htobe64(pQueryInfo->limit.offset); - pQueryMsg->numOfCols = htons(pCmd->colList.numOfCols); + pQueryMsg->numOfCols = htons(pQueryInfo->colList.numOfCols); - if (pCmd->colList.numOfCols <= 0) { + if (pQueryInfo->colList.numOfCols <= 0) { tscError("%p illegal value of numOfCols in query msg: %d", pSql, pMeterMeta->numOfColumns); return -1; } @@ -1598,19 +1682,21 @@ int tscBuildQueryMsg(SSqlObj *pSql) { return -1; } - pQueryMsg->nAggTimeInterval = htobe64(pCmd->nAggTimeInterval); - pQueryMsg->intervalTimeUnit = pCmd->intervalTimeUnit; - if (pCmd->nAggTimeInterval < 0) { - tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pCmd->nAggTimeInterval); + pQueryMsg->nAggTimeInterval = htobe64(pQueryInfo->nAggTimeInterval); + pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit; + pQueryMsg->slidingTime = htobe64(pQueryInfo->nSlidingTime); + + if (pQueryInfo->nAggTimeInterval < 0) { + tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->nAggTimeInterval); return -1; } - if (pCmd->groupbyExpr.numOfGroupCols < 0) { - tscError("%p illegal value of numOfGroupCols in query msg: %d", pSql, pCmd->groupbyExpr.numOfGroupCols); + if (pQueryInfo->groupbyExpr.numOfGroupCols < 0) { + tscError("%p illegal value of numOfGroupCols in query msg: %d", pSql, pQueryInfo->groupbyExpr.numOfGroupCols); return -1; } - pQueryMsg->numOfGroupCols = htons(pCmd->groupbyExpr.numOfGroupCols); + pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols); if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { // query on meter pQueryMsg->tagLength = 0; @@ -1618,20 +1704,21 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->tagLength = htons(pMetricMeta->tagLen); } - pQueryMsg->queryType = htons(pCmd->type); - pQueryMsg->numOfOutputCols = htons(pCmd->exprsInfo.numOfExprs); + pQueryMsg->queryType = htons(pQueryInfo->type); + pQueryMsg->numOfOutputCols = htons(pQueryInfo->exprsInfo.numOfExprs); - if (pCmd->fieldsInfo.numOfOutputCols < 0) { - tscError("%p illegal value of number of output columns in query msg: %d", pSql, pCmd->fieldsInfo.numOfOutputCols); + if (pQueryInfo->fieldsInfo.numOfOutputCols < 0) { + tscError("%p illegal value of number of output columns in query msg: %d", pSql, + pQueryInfo->fieldsInfo.numOfOutputCols); return -1; } // set column list ids - char * pMsg = (char *)(pQueryMsg->colList) + pCmd->colList.numOfCols * sizeof(SColumnInfo); + char * pMsg = (char *)(pQueryMsg->colList) + pQueryInfo->colList.numOfCols * sizeof(SColumnInfo); SSchema *pSchema = tsGetSchema(pMeterMeta); - for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) { - SColumnBase *pCol = tscColumnBaseInfoGet(&pCmd->colList, i); + for (int32_t i = 0; i < pQueryInfo->colList.numOfCols; ++i) { + SColumnBase *pCol = tscColumnBaseInfoGet(&pQueryInfo->colList, i); SSchema * pColSchema = &pSchema[pCol->colIndex.columnIndex]; if (pCol->colIndex.columnIndex >= pMeterMeta->numOfColumns || pColSchema->type < TSDB_DATA_TYPE_BOOL || @@ -1680,14 +1767,14 @@ int tscBuildQueryMsg(SSqlObj *pSql) { SSqlFuncExprMsg *pSqlFuncExpr 
= (SSqlFuncExprMsg *)pMsg; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == TSDB_FUNC_ARITHM) { hasArithmeticFunction = true; } - if (!tscValidateColumnId(pCmd, pExpr->colInfo.colId)) { + if (!tscValidateColumnId(pMeterMetaInfo, pExpr->colInfo.colId)) { /* column id is not valid according to the cached metermeta, the meter meta is expired */ tscError("%p table schema is not matched with parsed sql", pSql); return -1; @@ -1720,8 +1807,8 @@ int tscBuildQueryMsg(SSqlObj *pSql) { int32_t len = 0; if (hasArithmeticFunction) { - SColumnBase *pColBase = pCmd->colList.pColList; - for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) { + SColumnBase *pColBase = pQueryInfo->colList.pColList; + for (int32_t i = 0; i < pQueryInfo->colList.numOfCols; ++i) { char * name = pSchema[pColBase[i].colIndex.columnIndex].name; int32_t lenx = strlen(name); memcpy(pMsg, name, lenx); @@ -1755,7 +1842,7 @@ int tscBuildQueryMsg(SSqlObj *pSql) { } } - SSqlGroupbyExpr *pGroupbyExpr = &pCmd->groupbyExpr; + SSqlGroupbyExpr *pGroupbyExpr = &pQueryInfo->groupbyExpr; if (pGroupbyExpr->numOfGroupCols != 0) { pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIndex); pQueryMsg->orderType = htons(pGroupbyExpr->orderType); @@ -1777,10 +1864,10 @@ int tscBuildQueryMsg(SSqlObj *pSql) { } } - if (pCmd->interpoType != TSDB_INTERPO_NONE) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - *((int64_t *)pMsg) = htobe64(pCmd->defaultVal[i]); - pMsg += sizeof(pCmd->defaultVal[0]); + if (pQueryInfo->interpoType != TSDB_INTERPO_NONE) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + *((int64_t *)pMsg) = htobe64(pQueryInfo->defaultVal[i]); + pMsg += sizeof(pQueryInfo->defaultVal[0]); } } @@ -1789,13 +1876,13 @@ int tscBuildQueryMsg(SSqlObj *pSql) { int32_t tsLen = 0; int32_t numOfBlocks = 0; - if (pCmd->tsBuf != NULL) { - STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pCmd->tsBuf, pMeterMetaInfo->vnodeIndex); - assert(QUERY_IS_JOIN_QUERY(pCmd->type) && pBlockInfo != NULL); // this query should not be sent + if (pQueryInfo->tsBuf != NULL) { + STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pQueryInfo->tsBuf, pMeterMetaInfo->vnodeIndex); + assert(QUERY_IS_JOIN_QUERY(pQueryInfo->type) && pBlockInfo != NULL); // this query should not be sent // todo refactor - fseek(pCmd->tsBuf->f, pBlockInfo->offset, SEEK_SET); - fread(pMsg, pBlockInfo->compLen, 1, pCmd->tsBuf->f); + fseek(pQueryInfo->tsBuf->f, pBlockInfo->offset, SEEK_SET); + fread(pMsg, pBlockInfo->compLen, 1, pQueryInfo->tsBuf->f); pMsg += pBlockInfo->compLen; tsLen = pBlockInfo->compLen; @@ -1804,8 +1891,8 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->tsLen = htonl(tsLen); pQueryMsg->tsNumOfBlocks = htonl(numOfBlocks); - if (pCmd->tsBuf != NULL) { - pQueryMsg->tsOrder = htonl(pCmd->tsBuf->tsOrder); + if (pQueryInfo->tsBuf != NULL) { + pQueryMsg->tsOrder = htonl(pQueryInfo->tsBuf->tsOrder); } msgLen = pMsg - pStart; @@ -1815,346 +1902,277 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pSql->cmd.msgType = TSDB_MSG_TYPE_QUERY; assert(msgLen + minMsgSize() <= size); - return msgLen; + + return TSDB_CODE_SUCCESS; } -int tscBuildCreateDbMsg(SSqlObj *pSql) { +int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SCreateDbMsg *pCreateDbMsg; char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * 
pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - pStart = pCmd->payload + tsRpcHeadSize; - pMsg = pStart; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + SSqlCmd *pCmd = &pSql->cmd; + pMsg = doBuildMsgHeader(pSql, &pStart); pCreateDbMsg = (SCreateDbMsg *)pMsg; + + assert(pCmd->numOfClause == 1); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + strncpy(pCreateDbMsg->db, pMeterMetaInfo->name, tListLen(pCreateDbMsg->db)); pMsg += sizeof(SCreateDbMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; + pCmd->payloadLen = pMsg - pStart; pCmd->msgType = TSDB_MSG_TYPE_CREATE_DB; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildCreateDnodeMsg(SSqlObj *pSql) { +int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SCreateDnodeMsg *pCreate; - char * pMsg, *pStart; - int msgLen = 0; - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + char *pMsg, *pStart; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + SSqlCmd *pCmd = &pSql->cmd; - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); pCreate = (SCreateDnodeMsg *)pMsg; - strcpy(pCreate->ip, pMeterMetaInfo->name); + strncpy(pCreate->ip, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); pMsg += sizeof(SCreateDnodeMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_CREATE_PNODE; + pCmd->payloadLen = pMsg - pStart; + pCmd->msgType = TSDB_MSG_TYPE_CREATE_DNODE; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildDropDnodeMsg(SSqlObj *pSql) { - SDropDnodeMsg *pDrop; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); - - pDrop = (SDropDnodeMsg *)pMsg; - strcpy(pDrop->ip, pMeterMetaInfo->name); +int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SCreateAcctMsg *pAlterMsg; + char * pMsg, *pStart; + int msgLen = 0; - pMsg += sizeof(SDropDnodeMsg); + SSqlCmd *pCmd = &pSql->cmd; - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_DROP_PNODE; + pMsg = doBuildMsgHeader(pSql, &pStart); - return msgLen; -} + pAlterMsg = (SCreateAcctMsg *)pMsg; -int tscBuildCreateUserMsg(SSqlObj *pSql) { - SCreateUserMsg *pCreateMsg; - char * pMsg, *pStart; - int msgLen = 0; + SSQLToken *pName = &pInfo->pDCLInfo->user.user; + SSQLToken *pPwd = &pInfo->pDCLInfo->user.passwd; - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + strncpy(pAlterMsg->user, pName->z, pName->n); + strncpy(pAlterMsg->pass, pPwd->z, pPwd->n); - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + pMsg += sizeof(SCreateAcctMsg); - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + SCreateAcctSQL *pAcctOpt = &pInfo->pDCLInfo->acctOpt; - pCreateMsg = (SCreateUserMsg *)pMsg; - strcpy(pCreateMsg->user, pMeterMetaInfo->name); - strcpy(pCreateMsg->pass, pCmd->payload); + pAlterMsg->cfg.maxUsers = htonl(pAcctOpt->maxUsers); + pAlterMsg->cfg.maxDbs = htonl(pAcctOpt->maxDbs); + 
pAlterMsg->cfg.maxTimeSeries = htonl(pAcctOpt->maxTimeSeries); + pAlterMsg->cfg.maxStreams = htonl(pAcctOpt->maxStreams); + pAlterMsg->cfg.maxPointsPerSecond = htonl(pAcctOpt->maxPointsPerSecond); + pAlterMsg->cfg.maxStorage = htobe64(pAcctOpt->maxStorage); + pAlterMsg->cfg.maxQueryTime = htobe64(pAcctOpt->maxQueryTime); + pAlterMsg->cfg.maxConnections = htonl(pAcctOpt->maxConnections); - pMsg += sizeof(SCreateUserMsg); + if (pAcctOpt->stat.n == 0) { + pAlterMsg->cfg.accessState = -1; + } else { + if (pAcctOpt->stat.z[0] == 'r' && pAcctOpt->stat.n == 1) { + pAlterMsg->cfg.accessState = TSDB_VN_READ_ACCCESS; + } else if (pAcctOpt->stat.z[0] == 'w' && pAcctOpt->stat.n == 1) { + pAlterMsg->cfg.accessState = TSDB_VN_WRITE_ACCCESS; + } else if (strncmp(pAcctOpt->stat.z, "all", 3) == 0 && pAcctOpt->stat.n == 3) { + pAlterMsg->cfg.accessState = TSDB_VN_ALL_ACCCESS; + } else if (strncmp(pAcctOpt->stat.z, "no", 2) == 0 && pAcctOpt->stat.n == 2) { + pAlterMsg->cfg.accessState = 0; + } + } msgLen = pMsg - pStart; pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_CREATE_USER; - return msgLen; + pCmd->msgType = TSDB_MSG_TYPE_CREATE_ACCT; + return TSDB_CODE_SUCCESS; } -static int tscBuildAcctMsgImpl(SSqlObj *pSql) { - SCreateAcctMsg *pAlterMsg; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SCreateUserMsg *pAlterMsg; + char * pMsg, *pStart; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + SSqlCmd *pCmd = &pSql->cmd; - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); + pAlterMsg = (SCreateUserMsg *)pMsg; - pAlterMsg = (SCreateAcctMsg *)pMsg; - strcpy(pAlterMsg->user, pMeterMetaInfo->name); - strcpy(pAlterMsg->pass, pCmd->payload); + SUserInfo *pUser = &pInfo->pDCLInfo->user; + strncpy(pAlterMsg->user, pUser->user.z, pUser->user.n); + + pAlterMsg->flag = pUser->type; - pMsg += sizeof(SCreateAcctMsg); + if (pUser->type == TSDB_ALTER_USER_PRIVILEGES) { + pAlterMsg->privilege = (char)pCmd->count; + } else if (pUser->type == TSDB_ALTER_USER_PASSWD) { + strncpy(pAlterMsg->pass, pUser->passwd.z, pUser->passwd.n); + } else { // create user password info + strncpy(pAlterMsg->pass, pUser->passwd.z, pUser->passwd.n); + } - pAlterMsg->cfg.maxUsers = htonl((int32_t)pCmd->defaultVal[0]); - pAlterMsg->cfg.maxDbs = htonl((int32_t)pCmd->defaultVal[1]); - pAlterMsg->cfg.maxTimeSeries = htonl((int32_t)pCmd->defaultVal[2]); - pAlterMsg->cfg.maxStreams = htonl((int32_t)pCmd->defaultVal[3]); - pAlterMsg->cfg.maxPointsPerSecond = htonl((int32_t)pCmd->defaultVal[4]); - pAlterMsg->cfg.maxStorage = htobe64(pCmd->defaultVal[5]); - pAlterMsg->cfg.maxQueryTime = htobe64(pCmd->defaultVal[6]); - pAlterMsg->cfg.maxConnections = htonl((int32_t)pCmd->defaultVal[7]); - pAlterMsg->cfg.accessState = (int8_t)pCmd->defaultVal[8]; + pMsg += sizeof(SCreateUserMsg); + pCmd->payloadLen = pMsg - pStart; - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; + if (pUser->type == TSDB_ALTER_USER_PASSWD || pUser->type == TSDB_ALTER_USER_PRIVILEGES) { + pCmd->msgType = TSDB_MSG_TYPE_ALTER_USER; + } else { + pCmd->msgType = TSDB_MSG_TYPE_CREATE_USER; + } - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildCreateAcctMsg(SSqlObj *pSql) { - int msgLen = tscBuildAcctMsgImpl(pSql); - pSql->cmd.msgType = TSDB_MSG_TYPE_CREATE_ACCT; - return msgLen; -} 
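The builders from here on share two conventions: the `SMgmtHead` prologue is written once by `doBuildMsgHeader` (defined below), and every multi-byte field is converted with `htons`/`htonl`/`htobe64` before it leaves the client. Below is a minimal sketch of that builder shape; the `MgmtHead`/`DemoMsg` types are hypothetical, and a portable fallback for `htobe64` is included since not every platform provides it.

```c
#include <arpa/inet.h>  /* htons, htonl */
#include <stdint.h>
#include <string.h>

/* Portable host-to-big-endian for 64-bit values, for platforms
 * where htobe64 is not available. */
static uint64_t toBe64(uint64_t v) {
  if (htonl(1) == 1) {
    return v;  /* big-endian host: already in network order */
  }
  uint32_t hi = htonl((uint32_t)(v >> 32));
  uint32_t lo = htonl((uint32_t)v);
  return ((uint64_t)lo << 32) | hi;
}

typedef struct { char db[32]; } MgmtHead;                   /* hypothetical header */
typedef struct { uint16_t vnode; uint64_t limit; } DemoMsg; /* hypothetical body */

/* Write the shared management header, remember where the message starts,
 * and return a cursor just past the header -- the doBuildMsgHeader shape. */
static char *buildHeader(char *payload, const char *db, char **start) {
  char *pMsg = payload;
  *start = pMsg;

  MgmtHead *pHead = (MgmtHead *)pMsg;
  memset(pHead, 0, sizeof(*pHead));
  strncpy(pHead->db, db, sizeof(pHead->db) - 1);

  return pMsg + sizeof(MgmtHead);
}

static int buildDemoMsg(char *payload, const char *db, int *payloadLen) {
  char *pStart = NULL;
  char *pMsg = buildHeader(payload, db, &pStart);

  DemoMsg *pBody = (DemoMsg *)pMsg;
  pBody->vnode = htons(2);      /* 16-bit field -> network byte order */
  pBody->limit = toBe64(100);   /* 64-bit field -> big endian */
  pMsg += sizeof(DemoMsg);

  *payloadLen = (int)(pMsg - pStart);  /* pCmd->payloadLen, in the real builders */
  return 0;                            /* status code, not a length */
}

int main(void) {
  char payload[128];
  int len = 0;
  return buildDemoMsg(payload, "log", &len) == 0 && len > 0 ? 0 : 1;
}
```

Returning a status code while recording the length through `pCmd->payloadLen` mirrors the new `int32_t`-returning builder signatures in this patch, which previously returned the message length directly.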
+int32_t tscBuildCfgDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + char * pStart = NULL; + SSqlCmd *pCmd = &pSql->cmd; -int tscBuildAlterAcctMsg(SSqlObj *pSql) { - int msgLen = tscBuildAcctMsgImpl(pSql); - pSql->cmd.msgType = TSDB_MSG_TYPE_ALTER_ACCT; - return msgLen; -} + char *pMsg = doBuildMsgHeader(pSql, &pStart); + pMsg += sizeof(SCfgMsg); -int tscBuildAlterUserMsg(SSqlObj *pSql) { - SAlterUserMsg *pAlterMsg; - char * pMsg, *pStart; - int msgLen = 0; + pCmd->payloadLen = pMsg - pStart; + pCmd->msgType = TSDB_MSG_TYPE_CFG_PNODE; - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + return TSDB_CODE_SUCCESS; +} - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; +char *doBuildMsgHeader(SSqlObj *pSql, char **pStart) { + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + + char *pMsg = pCmd->payload + tsRpcHeadSize; + *pStart = pMsg; SMgmtHead *pMgmt = (SMgmtHead *)pMsg; strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); - - pAlterMsg = (SCreateUserMsg *)pMsg; - strcpy(pAlterMsg->user, pMeterMetaInfo->name); - strcpy(pAlterMsg->pass, pCmd->payload); - pAlterMsg->flag = pCmd->order.order; - pAlterMsg->privilege = (char)pCmd->count; - pMsg += sizeof(SAlterUserMsg); - - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_ALTER_USER; + pMsg += sizeof(SMgmtHead); - return msgLen; + return pMsg; } -int tscBuildCfgDnodeMsg(SSqlObj *pSql) { - SCfgMsg *pCfg; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SDropDbMsg *pDropDbMsg; + char * pMsg, *pStart; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + SSqlCmd *pCmd = &pSql->cmd; - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); + pDropDbMsg = (SDropDbMsg *)pMsg; - pCfg = (SCfgMsg *)pMsg; - strcpy(pCfg->ip, pMeterMetaInfo->name); - strcpy(pCfg->config, pCmd->payload); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + strncpy(pDropDbMsg->db, pMeterMetaInfo->name, tListLen(pDropDbMsg->db)); + pDropDbMsg->ignoreNotExists = pInfo->pDCLInfo->existsCheck ? 
1 : 0; - pMsg += sizeof(SCfgMsg); + pMsg += sizeof(SDropDbMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_CFG_PNODE; + pCmd->payloadLen = pMsg - pStart; + pCmd->msgType = TSDB_MSG_TYPE_DROP_DB; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildDropDbMsg(SSqlObj *pSql) { - SDropDbMsg *pDropDbMsg; - char * pMsg, *pStart; - int msgLen = 0; +int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SDropTableMsg *pDropTableMsg; + char * pMsg, *pStart; + int msgLen = 0; - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlCmd *pCmd = &pSql->cmd; - pMsg = pCmd->payload + tsRpcHeadSize; + //pMsg = doBuildMsgHeader(pSql, &pStart); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + + pMsg = pCmd->payload + tsRpcHeadSize; pStart = pMsg; SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); + tscGetDBInfoFromMeterId(pMeterMetaInfo->name, pMgmt->db); pMsg += sizeof(SMgmtHead); - pDropDbMsg = (SDropDbMsg *)pMsg; - strncpy(pDropDbMsg->db, pMeterMetaInfo->name, tListLen(pDropDbMsg->db)); + pDropTableMsg = (SDropTableMsg *)pMsg; - pDropDbMsg->ignoreNotExists = htons(pCmd->existsCheck ? 1 : 0); + strcpy(pDropTableMsg->meterId, pMeterMetaInfo->name); - pMsg += sizeof(SDropDbMsg); + pDropTableMsg->igNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0; + pMsg += sizeof(SDropTableMsg); msgLen = pMsg - pStart; pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_DROP_DB; + pCmd->msgType = TSDB_MSG_TYPE_DROP_TABLE; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildDropUserMsg(SSqlObj *pSql) { - SDropUserMsg *pDropMsg; - char * pMsg, *pStart; - int msgLen = 0; +int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SDropDnodeMsg *pDrop; + char * pMsg, *pStart; SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); + pDrop = (SDropDnodeMsg *)pMsg; - pDropMsg = (SDropUserMsg *)pMsg; - strcpy(pDropMsg->user, pMeterMetaInfo->name); + strcpy(pDrop->ip, pMeterMetaInfo->name); - pMsg += sizeof(SDropUserMsg); + pMsg += sizeof(SDropDnodeMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_DROP_USER; + pCmd->payloadLen = pMsg - pStart; + pCmd->msgType = TSDB_MSG_TYPE_DROP_DNODE; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildDropAcctMsg(SSqlObj *pSql) { - SDropAcctMsg *pDropMsg; +int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SDropUserMsg *pDropMsg; char * pMsg, *pStart; - int msgLen = 0; - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + SSqlCmd *pCmd = &pSql->cmd; - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); + pDropMsg = (SDropUserMsg *)pMsg; - pDropMsg = (SDropAcctMsg *)pMsg; + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); strcpy(pDropMsg->user, pMeterMetaInfo->name); - pMsg += sizeof(SDropAcctMsg); + pMsg += 
sizeof(SDropUserMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_DROP_ACCT; + pCmd->payloadLen = pMsg - pStart; + pCmd->msgType = TSDB_MSG_TYPE_DROP_USER; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildUseDbMsg(SSqlObj *pSql) { +int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SUseDbMsg *pUseDbMsg; char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + SSqlCmd *pCmd = &pSql->cmd; + pMsg = doBuildMsgHeader(pSql, &pStart); pUseDbMsg = (SUseDbMsg *)pMsg; + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); strcpy(pUseDbMsg->db, pMeterMetaInfo->name); pMsg += sizeof(SUseDbMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; + pCmd->payloadLen = pMsg - pStart; pCmd->msgType = TSDB_MSG_TYPE_USE_DB; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildShowMsg(SSqlObj *pSql) { +int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SShowMsg *pShowMsg; char * pMsg, *pStart; int msgLen = 0; @@ -2162,10 +2180,6 @@ int tscBuildShowMsg(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; STscObj *pObj = pSql->pTscObj; - assert(pCmd->payloadLen < TSDB_SQLCMD_SIZE); - char payload[TSDB_SQLCMD_SIZE] = {0}; - memcpy(payload, pCmd->payload, pCmd->payloadLen); - int32_t size = minMsgSize() + sizeof(SMgmtHead) + sizeof(SShowTableMsg) + pCmd->payloadLen + TSDB_EXTRA_PAYLOAD_SIZE; if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { tscError("%p failed to malloc for show msg", pSql); @@ -2177,11 +2191,11 @@ int tscBuildShowMsg(SSqlObj *pSql) { SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); size_t nameLen = strlen(pMeterMetaInfo->name); if (nameLen > 0) { - strcpy(pMgmt->db, pMeterMetaInfo->name); + strcpy(pMgmt->db, pMeterMetaInfo->name); // prefix is set here } else { strcpy(pMgmt->db, pObj->db); } @@ -2189,153 +2203,109 @@ int tscBuildShowMsg(SSqlObj *pSql) { pMsg += sizeof(SMgmtHead); pShowMsg = (SShowMsg *)pMsg; - pShowMsg->type = pCmd->showType; + SShowInfo *pShowInfo = &pInfo->pDCLInfo->showOpt; - if ((pShowMsg->type == TSDB_MGMT_TABLE_TABLE || pShowMsg->type == TSDB_MGMT_TABLE_METRIC || pShowMsg->type == TSDB_MGMT_TABLE_VNODES || pShowMsg->type == TSDB_MGMT_TABLE_VGROUP) - && pCmd->payloadLen != 0) { - // only show tables support wildcard query - pShowMsg->payloadLen = htons(pCmd->payloadLen); - memcpy(pShowMsg->payload, payload, pCmd->payloadLen); - } + pShowMsg->type = pShowInfo->showType; - pMsg += (sizeof(SShowTableMsg) + pCmd->payloadLen); + if (pShowInfo->showType != TSDB_MGMT_TABLE_VNODES) { + SSQLToken *pPattern = &pShowInfo->pattern; + if (pPattern->type > 0) { // only show tables support wildcard query + strncpy(pShowMsg->payload, pPattern->z, pPattern->n); + pShowMsg->payloadLen = htons(pPattern->n); + } + pMsg += (sizeof(SShowTableMsg) + pPattern->n); + } else { + SSQLToken *pIpAddr = &pShowInfo->prefix; + assert(pIpAddr->n > 0 && pIpAddr->type > 0); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; + strncpy(pShowMsg->payload, pIpAddr->z, pIpAddr->n); + pShowMsg->payloadLen = htons(pIpAddr->n); + + pMsg += (sizeof(SShowTableMsg) + pIpAddr->n); + } + + 
pCmd->payloadLen = pMsg - pStart; pCmd->msgType = TSDB_MSG_TYPE_SHOW; assert(msgLen + minMsgSize() <= size); - return msgLen; + + return TSDB_CODE_SUCCESS; } -int tscBuildKillQueryMsg(SSqlObj *pSql) { +int32_t tscBuildKillMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SKillQuery *pKill; char * pMsg, *pStart; - int msgLen = 0; SSqlCmd *pCmd = &pSql->cmd; - STscObj *pObj = pSql->pTscObj; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); pKill = (SKillQuery *)pMsg; - pKill->handle = 0; - strcpy(pKill->queryId, pCmd->payload); - - pMsg += sizeof(SKillQuery); - - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_KILL_QUERY; - - return msgLen; -} - -int tscBuildKillStreamMsg(SSqlObj *pSql) { - SKillStream *pKill; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd *pCmd = &pSql->cmd; - STscObj *pObj = pSql->pTscObj; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); - - pKill = (SKillStream *)pMsg; - pKill->handle = 0; - strcpy(pKill->queryId, pCmd->payload); - - pMsg += sizeof(SKillStream); - - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_KILL_STREAM; - return msgLen; -} - -int tscBuildKillConnectionMsg(SSqlObj *pSql) { - SKillConnection *pKill; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd *pCmd = &pSql->cmd; - STscObj *pObj = pSql->pTscObj; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); - - pKill = (SKillStream *)pMsg; pKill->handle = 0; - strcpy(pKill->queryId, pCmd->payload); + strncpy(pKill->queryId, pInfo->pDCLInfo->ip.z, pInfo->pDCLInfo->ip.n); - pMsg += sizeof(SKillStream); + pMsg += sizeof(SKillQuery); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_KILL_CONNECTION; + pCmd->payloadLen = pMsg - pStart; - return msgLen; + switch (pCmd->command) { + case TSDB_SQL_KILL_QUERY: + pCmd->msgType = TSDB_MSG_TYPE_KILL_QUERY; + break; + case TSDB_SQL_KILL_CONNECTION: + pCmd->msgType = TSDB_MSG_TYPE_KILL_CONNECTION; + break; + case TSDB_SQL_KILL_STREAM: + pCmd->msgType = TSDB_MSG_TYPE_KILL_STREAM; + break; + } + return TSDB_CODE_SUCCESS; } -int tscEstimateCreateTableMsgLength(SSqlObj *pSql) { +int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &(pSql->cmd); int32_t size = minMsgSize() + sizeof(SMgmtHead) + sizeof(SCreateTableMsg); - if (pCmd->numOfCols == 0 && pCmd->count == 0) { + SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo; + if (pCreateTableInfo->type == TSQL_CREATE_TABLE_FROM_STABLE) { size += sizeof(STagData); } else { size += sizeof(SSchema) * (pCmd->numOfCols + pCmd->count); } - if (strlen(pCmd->payload) > 0) size += strlen(pCmd->payload) + 1; + if (pCreateTableInfo->pSelect != NULL) { + size += (pCreateTableInfo->pSelect->selectToken.n + 1); + } return size + TSDB_EXTRA_PAYLOAD_SIZE; } -int tscBuildCreateTableMsg(SSqlObj *pSql) { +int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SCreateTableMsg *pCreateTableMsg; char * pMsg, *pStart; int msgLen = 0; SSchema * pSchema; int size = 0; - // tmp variable to - // 1. save tags data in order to avoid too long tag values overlapped by header - // 2. save the selection clause, in create table as .. 
sql string - char *tmpData = calloc(1, pSql->cmd.allocSize); + SSqlCmd *pCmd = &pSql->cmd; - // STagData is in binary format, strncpy is not available - memcpy(tmpData, pSql->cmd.payload, pSql->cmd.allocSize); - - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); // Reallocate the payload size - size = tscEstimateCreateTableMsgLength(pSql); + size = tscEstimateCreateTableMsgLength(pSql, pInfo); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { tscError("%p failed to malloc for create table msg", pSql); - free(tmpData); - return -1; + return TSDB_CODE_CLI_OUT_OF_MEMORY; } pMsg = pCmd->payload + tsRpcHeadSize; pStart = pMsg; SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - // use dbinfo from meterid without modifying current db info + + // use dbinfo from table id without modifying current db info tscGetDBInfoFromMeterId(pMeterMetaInfo->name, pMgmt->db); pMsg += sizeof(SMgmtHead); @@ -2343,70 +2313,69 @@ int tscBuildCreateTableMsg(SSqlObj *pSql) { pCreateTableMsg = (SCreateTableMsg *)pMsg; strcpy(pCreateTableMsg->meterId, pMeterMetaInfo->name); - pCreateTableMsg->igExists = pCmd->existsCheck ? 1 : 0; + SCreateTableSQL *pCreateTable = pInfo->pCreateTableInfo; + + pCreateTableMsg->igExists = pCreateTable->existCheck ? 1 : 0; + pCreateTableMsg->numOfColumns = htons(pCmd->numOfCols); pCreateTableMsg->numOfTags = htons(pCmd->count); - pMsg = (char *)pCreateTableMsg->schema; pCreateTableMsg->sqlLen = 0; - short sqlLen = (short)(strlen(tmpData) + 1); + pMsg = (char *)pCreateTableMsg->schema; - if (pCmd->numOfCols == 0 && pCmd->count == 0) { - // create by using metric, tags value - memcpy(pMsg, tmpData, sizeof(STagData)); + int8_t type = pInfo->pCreateTableInfo->type; + if (type == TSQL_CREATE_TABLE_FROM_STABLE) { // create by using super table, tags value + memcpy(pMsg, &pInfo->pCreateTableInfo->usingInfo.tagdata, sizeof(STagData)); pMsg += sizeof(STagData); - } else { - // create metric/create normal meter + } else { // create (super) table pSchema = pCreateTableMsg->schema; + for (int i = 0; i < pCmd->numOfCols + pCmd->count; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); pSchema->type = pField->type; strcpy(pSchema->name, pField->name); pSchema->bytes = htons(pField->bytes); + pSchema++; } pMsg = (char *)pSchema; + if (type == TSQL_CREATE_STREAM) { // check if it is a stream sql + SQuerySQL *pQuerySql = pInfo->pCreateTableInfo->pSelect; - // check if it is a stream sql - if (sqlLen > 1) { - memcpy(pMsg, tmpData, sqlLen); - pMsg[sqlLen - 1] = 0; - - pCreateTableMsg->sqlLen = htons(sqlLen); - pMsg += sqlLen; + strncpy(pMsg, pQuerySql->selectToken.z, pQuerySql->selectToken.n + 1); + pCreateTableMsg->sqlLen = htons(pQuerySql->selectToken.n + 1); + pMsg += pQuerySql->selectToken.n + 1; } } - tfree(tmpData); - tscClearFieldInfo(&pCmd->fieldsInfo); + tscClearFieldInfo(&pQueryInfo->fieldsInfo); msgLen = pMsg - pStart; pCmd->payloadLen = msgLen; pCmd->msgType = TSDB_MSG_TYPE_CREATE_TABLE; assert(msgLen + minMsgSize() <= size); - return msgLen; + return TSDB_CODE_SUCCESS; } int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) { - return minMsgSize() + sizeof(SMgmtHead) + sizeof(SAlterTableMsg) + sizeof(SSchema) * pCmd->numOfCols + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + return minMsgSize() + sizeof(SMgmtHead) + 
sizeof(SAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo) + TSDB_EXTRA_PAYLOAD_SIZE; } -int tscBuildAlterTableMsg(SSqlObj *pSql) { +int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SAlterTableMsg *pAlterTableMsg; char * pMsg, *pStart; int msgLen = 0; int size = 0; - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); - char buf[TSDB_MAX_TAGS_LEN] = {0}; - int32_t len = (TSDB_MAX_TAGS_LEN < pCmd->allocSize) ? TSDB_MAX_TAGS_LEN : pCmd->allocSize; - memcpy(buf, pCmd->payload, len); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); size = tscEstimateAlterTableMsgLength(pCmd); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { @@ -2421,15 +2390,18 @@ int tscBuildAlterTableMsg(SSqlObj *pSql) { tscGetDBInfoFromMeterId(pMeterMetaInfo->name, pMgmt->db); pMsg += sizeof(SMgmtHead); + SAlterTableSQL *pAlterInfo = pInfo->pAlterInfo; + pAlterTableMsg = (SAlterTableMsg *)pMsg; strcpy(pAlterTableMsg->meterId, pMeterMetaInfo->name); - pAlterTableMsg->type = htons(pCmd->count); - pAlterTableMsg->numOfCols = htons(pCmd->numOfCols); - memcpy(pAlterTableMsg->tagVal, buf, TSDB_MAX_TAGS_LEN); + pAlterTableMsg->type = htons(pAlterInfo->type); + + pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo)); + memcpy(pAlterTableMsg->tagVal, pAlterInfo->tagData.data, TSDB_MAX_TAGS_LEN); SSchema *pSchema = pAlterTableMsg->schema; - for (int i = 0; i < pCmd->numOfCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + for (int i = 0; i < tscNumOfFields(pQueryInfo); ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); pSchema->type = pField->type; strcpy(pSchema->name, pField->name); @@ -2444,17 +2416,18 @@ int tscBuildAlterTableMsg(SSqlObj *pSql) { pCmd->msgType = TSDB_MSG_TYPE_ALTER_TABLE; assert(msgLen + minMsgSize() <= size); - return msgLen; + + return TSDB_CODE_SUCCESS; } -int tscAlterDbMsg(SSqlObj *pSql) { +int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SAlterDbMsg *pAlterDbMsg; char * pMsg, *pStart; int msgLen = 0; SSqlCmd * pCmd = &pSql->cmd; STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); pStart = pCmd->payload + tsRpcHeadSize; pMsg = pStart; @@ -2472,38 +2445,10 @@ int tscAlterDbMsg(SSqlObj *pSql) { pCmd->payloadLen = msgLen; pCmd->msgType = TSDB_MSG_TYPE_ALTER_DB; - return msgLen; -} - -int tscBuildDropTableMsg(SSqlObj *pSql) { - SDropTableMsg *pDropTableMsg; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - tscGetDBInfoFromMeterId(pMeterMetaInfo->name, pMgmt->db); - pMsg += sizeof(SMgmtHead); - - pDropTableMsg = (SDropTableMsg *)pMsg; - strcpy(pDropTableMsg->meterId, pMeterMetaInfo->name); - - pDropTableMsg->igNotExists = pCmd->existsCheck ? 
1 : 0; - pMsg += sizeof(SDropTableMsg); - - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_DROP_TABLE; - - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql) { +int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char *pMsg, *pStart; int msgLen = 0; @@ -2514,7 +2459,8 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql) { SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); size_t nameLen = strlen(pMeterMetaInfo->name); if (nameLen > 0) { @@ -2525,35 +2471,35 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql) { pMsg += sizeof(SMgmtHead); - *((uint64_t *) pMsg) = pSql->res.qhandle; + *((uint64_t *)pMsg) = pSql->res.qhandle; pMsg += sizeof(pSql->res.qhandle); - *((uint16_t*) pMsg) = htons(pCmd->type); - pMsg += sizeof(pCmd->type); + *((uint16_t *)pMsg) = htons(pQueryInfo->type); + pMsg += sizeof(pQueryInfo->type); msgLen = pMsg - pStart; pCmd->payloadLen = msgLen; pCmd->msgType = TSDB_MSG_TYPE_RETRIEVE; - return msgLen; + return TSDB_CODE_SUCCESS; } -static int tscSetResultPointer(SSqlCmd *pCmd, SSqlRes *pRes) { - if (tscCreateResPointerInfo(pCmd, pRes) != TSDB_CODE_SUCCESS) { +static int tscSetResultPointer(SQueryInfo *pQueryInfo, SSqlRes *pRes) { + if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) { return pRes->code; } - for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); - int16_t offset = tscFieldInfoGetOffset(pCmd, i); + for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); + int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i); pRes->bytes[i] = pField->bytes; - if (pCmd->order.order == TSQL_SO_DESC) { - pRes->bytes[i] = -pRes->bytes[i]; - pRes->tsrow[i] = ((pRes->data + offset * pRes->numOfRows) + (pRes->numOfRows - 1) * pField->bytes); - } else { +// if (pQueryInfo->order.order == TSQL_SO_DESC) { +// pRes->bytes[i] = -pRes->bytes[i]; +// pRes->tsrow[i] = ((pRes->data + offset * pRes->numOfRows) + (pRes->numOfRows - 1) * pField->bytes); +// } else { pRes->tsrow[i] = (pRes->data + offset * pRes->numOfRows); - } +// } } return 0; @@ -2569,6 +2515,8 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + pRes->code = TSDB_CODE_SUCCESS; if (pRes->rspType == 0) { @@ -2576,9 +2524,7 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) { pRes->row = 0; pRes->rspType = 1; - tscSetResultPointer(pCmd, pRes); - pRes->row = 0; - + tscSetResultPointer(pQueryInfo, pRes); } else { tscResetForNextRetrieve(pRes); } @@ -2597,7 +2543,7 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) { int tscProcessDescribeTableRsp(SSqlObj *pSql) { SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); int32_t numOfRes = pMeterMetaInfo->pMeterMeta->numOfColumns + pMeterMetaInfo->pMeterMeta->numOfTags; @@ -2605,11 +2551,13 @@ int tscProcessDescribeTableRsp(SSqlObj *pSql) { } int tscProcessTagRetrieveRsp(SSqlObj *pSql) { - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo 
*pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlCmd *pCmd = &pSql->cmd; + + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); int32_t numOfRes = 0; - if (tscSqlExprGet(pCmd, 0)->functionId == TSDB_FUNC_TAGPRJ) { + if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_TAGPRJ) { numOfRes = pMeterMetaInfo->pMetricMeta->numOfMeters; } else { numOfRes = 1; // for count function, there is only one output. @@ -2621,18 +2569,19 @@ int tscProcessRetrieveMetricRsp(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; - pRes->code = tscLocalDoReduce(pSql); + pRes->code = tscDoLocalreduce(pSql); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) { - tscSetResultPointer(pCmd, pRes); + tscSetResultPointer(pQueryInfo, pRes); } pRes->row = 0; - uint8_t code = pSql->res.code; + uint8_t code = pRes->code; if (pSql->fp) { // async retrieve metric data - if (pSql->res.code == TSDB_CODE_SUCCESS) { - (*pSql->fp)(pSql->param, pSql, pSql->res.numOfRows); + if (pRes->code == TSDB_CODE_SUCCESS) { + (*pSql->fp)(pSql->param, pSql, pRes->numOfRows); } else { tscQueueAsyncRes(pSql); } @@ -2643,10 +2592,9 @@ int tscProcessRetrieveMetricRsp(SSqlObj *pSql) { int tscProcessEmptyResultRsp(SSqlObj *pSql) { return tscLocalResultCommonBuilder(pSql, 0); } -int tscBuildConnectMsg(SSqlObj *pSql) { +int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SConnectMsg *pConnect; char * pMsg, *pStart; - int msgLen = 0; SSqlCmd *pCmd = &pSql->cmd; STscObj *pObj = pSql->pTscObj; @@ -2664,14 +2612,13 @@ int tscBuildConnectMsg(SSqlObj *pSql) { pMsg += sizeof(SConnectMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; + pCmd->payloadLen = pMsg - pStart; pCmd->msgType = TSDB_MSG_TYPE_CONNECT; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildMeterMetaMsg(SSqlObj *pSql) { +int tscBuildMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SMeterInfoMsg *pInfoMsg; char * pMsg, *pStart; int msgLen = 0; @@ -2679,13 +2626,18 @@ int tscBuildMeterMetaMsg(SSqlObj *pSql) { char *tmpData = 0; if (pSql->cmd.allocSize > 0) { tmpData = calloc(1, pSql->cmd.allocSize); - if (NULL == tmpData) return -1; + if (NULL == tmpData) { + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } + // STagData is in binary format, strncpy is not available memcpy(tmpData, pSql->cmd.payload, pSql->cmd.allocSize); } - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); pMsg = pCmd->payload + tsRpcHeadSize; pStart = pMsg; @@ -2697,10 +2649,10 @@ int tscBuildMeterMetaMsg(SSqlObj *pSql) { pInfoMsg = (SMeterInfoMsg *)pMsg; strcpy(pInfoMsg->meterId, pMeterMetaInfo->name); - pInfoMsg->createFlag = htons((uint16_t)pCmd->defaultVal[0]); + pInfoMsg->createFlag = htons(pSql->cmd.createOnDemand ? 
1 : 0); pMsg += sizeof(SMeterInfoMsg); - if (pCmd->defaultVal[0] != 0) { + if (pSql->cmd.createOnDemand) { memcpy(pInfoMsg->tags, tmpData, sizeof(STagData)); pMsg += sizeof(STagData); } @@ -2712,7 +2664,7 @@ int tscBuildMeterMetaMsg(SSqlObj *pSql) { tfree(tmpData); assert(msgLen + minMsgSize() <= pCmd->allocSize); - return msgLen; + return TSDB_CODE_SUCCESS; } /** @@ -2720,7 +2672,7 @@ int tscBuildMeterMetaMsg(SSqlObj *pSql) { * | SMgmtHead | SMultiMeterInfoMsg | meterId0 | meterId1 | meterId2 | ...... * no used 4B **/ -int tscBuildMultiMeterMetaMsg(SSqlObj *pSql) { +int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; // copy payload content to temp buff @@ -2758,35 +2710,38 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql) { static int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) { const int32_t defaultSize = minMsgSize() + sizeof(SMetricMetaMsg) + sizeof(SMgmtHead) + sizeof(int16_t) * TSDB_MAX_TAGS; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); int32_t n = 0; - for (int32_t i = 0; i < pCmd->tagCond.numOfTagCond; ++i) { - n += strlen(pCmd->tagCond.cond[i].cond); + for (int32_t i = 0; i < pQueryInfo->tagCond.numOfTagCond; ++i) { + n += strlen(pQueryInfo->tagCond.cond[i].cond); } int32_t tagLen = n * TSDB_NCHAR_SIZE; - if (pCmd->tagCond.tbnameCond.cond != NULL) { - tagLen += strlen(pCmd->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE; + if (pQueryInfo->tagCond.tbnameCond.cond != NULL) { + tagLen += strlen(pQueryInfo->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE; } - + int32_t joinCondLen = (TSDB_METER_ID_LEN + sizeof(int16_t)) * 2; - int32_t elemSize = sizeof(SMetricMetaElemMsg) * pCmd->numOfTables; + int32_t elemSize = sizeof(SMetricMetaElemMsg) * pQueryInfo->numOfTables; int32_t len = tagLen + joinCondLen + elemSize + defaultSize; return MAX(len, TSDB_DEFAULT_PAYLOAD_SIZE); } -int tscBuildMetricMetaMsg(SSqlObj *pSql) { +int tscBuildMetricMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SMetricMetaMsg *pMetaMsg; char * pMsg, *pStart; int msgLen = 0; int tableIndex = 0; - SSqlCmd * pCmd = &pSql->cmd; - STagCond *pTagCond = &pCmd->tagCond; + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + STagCond *pTagCond = &pQueryInfo->tagCond; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); int32_t size = tscEstimateMetricMetaMsgSize(pCmd); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { @@ -2803,7 +2758,7 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { pMsg += sizeof(SMgmtHead); pMetaMsg = (SMetricMetaMsg *)pMsg; - pMetaMsg->numOfMeters = htonl(pCmd->numOfTables); + pMetaMsg->numOfMeters = htonl(pQueryInfo->numOfTables); pMsg += sizeof(SMetricMetaMsg); @@ -2825,8 +2780,8 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { *(int16_t *)pMsg = pTagCond->joinInfo.right.tagCol; pMsg += sizeof(int16_t); - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, i); uint64_t uid = pMeterMetaInfo->pMeterMeta->uid; offset = pMsg - (char *)pMetaMsg; @@ -2841,7 +2796,7 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { SCond *pCond = tsGetMetricQueryCondPos(pTagCond, uid); if (pCond != NULL) { condLen = strlen(pCond->cond) + 1; - + bool ret = taosMbsToUcs4(pCond->cond, condLen, pMsg, condLen * TSDB_NCHAR_SIZE); if (!ret) { tscError("%p mbs to ucs4 
failed:%s", pSql, tsGetMetricQueryCondPos(pTagCond, uid)); @@ -2861,7 +2816,7 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { offset = pMsg - (char *)pMetaMsg; pElem->tableCond = htonl(offset); - + uint32_t len = strlen(pTagCond->tbnameCond.cond); pElem->tableCondLen = htonl(len); @@ -2869,7 +2824,7 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { pMsg += len; } - SSqlGroupbyExpr *pGroupby = &pCmd->groupbyExpr; + SSqlGroupbyExpr *pGroupby = &pQueryInfo->groupbyExpr; if (pGroupby->tableIndex != i && pGroupby->numOfGroupCols > 0) { pElem->orderType = 0; @@ -2887,15 +2842,15 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { offset = pMsg - (char *)pMetaMsg; pElem->groupbyTagColumnList = htonl(offset); - for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) { - SColIndexEx *pCol = &pCmd->groupbyExpr.columnInfo[j]; - SColIndexEx* pDestCol = (SColIndexEx*) pMsg; - + for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) { + SColIndexEx *pCol = &pQueryInfo->groupbyExpr.columnInfo[j]; + SColIndexEx *pDestCol = (SColIndexEx *)pMsg; + pDestCol->colIdxInBuf = 0; pDestCol->colIdx = htons(pCol->colIdx); pDestCol->colId = htons(pDestCol->colId); pDestCol->flag = htons(pDestCol->flag); - + pMsg += sizeof(SColIndexEx); } } @@ -2912,7 +2867,8 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { pCmd->payloadLen = msgLen; pCmd->msgType = TSDB_MSG_TYPE_METRIC_META; assert(msgLen + minMsgSize() <= size); - return msgLen; + + return TSDB_CODE_SUCCESS; } int tscEstimateHeartBeatMsgLength(SSqlObj *pSql) { @@ -2938,7 +2894,7 @@ int tscEstimateHeartBeatMsgLength(SSqlObj *pSql) { return size + TSDB_EXTRA_PAYLOAD_SIZE; } -int tscBuildHeartBeatMsg(SSqlObj *pSql) { +int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char *pMsg, *pStart; int msgLen = 0; int size = 0; @@ -2972,28 +2928,6 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql) { return msgLen; } -int tscProcessRetrieveRspFromMgmt(SSqlObj *pSql) { - SSqlRes *pRes = &pSql->res; - SSqlCmd *pCmd = &pSql->cmd; - STscObj *pObj = pSql->pTscObj; - - SRetrieveMeterRsp *pRetrieve = (SRetrieveMeterRsp *)(pRes->pRsp); - pRes->numOfRows = htonl(pRetrieve->numOfRows); - pRes->precision = htons(pRes->precision); - - pRes->data = pRetrieve->data; - - tscSetResultPointer(pCmd, pRes); - - if (pRes->numOfRows == 0) { - taosAddConnIntoCache(tscConnCache, pSql->thandle, pSql->ip, pSql->vnode, pObj->user); - pSql->thandle = NULL; - } - - pRes->row = 0; - return 0; -} - int tscProcessMeterMetaRsp(SSqlObj *pSql) { SMeterMeta *pMeta; SSchema * pSchema; @@ -3022,17 +2956,12 @@ int tscProcessMeterMetaRsp(SSqlObj *pSql) { pMeta->numOfColumns = htons(pMeta->numOfColumns); - if (pMeta->numOfTags > TSDB_MAX_TAGS || pMeta->numOfTags < 0) { - tscError("invalid tag value count:%d", pMeta->numOfTags); - return TSDB_CODE_INVALID_VALUE; - } - if (pMeta->numOfTags > TSDB_MAX_TAGS || pMeta->numOfTags < 0) { tscError("invalid numOfTags:%d", pMeta->numOfTags); return TSDB_CODE_INVALID_VALUE; } - if (pMeta->numOfColumns > TSDB_MAX_COLUMNS || pMeta->numOfColumns < 0) { + if (pMeta->numOfColumns > TSDB_MAX_COLUMNS || pMeta->numOfColumns <= 0) { tscError("invalid numOfColumns:%d", pMeta->numOfColumns); return TSDB_CODE_INVALID_VALUE; } @@ -3075,11 +3004,12 @@ int tscProcessMeterMetaRsp(SSqlObj *pSql) { pMeta->index = 0; // todo add one more function: taosAddDataIfNotExists(); - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMeterMeta), false); + SMeterMetaInfo *pMeterMetaInfo = 
tscGetMeterMetaInfo(&pSql->cmd, 0, 0); + assert(pMeterMetaInfo->pMeterMeta == NULL); pMeterMetaInfo->pMeterMeta = (SMeterMeta *)taosAddDataIntoCache(tscCacheHandle, pMeterMetaInfo->name, (char *)pMeta, size, tsMeterMetaKeepTimer); + // todo handle out of memory case if (pMeterMetaInfo->pMeterMeta == NULL) return 0; return TSDB_CODE_OTHERS; @@ -3269,10 +3199,10 @@ int tscProcessMetricMetaRsp(SSqlObj *pSql) { for (int32_t j = 0; j < pSidLists->numOfSids; ++j) { pLists->pSidExtInfoList[j] = pBuf - (char *)pLists; memcpy(pBuf, rsp, elemSize); - - ((SMeterSidExtInfo*) pBuf)->uid = htobe64(((SMeterSidExtInfo*) pBuf)->uid); - ((SMeterSidExtInfo*) pBuf)->sid = htonl(((SMeterSidExtInfo*) pBuf)->sid); - + + ((SMeterSidExtInfo *)pBuf)->uid = htobe64(((SMeterSidExtInfo *)pBuf)->uid); + ((SMeterSidExtInfo *)pBuf)->sid = htonl(((SMeterSidExtInfo *)pBuf)->sid); + rsp += elemSize; pBuf += elemSize; } @@ -3281,11 +3211,12 @@ int tscProcessMetricMetaRsp(SSqlObj *pSql) { sizes[k] = pBuf - (char *)pNewMetricMeta; } + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); for (int32_t i = 0; i < num; ++i) { char name[TSDB_MAX_TAGS_LEN + 1] = {0}; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, i); - tscGetMetricMetaCacheKey(&pSql->cmd, name, pMeterMetaInfo->pMeterMeta->uid); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); + tscGetMetricMetaCacheKey(pQueryInfo, name, pMeterMetaInfo->pMeterMeta->uid); #ifdef _DEBUG_VIEW printf("generate the metric key:%s, index:%d\n", name, i); @@ -3326,9 +3257,12 @@ int tscProcessShowRsp(SSqlObj *pSql) { SSchema * pSchema; char key[20]; - SSqlRes * pRes = &pSql->res; - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); //? 
+ + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); pShow = (SShowRspMsg *)pRes->pRsp; pRes->qhandle = pShow->qhandle; @@ -3345,7 +3279,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { pSchema++; } - key[0] = pCmd->showType + 'a'; + key[0] = pCmd->msgType + 'a'; strcpy(key + 1, "showlist"); taosRemoveDataFromCache(tscCacheHandle, (void *)&(pMeterMetaInfo->pMeterMeta), false); @@ -3353,24 +3287,24 @@ int tscProcessShowRsp(SSqlObj *pSql) { int32_t size = pMeta->numOfColumns * sizeof(SSchema) + sizeof(SMeterMeta); pMeterMetaInfo->pMeterMeta = (SMeterMeta *)taosAddDataIntoCache(tscCacheHandle, key, (char *)pMeta, size, tsMeterMetaKeepTimer); - pCmd->numOfCols = pCmd->fieldsInfo.numOfOutputCols; + pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutputCols; SSchema *pMeterSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); - tscColumnBaseInfoReserve(&pCmd->colList, pMeta->numOfColumns); + tscColumnBaseInfoReserve(&pQueryInfo->colList, pMeta->numOfColumns); SColumnIndex index = {0}; for (int16_t i = 0; i < pMeta->numOfColumns; ++i) { index.columnIndex = i; - tscColumnBaseInfoInsert(pCmd, &index); - tscFieldInfoSetValFromSchema(&pCmd->fieldsInfo, i, &pMeterSchema[i]); + tscColumnBaseInfoInsert(pQueryInfo, &index); + tscFieldInfoSetValFromSchema(&pQueryInfo->fieldsInfo, i, &pMeterSchema[i]); } - tscFieldInfoCalOffset(pCmd); + tscFieldInfoCalOffset(pQueryInfo); return 0; } int tscProcessConnectRsp(SSqlObj *pSql) { - char temp[TSDB_METER_ID_LEN*2]; + char temp[TSDB_METER_ID_LEN * 2]; SConnectRsp *pConnect; STscObj *pObj = pSql->pTscObj; @@ -3378,25 +3312,16 @@ int tscProcessConnectRsp(SSqlObj *pSql) { pConnect = (SConnectRsp *)pRes->pRsp; strcpy(pObj->acctId, pConnect->acctId); // copy acctId from response - int32_t len =sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db); - + int32_t len = sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db); + assert(len <= tListLen(pObj->db)); strncpy(pObj->db, temp, tListLen(pObj->db)); -#ifdef CLUSTER SIpList * pIpList; char *rsp = pRes->pRsp + sizeof(SConnectRsp); pIpList = (SIpList *)rsp; - tscMgmtIpList.numOfIps = pIpList->numOfIps; - for (int i = 0; i < pIpList->numOfIps; ++i) { - tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); - tscMgmtIpList.ip[i] = pIpList->ip[i]; - } - - rsp += sizeof(SIpList) + sizeof(int32_t) * pIpList->numOfIps; + tscSetMgmtIpList(pIpList); - tscPrintMgmtIp(); -#endif strcpy(pObj->sversion, pConnect->version); pObj->writeAuth = pConnect->writeAuth; pObj->superAuth = pConnect->superAuth; @@ -3407,7 +3332,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) { int tscProcessUseDbRsp(SSqlObj *pSql) { STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); strcpy(pObj->db, pMeterMetaInfo->name); return 0; @@ -3419,7 +3344,7 @@ int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) { } int tscProcessDropTableRsp(SSqlObj *pSql) { - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); SMeterMeta *pMeterMeta = taosGetDataFromCache(tscCacheHandle, pMeterMetaInfo->name); if (pMeterMeta == NULL) { @@ -3446,7 +3371,7 @@ int tscProcessDropTableRsp(SSqlObj *pSql) { } int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); SMeterMeta *pMeterMeta = 
taosGetDataFromCache(tscCacheHandle, pMeterMetaInfo->name); if (pMeterMeta == NULL) { /* not in cache, abort */ @@ -3457,12 +3382,12 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { taosRemoveDataFromCache(tscCacheHandle, (void **)&pMeterMeta, true); if (pMeterMetaInfo->pMeterMeta) { - bool isMetric = UTIL_METER_IS_METRIC(pMeterMetaInfo); + bool isSuperTable = UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo); taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMeterMeta), true); taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMetricMeta), true); - if (isMetric) { // if it is a metric, reset whole query cache + if (isSuperTable) { // if it is a super table, reset whole query cache tscTrace("%p reset query cache since table:%s is stable", pSql, pMeterMetaInfo->name); taosClearDataCache(tscCacheHandle); } @@ -3495,19 +3420,39 @@ int tscProcessRetrieveRspFromVnode(SSqlObj *pSql) { pRes->numOfRows = htonl(pRetrieve->numOfRows); pRes->precision = htons(pRetrieve->precision); pRes->offset = htobe64(pRetrieve->offset); - pRes->useconds = htobe64(pRetrieve->useconds); pRes->data = pRetrieve->data; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + tscSetResultPointer(pQueryInfo, pRes); + + if (pSql->pSubscription != NULL) { + int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutputCols; + + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, numOfCols - 1); + int16_t offset = tscFieldInfoGetOffset(pQueryInfo, numOfCols - 1); + + char* p = pRes->data + (pField->bytes + offset) * pRes->numOfRows; + + int32_t numOfMeters = htonl(*(int32_t*)p); + p += sizeof(int32_t); + for (int i = 0; i < numOfMeters; i++) { + int64_t uid = htobe64(*(int64_t*)p); + p += sizeof(int64_t); + TSKEY key = htobe64(*(TSKEY*)p); + p += sizeof(TSKEY); + tscUpdateSubscriptionProgress(pSql->pSubscription, uid, key); + } + } - tscSetResultPointer(pCmd, pRes); pRes->row = 0; /** * If the query result is exhausted, or current query is to free resource at server side, * the connection will be recycled. 
*/ - if ((pRes->numOfRows == 0 && !(tscProjectionQueryOnMetric(pCmd) && pRes->offset > 0)) || - ((pCmd->type & TSDB_QUERY_TYPE_FREE_RESOURCE) == TSDB_QUERY_TYPE_FREE_RESOURCE)) { + if ((pRes->numOfRows == 0 && !(tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && pRes->offset > 0)) || + ((pQueryInfo->type & TSDB_QUERY_TYPE_FREE_RESOURCE) == TSDB_QUERY_TYPE_FREE_RESOURCE)) { tscTrace("%p no result or free resource, recycle connection", pSql); taosAddConnIntoCache(tscConnCache, pSql->thandle, pSql->ip, pSql->vnode, pObj->user); pSql->thandle = NULL; @@ -3519,21 +3464,23 @@ int tscProcessRetrieveRspFromVnode(SSqlObj *pSql) { } int tscProcessRetrieveRspFromLocal(SSqlObj *pSql) { - SSqlRes * pRes = &pSql->res; - SSqlCmd * pCmd = &pSql->cmd; + SSqlRes * pRes = &pSql->res; + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SRetrieveMeterRsp *pRetrieve = (SRetrieveMeterRsp *)pRes->pRsp; pRes->numOfRows = htonl(pRetrieve->numOfRows); pRes->data = pRetrieve->data; - tscSetResultPointer(pCmd, pRes); + tscSetResultPointer(pQueryInfo, pRes); pRes->row = 0; return 0; } void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code); -static int32_t tscDoGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { +static int32_t doGetMeterMetaFromServer(SSqlObj *pSql, SMeterMetaInfo *pMeterMetaInfo) { int32_t code = TSDB_CODE_SUCCESS; SSqlObj *pNew = calloc(1, sizeof(SSqlObj)); @@ -3541,23 +3488,29 @@ static int32_t tscDoGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { tscError("%p malloc failed for new sqlobj to get meter meta", pSql); return TSDB_CODE_CLI_OUT_OF_MEMORY; } + pNew->pTscObj = pSql->pTscObj; pNew->signature = pNew; pNew->cmd.command = TSDB_SQL_META; - pNew->cmd.payload = NULL; - pNew->cmd.allocSize = 0; - pNew->cmd.defaultVal[0] = pSql->cmd.defaultVal[0]; // flag of create table if not exists + tscAddSubqueryInfo(&pNew->cmd); + + SQueryInfo *pNewQueryInfo = NULL; + tscGetQueryInfoDetailSafely(&pNew->cmd, 0, &pNewQueryInfo); + + pNew->cmd.createOnDemand = pSql->cmd.createOnDemand; // create table if not exists if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) { tscError("%p malloc failed for payload to get meter meta", pSql); free(pNew); + return TSDB_CODE_CLI_OUT_OF_MEMORY; } - SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(&pNew->cmd); + SMeterMetaInfo *pNewMeterMetaInfo = tscAddEmptyMeterMetaInfo(pNewQueryInfo); + assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1); - strcpy(pMeterMetaInfo->name, meterId); - memcpy(pNew->cmd.payload, pSql->cmd.payload, TSDB_DEFAULT_PAYLOAD_SIZE); + strcpy(pNewMeterMetaInfo->name, pMeterMetaInfo->name); + memcpy(pNew->cmd.payload, pSql->cmd.payload, TSDB_DEFAULT_PAYLOAD_SIZE); // tag information if table does not exists. tscTrace("%p new pSqlObj:%p to get meterMeta", pSql, pNew); if (pSql->fp == NULL) { @@ -3565,14 +3518,17 @@ static int32_t tscDoGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { tsem_init(&pNew->emptyRspSem, 0, 1); code = tscProcessSql(pNew); - SMeterMetaInfo *pInfo = tscGetMeterMetaInfo(&pSql->cmd, index); - // update cache only on success get metermeta + /* + * Update cache only on succeeding in getting metermeta. 
+ * Transfer the ownership of metermeta to the new object, instead of invoking the release/acquire routine + */ if (code == TSDB_CODE_SUCCESS) { - pInfo->pMeterMeta = (SMeterMeta *)taosGetDataFromCache(tscCacheHandle, meterId); + pMeterMetaInfo->pMeterMeta = taosTransferDataInCache(tscCacheHandle, (void**) &pNewMeterMetaInfo->pMeterMeta); + assert(pMeterMetaInfo->pMeterMeta != NULL); } - tscTrace("%p get meter meta complete, code:%d, pMeterMeta:%p", pSql, code, pInfo->pMeterMeta); + tscTrace("%p get meter meta complete, code:%d, pMeterMeta:%p", pSql, code, pMeterMetaInfo->pMeterMeta); tscFreeSqlObj(pNew); } else { @@ -3589,14 +3545,15 @@ static int32_t tscDoGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { return code; } -int tscGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index); - - // if the SSqlCmd owns a metermeta, release it first - taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMeterMeta), false); - pMeterMetaInfo->pMeterMeta = (SMeterMeta *)taosGetDataFromCache(tscCacheHandle, meterId); +int tscGetMeterMeta(SSqlObj *pSql, SMeterMetaInfo *pMeterMetaInfo) { + assert(strlen(pMeterMetaInfo->name) != 0); + // If this SMeterMetaInfo owns a metermeta, release it first + if (pMeterMetaInfo->pMeterMeta != NULL) { + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMeterMeta), false); + } + + pMeterMetaInfo->pMeterMeta = (SMeterMeta *)taosGetDataFromCache(tscCacheHandle, pMeterMetaInfo->name); if (pMeterMetaInfo->pMeterMeta != NULL) { SMeterMeta *pMeterMeta = pMeterMetaInfo->pMeterMeta; @@ -3610,16 +3567,12 @@ int tscGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { * for async insert operation, release data block buffer before issue new object to get metermeta * because in metermeta callback function, the tscParse function will generate the submit data blocks */ - //if (pSql->fp != NULL && pSql->pStream == NULL) { - // tscFreeSqlCmdData(pCmd); - //} - - return tscDoGetMeterMeta(pSql, meterId, index); + return doGetMeterMetaFromServer(pSql, pMeterMetaInfo); } -int tscGetMeterMetaEx(SSqlObj *pSql, char *meterId, bool createIfNotExists) { - pSql->cmd.defaultVal[0] = createIfNotExists ? 
1 : 0; - return tscGetMeterMeta(pSql, meterId, 0); +int tscGetMeterMetaEx(SSqlObj *pSql, SMeterMetaInfo *pMeterMetaInfo, bool createIfNotExists) { + pSql->cmd.createOnDemand = createIfNotExists; + return tscGetMeterMeta(pSql, pMeterMetaInfo); } /* @@ -3642,12 +3595,14 @@ static void tscWaitingForCreateTable(SSqlCmd *pCmd) { * @return status code */ int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) { - int code = 0; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + int code = 0; // handle metric meta renew process SSqlCmd *pCmd = &pSql->cmd; + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + // enforce the renew metermeta operation in async model if (pSql->fp == NULL) pSql->fp = (void *)0x1; @@ -3660,10 +3615,11 @@ int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) { tscTrace("%p update meter meta, old: numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql, pMeterMetaInfo->numOfTags, pCmd->numOfCols, pMeterMetaInfo->pMeterMeta->uid, pMeterMetaInfo->pMeterMeta); } - tscWaitingForCreateTable(&pSql->cmd); + + tscWaitingForCreateTable(pCmd); taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMeterMeta), true); - code = tscDoGetMeterMeta(pSql, meterId, 0); // todo ?? + code = doGetMeterMetaFromServer(pSql, pMeterMetaInfo); // todo ?? } else { tscTrace("%p metric query not update metric meta, numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql, pMeterMetaInfo->pMeterMeta->numOfTags, pCmd->numOfCols, pMeterMetaInfo->pMeterMeta->uid, @@ -3679,33 +3635,35 @@ int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) { return code; } -int tscGetMetricMeta(SSqlObj *pSql) { +int tscGetMetricMeta(SSqlObj *pSql, int32_t clauseIndex) { int code = TSDB_CODE_NETWORK_UNAVAIL; SSqlCmd *pCmd = &pSql->cmd; /* - * the vnode query condition is serialized into pCmd->payload, we need to rebuild key for metricmeta info in cache. + * the query condition is serialized into pCmd->payload, we need to rebuild key for metricmeta info in cache. 
*/ - bool reqMetricMeta = false; - for (int32_t i = 0; i < pSql->cmd.numOfTables; ++i) { + bool required = false; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { char tagstr[TSDB_MAX_TAGS_LEN + 1] = {0}; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); - tscGetMetricMetaCacheKey(pCmd, tagstr, pMeterMetaInfo->pMeterMeta->uid); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); + tscGetMetricMetaCacheKey(pQueryInfo, tagstr, pMeterMetaInfo->pMeterMeta->uid); taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMetricMeta), false); SMetricMeta *ppMeta = (SMetricMeta *)taosGetDataFromCache(tscCacheHandle, tagstr); if (ppMeta == NULL) { - reqMetricMeta = true; + required = true; break; } else { pMeterMetaInfo->pMetricMeta = ppMeta; } } - // all metricmeta are retrieved from cache, no need to query mgmt node - if (!reqMetricMeta) { + // all metricmeta for one clause are retrieved from cache, no need to retrieve metricmeta from management node + if (!required) { return TSDB_CODE_SUCCESS; } @@ -3714,12 +3672,17 @@ int tscGetMetricMeta(SSqlObj *pSql) { pNew->signature = pNew; pNew->cmd.command = TSDB_SQL_METRIC; - - for (int32_t i = 0; i < pSql->cmd.numOfTables; ++i) { - SMeterMetaInfo *pMMInfo = tscGetMeterMetaInfo(&pSql->cmd, i); + + SQueryInfo *pNewQueryInfo = NULL; + if ((code = tscGetQueryInfoDetailSafely(&pNew->cmd, 0, &pNewQueryInfo)) != TSDB_CODE_SUCCESS) { + return code; + } + + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + SMeterMetaInfo *pMMInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); SMeterMeta *pMeterMeta = taosGetDataFromCache(tscCacheHandle, pMMInfo->name); - tscAddMeterMetaInfo(&pNew->cmd, pMMInfo->name, pMeterMeta, NULL, pMMInfo->numOfTags, pMMInfo->tagColumnIndex); + tscAddMeterMetaInfo(pNewQueryInfo, pMMInfo->name, pMeterMeta, NULL, pMMInfo->numOfTags, pMMInfo->tagColumnIndex); } if ((code = tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) { @@ -3727,18 +3690,23 @@ int tscGetMetricMeta(SSqlObj *pSql) { return code; } - // the query condition on meter is serialized into payload - tscTagCondCopy(&pNew->cmd.tagCond, &pSql->cmd.tagCond); + tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond); - pNew->cmd.groupbyExpr = pSql->cmd.groupbyExpr; - pNew->cmd.numOfTables = pSql->cmd.numOfTables; + pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr; + pNewQueryInfo->numOfTables = pQueryInfo->numOfTables; - pNew->cmd.slimit = pSql->cmd.slimit; - pNew->cmd.order = pSql->cmd.order; + pNewQueryInfo->slimit = pQueryInfo->slimit; + pNewQueryInfo->order = pQueryInfo->order; + + STagCond* pTagCond = &pNewQueryInfo->tagCond; + tscTrace("%p new sqlobj:%p info, numOfTables:%d, slimit:%" PRId64 ", soffset:%" PRId64 ", order:%d, tbname cond:%s", + pSql, pNew, pNewQueryInfo->numOfTables, pNewQueryInfo->slimit.limit, pNewQueryInfo->slimit.offset, + pNewQueryInfo->order.order, pTagCond->tbnameCond.cond) - if (pSql->fp != NULL && pSql->pStream == NULL) { - tscFreeSqlCmdData(&pSql->cmd); - } +// if (pSql->fp != NULL && pSql->pStream == NULL) { +// pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); +// tscFreeSubqueryInfo(pCmd); +// } tscTrace("%p allocate new pSqlObj:%p to get metricMeta", pSql, pNew); if (pSql->fp == NULL) { @@ -3747,18 +3715,20 @@ int tscGetMetricMeta(SSqlObj *pSql) { code = tscProcessSql(pNew); - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - char 
tagstr[TSDB_MAX_TAGS_LEN] = {0}; - - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); - tscGetMetricMetaCacheKey(pCmd, tagstr, pMeterMetaInfo->pMeterMeta->uid); + if (code == TSDB_CODE_SUCCESS) {//todo optimize the performance + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + char tagstr[TSDB_MAX_TAGS_LEN] = {0}; + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); + tscGetMetricMetaCacheKey(pQueryInfo, tagstr, pMeterMetaInfo->pMeterMeta->uid); #ifdef _DEBUG_VIEW - printf("create metric key:%s, index:%d\n", tagstr, i); + printf("create metric key:%s, index:%d\n", tagstr, i); #endif - - taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMetricMeta), false); - pMeterMetaInfo->pMetricMeta = (SMetricMeta *)taosGetDataFromCache(tscCacheHandle, tagstr); + + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMetricMeta), false); + pMeterMetaInfo->pMetricMeta = (SMetricMeta *)taosGetDataFromCache(tscCacheHandle, tagstr); + } } tscFreeSqlObj(pNew); @@ -3780,17 +3750,17 @@ void tscInitMsgs() { tscBuildMsg[TSDB_SQL_FETCH] = tscBuildRetrieveMsg; tscBuildMsg[TSDB_SQL_CREATE_DB] = tscBuildCreateDbMsg; - tscBuildMsg[TSDB_SQL_CREATE_USER] = tscBuildCreateUserMsg; + tscBuildMsg[TSDB_SQL_CREATE_USER] = tscBuildUserMsg; - tscBuildMsg[TSDB_SQL_CREATE_ACCT] = tscBuildCreateAcctMsg; - tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAlterAcctMsg; + tscBuildMsg[TSDB_SQL_CREATE_ACCT] = tscBuildAcctMsg; + tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAcctMsg; tscBuildMsg[TSDB_SQL_CREATE_TABLE] = tscBuildCreateTableMsg; - tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserMsg; + tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropAcctMsg; tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropAcctMsg; tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg; tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg; - tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildAlterUserMsg; + tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg; tscBuildMsg[TSDB_SQL_CREATE_DNODE] = tscBuildCreateDnodeMsg; tscBuildMsg[TSDB_SQL_DROP_DNODE] = tscBuildDropDnodeMsg; tscBuildMsg[TSDB_SQL_CFG_DNODE] = tscBuildCfgDnodeMsg; @@ -3806,9 +3776,9 @@ void tscInitMsgs() { tscBuildMsg[TSDB_SQL_HB] = tscBuildHeartBeatMsg; tscBuildMsg[TSDB_SQL_SHOW] = tscBuildShowMsg; tscBuildMsg[TSDB_SQL_RETRIEVE] = tscBuildRetrieveFromMgmtMsg; - tscBuildMsg[TSDB_SQL_KILL_QUERY] = tscBuildKillQueryMsg; - tscBuildMsg[TSDB_SQL_KILL_STREAM] = tscBuildKillStreamMsg; - tscBuildMsg[TSDB_SQL_KILL_CONNECTION] = tscBuildKillConnectionMsg; + tscBuildMsg[TSDB_SQL_KILL_QUERY] = tscBuildKillMsg; + tscBuildMsg[TSDB_SQL_KILL_STREAM] = tscBuildKillMsg; + tscBuildMsg[TSDB_SQL_KILL_CONNECTION] = tscBuildKillMsg; tscProcessMsgRsp[TSDB_SQL_SELECT] = tscProcessQueryRsp; tscProcessMsgRsp[TSDB_SQL_FETCH] = tscProcessRetrieveRspFromVnode; @@ -3822,16 +3792,16 @@ void tscInitMsgs() { tscProcessMsgRsp[TSDB_SQL_MULTI_META] = tscProcessMultiMeterMetaRsp; tscProcessMsgRsp[TSDB_SQL_SHOW] = tscProcessShowRsp; - tscProcessMsgRsp[TSDB_SQL_RETRIEVE] = tscProcessRetrieveRspFromVnode; // rsp handled by same function. + tscProcessMsgRsp[TSDB_SQL_RETRIEVE] = tscProcessRetrieveRspFromVnode; // rsp handled by same function. 
tscProcessMsgRsp[TSDB_SQL_DESCRIBE_TABLE] = tscProcessDescribeTableRsp; - + tscProcessMsgRsp[TSDB_SQL_RETRIEVE_TAGS] = tscProcessTagRetrieveRsp; tscProcessMsgRsp[TSDB_SQL_CURRENT_DB] = tscProcessTagRetrieveRsp; tscProcessMsgRsp[TSDB_SQL_CURRENT_USER] = tscProcessTagRetrieveRsp; tscProcessMsgRsp[TSDB_SQL_SERV_VERSION] = tscProcessTagRetrieveRsp; tscProcessMsgRsp[TSDB_SQL_CLI_VERSION] = tscProcessTagRetrieveRsp; tscProcessMsgRsp[TSDB_SQL_SERV_STATUS] = tscProcessTagRetrieveRsp; - + tscProcessMsgRsp[TSDB_SQL_RETRIEVE_EMPTY_RESULT] = tscProcessEmptyResultRsp; tscProcessMsgRsp[TSDB_SQL_RETRIEVE_METRIC] = tscProcessRetrieveMetricRsp; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index c9d9050a2914777875293ee0494d657698a8c681..6a03278a077bab0aa078685acd8a0ecd3402acce 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -14,6 +14,7 @@ */ #include "os.h" +#include "hash.h" #include "tcache.h" #include "tlog.h" #include "tnote.h" @@ -28,7 +29,6 @@ #include "tsocket.h" #include "ttimer.h" #include "tutil.h" -#include "ihash.h" TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) { @@ -63,19 +63,17 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const } } -#ifdef CLUSTER if (ip && ip[0]) { + tscMgmtIpList.numOfIps = 4; + strcpy(tscMgmtIpList.ipstr[0], ip); + tscMgmtIpList.ip[0] = inet_addr(ip); strcpy(tscMgmtIpList.ipstr[1], ip); tscMgmtIpList.ip[1] = inet_addr(ip); + strcpy(tscMgmtIpList.ipstr[2], tsMasterIp); + tscMgmtIpList.ip[2] = inet_addr(tsMasterIp); + strcpy(tscMgmtIpList.ipstr[3], tsSecondIp); + tscMgmtIpList.ip[3] = inet_addr(tsSecondIp); } -#else - if (ip && ip[0]) { - if (ip != tsMasterIp) { - strcpy(tsMasterIp, ip); - } - tsServerIp = inet_addr(ip); - } -#endif pObj = (STscObj *)malloc(sizeof(STscObj)); if (NULL == pObj) { @@ -128,8 +126,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const } pSql->cmd.command = TSDB_SQL_CONNECT; - int ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); - if (TSDB_CODE_SUCCESS != ret) { + if (TSDB_CODE_SUCCESS != tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) { globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; free(pSql); free(pObj); @@ -175,11 +172,6 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) { -#ifndef CLUSTER - if (ip == NULL) { - ip = tsMasterIp; - } -#endif return taos_connect_imp(ip, user, pass, db, port, fp, param, taos); } @@ -201,15 +193,17 @@ int taos_query_imp(STscObj *pObj, SSqlObj *pSql) { pRes->numOfRows = 1; pRes->numOfTotal = 0; + pRes->numOfTotalInCurrentClause = 0; + pSql->asyncTblPos = NULL; if (NULL != pSql->pTableHashList) { - taosCleanUpIntHash(pSql->pTableHashList); + taosCleanUpHashTable(pSql->pTableHashList); pSql->pTableHashList = NULL; } - tscTrace("%p SQL: %s pObj:%p", pSql, pSql->sqlstr, pObj); + tscDump("%p pObj:%p, SQL: %s", pSql, pObj, pSql->sqlstr); - pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false); + pRes->code = (uint8_t)tsParseSql(pSql, false); /* * set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query. 
@@ -294,8 +288,12 @@ int taos_num_fields(TAOS_RES *res) { SSqlObj *pSql = (SSqlObj *)res; if (pSql == NULL || pSql->signature != pSql) return 0; - SFieldInfo *pFieldsInfo = &pSql->cmd.fieldsInfo; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if (pQueryInfo == NULL) { + return 0; + } + SFieldInfo *pFieldsInfo = &pQueryInfo->fieldsInfo; return (pFieldsInfo->numOfOutputCols - pFieldsInfo->numOfHiddenCols); } @@ -317,7 +315,8 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { SSqlObj *pSql = (SSqlObj *)res; if (pSql == NULL || pSql->signature != pSql) return 0; - return pSql->cmd.fieldsInfo.pFields; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + return pQueryInfo->fieldsInfo.pFields; } int taos_retrieve(TAOS_RES *res) { @@ -363,47 +362,54 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) { // secondary merge has handle this situation if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) { - pRes->numOfTotal += pRes->numOfRows; + pRes->numOfTotalInCurrentClause += pRes->numOfRows; } - for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + - pRes->bytes[i] * (1 - pCmd->order.order) * (pRes->numOfRows - 1); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { +// pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + +// pRes->bytes[i] * (1 - pQueryInfo->order.order) * (pRes->numOfRows - 1); + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order); } *rows = pRes->tsrow; - return (pCmd->order.order == TSQL_SO_DESC) ? pRes->numOfRows : -pRes->numOfRows; + return (pQueryInfo->order.order == TSQL_SO_DESC) ? pRes->numOfRows : -pRes->numOfRows; } static void **doSetResultRowData(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; + + assert(pRes->row >= 0 && pRes->row <= pRes->numOfRows); + + if (pRes->row >= pRes->numOfRows) { // all the results has returned to invoker + tfree(pRes->tsrow); + return pRes->tsrow; + } + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); int32_t num = 0; - - for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row; + for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row; // primary key column cannot be null in interval query, no need to check - if (i == 0 && pCmd->nAggTimeInterval > 0) { + if (i == 0 && pQueryInfo->nAggTimeInterval > 0) { continue; } - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); - + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); if (isNull(pRes->tsrow[i], pField->type)) { pRes->tsrow[i] = NULL; } else if (pField->type == TSDB_DATA_TYPE_NCHAR) { // convert unicode to native code in a temporary buffer extra one byte for terminated symbol if (pRes->buffer[num] == NULL) { - pRes->buffer[num] = malloc(pField->bytes + 1); - } else { - pRes->buffer[num] = realloc(pRes->buffer[num], pField->bytes + 1); + pRes->buffer[num] = malloc(pField->bytes + TSDB_NCHAR_SIZE); } - /* string terminated */ - memset(pRes->buffer[num], 0, pField->bytes + 1); + /* string terminated char for binary data*/ + memset(pRes->buffer[num], 0, pField->bytes + TSDB_NCHAR_SIZE); if (taosUcs4ToMbs(pRes->tsrow[i], pField->bytes, pRes->buffer[num])) { pRes->tsrow[i] = 
pRes->buffer[num]; @@ -411,21 +417,14 @@ static void **doSetResultRowData(SSqlObj *pSql) { tscError("%p charset:%s to %s. val:%ls convert failed.", pSql, DEFAULT_UNICODE_ENCODEC, tsCharset, pRes->tsrow); pRes->tsrow[i] = NULL; } + num++; } } - assert(num <= pCmd->fieldsInfo.numOfOutputCols); - - return pRes->tsrow; -} - -static void **getOneRowFromBuf(SSqlObj *pSql) { - doSetResultRowData(pSql); - - SSqlRes *pRes = &pSql->res; - pRes->row++; - + assert(num <= pQueryInfo->fieldsInfo.numOfOutputCols); + + pRes->row++; // index increase one-step return pRes->tsrow; } @@ -433,22 +432,29 @@ static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) { bool hasData = true; SSqlCmd *pCmd = &pSql->cmd; - if (tscProjectionQueryOnMetric(pCmd)) { + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { bool allSubqueryExhausted = true; for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + if (pSql->pSubs[i] == NULL) { + continue; + } + SSqlRes *pRes1 = &pSql->pSubs[i]->res; SSqlCmd *pCmd1 = &pSql->pSubs[i]->cmd; - SMeterMetaInfo *pMetaInfo = tscGetMeterMetaInfo(pCmd1, 0); - assert(pCmd1->numOfTables == 1); + SQueryInfo * pQueryInfo1 = tscGetQueryInfoDetail(pCmd1, pCmd1->clauseIndex); + SMeterMetaInfo *pMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo1, 0); + + assert(pQueryInfo1->numOfTables == 1); /* * if the global limitation is not reached, and current result has not exhausted, or next more vnodes are - * available, go on + * available, goes on */ if (pMetaInfo->vnodeIndex < pMetaInfo->pMetricMeta->numOfVnodes && pRes1->row < pRes1->numOfRows && - (!tscHasReachLimitation(pSql->pSubs[i]))) { + (!tscHasReachLimitation(pQueryInfo1, pRes1))) { allSubqueryExhausted = false; break; } @@ -457,12 +463,16 @@ static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) { hasData = !allSubqueryExhausted; } else { // otherwise, in case inner join, if any subquery exhausted, query completed. 
for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlRes *pRes1 = &pSql->pSubs[i]->res; + if (pSql->pSubs[i] == 0) { + continue; + } + + SSqlRes * pRes1 = &pSql->pSubs[i]->res; + SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0); - if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pSql->pSubs[i]) && - tscProjectionQueryOnTable(&pSql->pSubs[i]->cmd)) || + if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pQueryInfo1, pRes1) && + tscProjectionQueryOnTable(pQueryInfo1)) || (pRes1->numOfRows == 0)) { - hasData = false; break; } @@ -472,57 +482,42 @@ static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) { return hasData; } -static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { - SSqlCmd *pCmd = &pSql->cmd; +static void **tscBuildResFromSubqueries(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; while (1) { - if (!tscHashRemainDataInSubqueryResultSet(pSql)) { // free all sub sqlobj - tscTrace("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1); - - SSubqueryState *pState = NULL; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + if (pRes->tsrow == NULL) { + pRes->tsrow = calloc(pQueryInfo->exprsInfo.numOfExprs, POINTER_BYTES); + } - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlObj * pChildObj = pSql->pSubs[i]; - SJoinSubquerySupporter *pSupporter = (SJoinSubquerySupporter *)pChildObj->param; - pState = pSupporter->pState; + bool success = false; - tscDestroyJoinSupporter(pChildObj->param); - taos_free_result(pChildObj); + int32_t numOfTableHasRes = 0; + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + if (pSql->pSubs[i] != 0) { + numOfTableHasRes++; } - - free(pState); - return NULL; } - if (pRes->tsrow == NULL) { - pRes->tsrow = malloc(POINTER_BYTES * pCmd->exprsInfo.numOfExprs); - } - - bool success = false; - if (pSql->numOfSubs >= 2) { // do merge result - SSqlRes *pRes1 = &pSql->pSubs[0]->res; - SSqlRes *pRes2 = &pSql->pSubs[1]->res; + if (numOfTableHasRes >= 2) { // do merge result - if (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) { - doSetResultRowData(pSql->pSubs[0]); - doSetResultRowData(pSql->pSubs[1]); + success = (doSetResultRowData(pSql->pSubs[0]) != NULL) && + (doSetResultRowData(pSql->pSubs[1]) != NULL); // TSKEY key1 = *(TSKEY *)pRes1->tsrow[0]; // TSKEY key2 = *(TSKEY *)pRes2->tsrow[0]; // printf("first:%" PRId64 ", second:%" PRId64 "\n", key1, key2); - success = true; - pRes1->row++; - pRes2->row++; - } } else { // only one subquery - SSqlRes *pRes1 = &pSql->pSubs[0]->res; - doSetResultRowData(pSql->pSubs[0]); + SSqlObj *pSub = pSql->pSubs[0]; + if (pSub == NULL) { + pSub = pSql->pSubs[1]; + } - success = (pRes1->row++ < pRes1->numOfRows); + success = (doSetResultRowData(pSub) != NULL); } if (success) { // current row of final output has been built, return to app - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { int32_t tableIndex = pRes->pColumnIndex[i].tableIndex; int32_t columnIndex = pRes->pColumnIndex[i].columnIndex; @@ -530,8 +525,32 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { pRes->tsrow[i] = pRes1->tsrow[columnIndex]; } + pRes->numOfTotalInCurrentClause++; + break; } else { // continue retrieve data from vnode + if (!tscHashRemainDataInSubqueryResultSet(pSql)) { + tscTrace("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1); + SSubqueryState *pState = NULL; + + // free all sub 
sqlobj + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlObj *pChildObj = pSql->pSubs[i]; + if (pChildObj == NULL) { + continue; + } + + SJoinSubquerySupporter *pSupporter = (SJoinSubquerySupporter *)pChildObj->param; + pState = pSupporter->pState; + + tscDestroyJoinSupporter(pChildObj->param); + taos_free_result(pChildObj); + } + + free(pState); + return NULL; + } + tscFetchDatablockFromSubquery(pSql); if (pRes->code != TSDB_CODE_SUCCESS) { return NULL; @@ -556,81 +575,72 @@ TAOS_ROW taos_fetch_row_impl(TAOS_RES *res) { if (pRes->code == TSDB_CODE_SUCCESS) { tscTrace("%p data from all subqueries have been retrieved to client", pSql); - return tscJoinResultsetFromBuf(pSql); + return tscBuildResFromSubqueries(pSql); } else { tscTrace("%p retrieve data from subquery failed, code:%d", pSql, pRes->code); return NULL; } } else if (pRes->row >= pRes->numOfRows) { + /** + * NOT a join query + * + * If the data block of current result set have been consumed already, try fetch next result + * data block from virtual node. + */ tscResetForNextRetrieve(pRes); if (pCmd->command < TSDB_SQL_LOCAL) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; } - tscProcessSql(pSql); - if (pRes->numOfRows == 0) { - return NULL; + tscProcessSql(pSql); // retrieve data from virtual node + + //if failed to retrieve data from current virtual node, try next one if exists + if (hasMoreVnodesToTry(pSql)) { + tscTryQueryNextVnode(pSql, NULL); } - // local reducer has handle this situation + /* + * local reducer has handle this case, + * so no need to add the pRes->numOfRows for super table query + */ if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) { - pRes->numOfTotal += pRes->numOfRows; + pRes->numOfTotalInCurrentClause += pRes->numOfRows; + } + + if (pRes->numOfRows == 0) { + return NULL; } } - return getOneRowFromBuf(pSql); + return doSetResultRowData(pSql); } TAOS_ROW taos_fetch_row(TAOS_RES *res) { SSqlObj *pSql = (SSqlObj *)res; SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; if (pSql == NULL || pSql->signature != pSql) { globalCode = TSDB_CODE_DISCONNECTED; return NULL; } - // projection query on metric, pipeline retrieve data from vnode list, instead of two-stage merge + /* + * projection query on super table, access each virtual node sequentially retrieve data from vnode list, + * instead of two-stage merge + */ TAOS_ROW rows = taos_fetch_row_impl(res); - while (rows == NULL && tscProjectionQueryOnMetric(pCmd)) { - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - - // reach the maximum number of output rows, abort - if (tscHasReachLimitation(pSql)) { - return NULL; - } - - /* - * update the limit and offset value according to current retrieval results - * Note: if pRes->offset > 0, pRes->numOfRows = 0, pRes->numOfTotal = 0; - */ - pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; - pCmd->limit.offset = pRes->offset; - - assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0)); - - /* - * For project query with super table join, the numOfSub is equalled to the number of all subqueries, so - * we need to reset the value of numOfSubs to be 0. - * - * For super table join with projection query, if anyone of the subquery is exhausted, the query completed. 
- */ - pSql->numOfSubs = 0; - - if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { - pCmd->command = TSDB_SQL_SELECT; - assert(pSql->fp == NULL); - tscProcessSql(pSql); - rows = taos_fetch_row_impl(res); - } + if (rows != NULL) { + return rows; + } - // check!!! - if (rows != NULL || pMeterMetaInfo->vnodeIndex >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { - break; - } + // current subclause is completed, try the next subclause + while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) { + tscTryQueryNextClause(pSql, NULL); + + // if the rows is not NULL, return immediately + rows = taos_fetch_row_impl(res); } return rows; @@ -652,36 +662,34 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { // projection query on metric, pipeline retrieve data from vnode list, // instead of two-stage mergevnodeProcessMsgFromShell free qhandle nRows = taos_fetch_block_impl(res, rows); - while (*rows == NULL && tscProjectionQueryOnMetric(pCmd)) { - /* reach the maximum number of output rows, abort */ - if (tscHasReachLimitation(pSql)) { - return 0; - } - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + // current subclause is completed, try the next subclause + while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) { + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - /* update the limit value according to current retrieval results */ - pCmd->limit.limit = pSql->cmd.globalLimit - pRes->numOfTotal; - pCmd->limit.offset = pRes->offset; + pSql->cmd.command = pQueryInfo->command; + pCmd->clauseIndex++; - if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { - pSql->cmd.command = TSDB_SQL_SELECT; - assert(pSql->fp == NULL); - tscProcessSql(pSql); - nRows = taos_fetch_block_impl(res, rows); - } + pRes->numOfTotal += pRes->numOfTotalInCurrentClause; + pRes->numOfTotalInCurrentClause = 0; + pRes->rspType = 0; - // check!!! 
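The fetch loop being rewritten here no longer steps through vnodes by hand; it advances a clause index and folds the per-clause row counter into the grand total before re-issuing the query. A compact sketch of that bookkeeping under hypothetical names (the real fields are `pCmd->clauseIndex`, `pRes->numOfTotal`, and `pRes->numOfTotalInCurrentClause`):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical cursor over the subclauses of a multi-clause statement. */
typedef struct ClauseCursor {
  int     clauseIndex;                /* which subclause is active      */
  int     numOfClause;                /* total subclauses               */
  int64_t numOfTotal;                 /* rows from finished clauses     */
  int64_t numOfTotalInCurrentClause;  /* rows from the active clause    */
} ClauseCursor;

/* Returns true if another subclause was activated. */
static bool advanceToNextClause(ClauseCursor *c) {
  if (c->clauseIndex >= c->numOfClause - 1) {
    return false;                     /* all subclauses consumed */
  }
  /* fold the per-clause counter into the grand total, then reset it */
  c->numOfTotal += c->numOfTotalInCurrentClause;
  c->numOfTotalInCurrentClause = 0;
  c->clauseIndex++;                   /* caller re-issues the query now */
  return true;
}

int main(void) {
  ClauseCursor c = {0, 2, 0, 37};     /* clause 0 produced 37 rows */
  if (advanceToNextClause(&c)) {
    printf("now on clause %d, %lld rows so far\n",
           c.clauseIndex, (long long)c.numOfTotal);  /* clause 1, 37 */
  }
  return 0;
}
```

As the diff shows, the real code additionally clears the old subqueries (`pSql->numOfSubs = 0; tfree(pSql->pSubs)`) and resets `rspType` before calling `tscProcessSql` for the next clause.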
- if (*rows != NULL || pMeterMetaInfo->vnodeIndex >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { - break; - } + pSql->numOfSubs = 0; + tfree(pSql->pSubs); + + assert(pSql->fp == NULL); + + tscTrace("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause); + tscProcessSql(pSql); + + nRows = taos_fetch_block_impl(res, rows); } return nRows; } int taos_select_db(TAOS *taos, const char *db) { - char sql[64]; + char sql[256] = {0}; STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) { @@ -689,12 +697,11 @@ int taos_select_db(TAOS *taos, const char *db) { return TSDB_CODE_DISCONNECTED; } - sprintf(sql, "use %s", db); - + snprintf(sql, tListLen(sql), "use %s", db); return taos_query(taos, sql); } -void taos_free_result(TAOS_RES *res) { +void taos_free_result_imp(TAOS_RES* res, int keepCmd) { if (res == NULL) return; SSqlObj *pSql = (SSqlObj *)res; @@ -712,6 +719,8 @@ void taos_free_result(TAOS_RES *res) { pSql->thandle = NULL; tscFreeSqlObj(pSql); tscTrace("%p Async SqlObj is freed by app", pSql); + } else if (keepCmd) { + tscFreeSqlResult(pSql); } else { tscFreeSqlObjPartial(pSql); } @@ -719,9 +728,15 @@ void taos_free_result(TAOS_RES *res) { } // set freeFlag to 1 in retrieve message if there are un-retrieved results - pCmd->type = TSDB_QUERY_TYPE_FREE_RESOURCE; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if (pQueryInfo == NULL) { + tscFreeSqlObjPartial(pSql); + return; + } - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE; + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); /* * case 1. Partial data have been retrieved from vnodes, but not all data has been retrieved yet. @@ -739,6 +754,8 @@ void taos_free_result(TAOS_RES *res) { pSql->pStream == NULL && pMeterMetaInfo->pMeterMeta != NULL))) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + tscTrace("%p code:%d, numOfRows:%d, command:%d", pSql, pRes->code, pRes->numOfRows, pCmd->command); + void *fp = pSql->fp; if (fp != NULL) { pSql->freed = 1; @@ -761,8 +778,13 @@ void taos_free_result(TAOS_RES *res) { * Then this object will be reused and no free operation is required. */ pSql->thandle = NULL; - tscFreeSqlObjPartial(pSql); - tscTrace("%p sql result is freed by app", pSql); + if (keepCmd) { + tscFreeSqlResult(pSql); + tscTrace("%p sql result is freed by app while sql command is kept", pSql); + } else { + tscFreeSqlObjPartial(pSql); + tscTrace("%p sql result is freed by app", pSql); + } } } else { // if no free resource msg is sent to vnode, we free this object immediately. 
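The `keepCmd` parameter threaded through `taos_free_result_imp` above exists so the subscription code later in this patch can drop a consumed result set while keeping the parsed command for the next poll. A sketch of the pattern with stand-in types (`SqlObj`, `freeResultOnly`, and `freePartialObject` are hypothetical; the real calls are `tscFreeSqlResult` and `tscFreeSqlObjPartial`):

```c
#include <stdlib.h>

/* Hypothetical stand-in for SSqlObj: result buffers plus a parsed command. */
typedef struct SqlObj {
  char *rows;     /* retrieved result data      */
  char *command;  /* parsed statement, reusable */
} SqlObj;

static void freeResultOnly(SqlObj *o) {      /* ~ tscFreeSqlResult */
  free(o->rows);
  o->rows = NULL;                            /* parsed command survives */
}

static void freePartialObject(SqlObj *o) {   /* ~ tscFreeSqlObjPartial */
  free(o->rows);    o->rows = NULL;
  free(o->command); o->command = NULL;
}

/* The flag picks between the two, mirroring taos_free_result_imp. */
static void freeResultImpl(SqlObj *o, int keepCmd) {
  if (o == NULL) return;
  if (keepCmd) freeResultOnly(o);     /* subscription: query runs again */
  else         freePartialObject(o);  /* ordinary caller is done        */
}

static void freeResult(SqlObj *o) {   /* public API keeps its old shape */
  freeResultImpl(o, 0);
}

int main(void) {
  SqlObj o = { malloc(8), malloc(8) };
  freeResultImpl(&o, 1);  /* result gone, command kept for the next poll */
  freeResult(&o);         /* now the command is released too             */
  return 0;
}
```

Keeping the public `taos_free_result` as a thin wrapper means existing callers are unaffected; only `taos_consume` opts into the keep-command path.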
@@ -772,6 +794,9 @@ void taos_free_result(TAOS_RES *res) { assert(pRes->numOfRows == 0 || (pCmd->command > TSDB_SQL_LOCAL)); tscFreeSqlObj(pSql); tscTrace("%p Async sql result is freed by app", pSql); + } else if (keepCmd) { + tscFreeSqlResult(pSql); + tscTrace("%p sql result is freed while sql command is kept", pSql); } else { tscFreeSqlObjPartial(pSql); tscTrace("%p sql result is freed", pSql); @@ -779,6 +804,10 @@ void taos_free_result(TAOS_RES *res) { } } +void taos_free_result(TAOS_RES *res) { + taos_free_result_imp(res, 0); +} + int taos_errno(TAOS *taos) { STscObj *pObj = (STscObj *)taos; int code; @@ -793,26 +822,47 @@ int taos_errno(TAOS *taos) { return code; } +static bool validErrorCode(int32_t code) { + return code >= TSDB_CODE_SUCCESS && code < TSDB_CODE_MAX_ERROR_CODE; +} + +/* + * In case of invalid sql error, additional information is attached to explain + * why the sql is invalid + */ +static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd* pCmd) { + if (code != TSDB_CODE_INVALID_SQL) { + return false; + } + + size_t len = strlen(pCmd->payload); + + char* z = NULL; + if (len > 0) { + z = strstr (pCmd->payload, "invalid SQL"); + } + + return z != NULL; +} + char *taos_errstr(TAOS *taos) { STscObj *pObj = (STscObj *)taos; uint8_t code; if (pObj == NULL || pObj->signature != pObj) return tsError[globalCode]; - if ((int8_t)(pObj->pSql->res.code) == -1) - code = TSDB_CODE_OTHERS; - else - code = pObj->pSql->res.code; + SSqlObj* pSql = pObj->pSql; + + if (validErrorCode(pSql->res.code)) { + code = pSql->res.code; + } else { + code = TSDB_CODE_OTHERS; //unknown error + } - // for invalid sql, additional information is attached to explain why the sql is invalid - if (code == TSDB_CODE_INVALID_SQL) { - return pObj->pSql->cmd.payload; + if (hasAdditionalErrorInfo(code, &pSql->cmd)) { + return pSql->cmd.payload; } else { - if (code < 0 || code > TSDB_CODE_MAX_ERROR_CODE) { - return tsError[TSDB_CODE_SUCCESS]; - } else { - return tsError[code]; - } + return tsError[code]; } } @@ -835,12 +885,15 @@ void taos_stop_query(TAOS_RES *res) { if (res == NULL) return; SSqlObj *pSql = (SSqlObj *)res; + SSqlCmd *pCmd = &pSql->cmd; + if (pSql->signature != pSql) return; tscTrace("%p start to cancel query", res); pSql->res.code = TSDB_CODE_QUERY_CANCELLED; - if (tscIsTwoStageMergeMetricQuery(&pSql->cmd)) { + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (tscIsTwoStageMergeMetricQuery(pQueryInfo, 0)) { tscKillMetricQuery(pSql); return; } @@ -861,61 +914,61 @@ void taos_stop_query(TAOS_RES *res) { int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { int len = 0; for (int i = 0; i < num_fields; ++i) { + if (i > 0) { + str[len++] = ' '; + } + if (row[i] == NULL) { - len += sprintf(str + len, "%s ", TSDB_DATA_NULL_STR); + len += sprintf(str + len, "%s", TSDB_DATA_NULL_STR); continue; } switch (fields[i].type) { case TSDB_DATA_TYPE_TINYINT: - len += sprintf(str + len, "%d ", *((char *)row[i])); + len += sprintf(str + len, "%d", *((char *)row[i])); break; case TSDB_DATA_TYPE_SMALLINT: - len += sprintf(str + len, "%d ", *((short *)row[i])); + len += sprintf(str + len, "%d", *((short *)row[i])); break; case TSDB_DATA_TYPE_INT: - len += sprintf(str + len, "%d ", *((int *)row[i])); + len += sprintf(str + len, "%d", *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - len += sprintf(str + len, "%" PRId64 " ", *((int64_t *)row[i])); + len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: { float fv = 0; 
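For reference, the `taos_print_row` change in the hunk below swaps a trailing space after every value for a single separator before every value but the first, so the formatted line no longer ends in whitespace. A toy version over plain ints, assuming nothing about `TAOS_FIELD`:

```c
#include <stdio.h>

/* Emit the separator before each value except the first, so the line
 * ends cleanly -- the pattern taos_print_row switches to here. */
static int printRow(char *str, const int *vals, int n) {
  int len = 0;
  for (int i = 0; i < n; ++i) {
    if (i > 0) {
      str[len++] = ' ';              /* separator, never a trailing one */
    }
    len += sprintf(str + len, "%d", vals[i]);
  }
  str[len] = '\0';
  return len;
}

int main(void) {
  char buf[64];
  int  vals[] = {7, 42, -1};
  printRow(buf, vals, 3);
  printf("[%s]\n", buf);             /* prints "[7 42 -1]", no tail */
  return 0;
}
```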
fv = GET_FLOAT_VAL(row[i]); - len += sprintf(str + len, "%f ", fv); - } - break; + len += sprintf(str + len, "%f", fv); + } break; - case TSDB_DATA_TYPE_DOUBLE:{ + case TSDB_DATA_TYPE_DOUBLE: { double dv = 0; dv = GET_DOUBLE_VAL(row[i]); - len += sprintf(str + len, "%lf ", dv); - } - break; + len += sprintf(str + len, "%lf", dv); + } break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: { - /* limit the max length of string to no greater than the maximum length, - * in case of not null-terminated string */ - size_t xlen = strlen(row[i]); - size_t trueLen = MIN(xlen, fields[i].bytes); - - memcpy(str + len, (char *)row[i], trueLen); - - str[len + trueLen] = ' '; - len += (trueLen + 1); - } break; + size_t xlen = 0; + for (xlen = 0; xlen <= fields[i].bytes; xlen++) { + char c = ((char*)row[i])[xlen]; + if (c == 0) break; + str[len++] = c; + } + str[len] = 0; + } break; case TSDB_DATA_TYPE_TIMESTAMP: - len += sprintf(str + len, "%" PRId64 " ", *((int64_t *)row[i])); + len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_BOOL: - len += sprintf(str + len, "%d ", *((int8_t *)row[i])); + len += sprintf(str + len, "%d", *((int8_t *)row[i])); default: break; } @@ -936,6 +989,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { pRes->numOfRows = 1; pRes->numOfTotal = 0; + pRes->numOfTotalInCurrentClause = 0; tscTrace("%p Valid SQL: %s pObj:%p", pSql, sql, pObj); @@ -958,11 +1012,11 @@ int taos_validate_sql(TAOS *taos, const char *sql) { pSql->asyncTblPos = NULL; if (NULL != pSql->pTableHashList) { - taosCleanUpIntHash(pSql->pTableHashList); + taosCleanUpHashTable(pSql->pTableHashList); pSql->pTableHashList = NULL; } - pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false); + pRes->code = (uint8_t)tsParseSql(pSql, false); int code = pRes->code; tscTrace("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); @@ -973,7 +1027,6 @@ int taos_validate_sql(TAOS *taos, const char *sql) { static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) { // must before clean the sqlcmd object - tscRemoveAllMeterMetaInfo(&pSql->cmd, false); tscCleanSqlCmd(&pSql->cmd); SSqlCmd *pCmd = &pSql->cmd; @@ -984,7 +1037,10 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t int code = TSDB_CODE_INVALID_METER_ID; char *str = (char *)tblNameList; - SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd); + SQueryInfo *pQueryInfo = NULL; + tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo); + + SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo); if ((code = tscAllocPayload(pCmd, tblListLen + 16)) != TSDB_CODE_SUCCESS) { return code; @@ -1019,7 +1075,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t return code; } - if ((code = setMeterID(pSql, &sToken, 0)) != TSDB_CODE_SUCCESS) { + if ((code = setMeterID(pMeterMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) { return code; } @@ -1064,6 +1120,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { SSqlRes *pRes = &pSql->res; pRes->numOfTotal = 0; // the number of getting table meta from server + pRes->numOfTotalInCurrentClause = 0; + pRes->code = 0; assert(pSql->fp == NULL); diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index cd2736e9103b4b2af8b1ecf927b214a699a80bf3..9fc9706dd9fb87179fabcc484f7ae56afb231914 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -31,9 +31,13 @@ static void 
tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql); static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer); -static bool isProjectStream(SSqlCmd *pCmd) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); +static int64_t getDelayValueAfterTimewindowClosed(SSqlStream* pStream, int64_t launchDelay) { + return taosGetTimestamp(pStream->precision) + launchDelay - pStream->stime - 1; +} + +static bool isProjectStream(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId != TSDB_FUNC_PRJ) { return false; } @@ -66,27 +70,29 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) { pSql->fp = tscProcessStreamQueryCallback; pSql->param = pStream; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); - int code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); + int code = tscGetMeterMeta(pSql, pMeterMetaInfo); pSql->res.code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; - if (code == 0 && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - code = tscGetMetricMeta(pSql); + if (code == 0 && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { + code = tscGetMetricMeta(pSql, 0); pSql->res.code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; } - tscTansformSQLFunctionForMetricQuery(&pSql->cmd); + tscTansformSQLFunctionForSTableQuery(pQueryInfo); // failed to get meter/metric meta, retry in 10sec. if (code != TSDB_CODE_SUCCESS) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); tscError("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime); - + tscSetRetryTimer(pStream, pSql, retryDelayTime); return; } @@ -105,22 +111,23 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) { pStream->numOfRes = 0; // reset the numOfRes. SSqlObj *pSql = pStream->pSql; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); tscTrace("%p add into timer", pSql); - if (isProjectStream(&pSql->cmd)) { + if (isProjectStream(pQueryInfo)) { /* - * pSql->cmd.etime, which is the start time, does not change in case of + * pQueryInfo->etime, which is the start time, does not change in case of * repeat first execution, once the first execution failed. 
*/ - pSql->cmd.stime = pStream->stime; // start time + pQueryInfo->stime = pStream->stime; // start time - pSql->cmd.etime = taosGetTimestamp(pStream->precision); // end time - if (pSql->cmd.etime > pStream->etime) { - pSql->cmd.etime = pStream->etime; + pQueryInfo->etime = taosGetTimestamp(pStream->precision); // end time + if (pQueryInfo->etime > pStream->etime) { + pQueryInfo->etime = pStream->etime; } } else { - pSql->cmd.stime = pStream->stime - pStream->interval; - pSql->cmd.etime = pStream->stime - 1; + pQueryInfo->stime = pStream->stime - pStream->interval; + pQueryInfo->etime = pStream->stime - 1; } // launch stream computing in a new thread @@ -139,9 +146,9 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf tscError("%p stream:%p, query data failed, code:%d, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows, retryDelay); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0, 0); tscClearMeterMetaInfo(pMeterMetaInfo, true); - + tscSetRetryTimer(pStream, pStream->pSql, retryDelay); return; } @@ -165,24 +172,25 @@ static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) { static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows) { SSqlStream * pStream = (SSqlStream *)param; SSqlObj * pSql = (SSqlObj *)res; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); if (pSql == NULL || numOfRows < 0) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); tscError("%p stream:%p, retrieve data failed, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime); tscClearMeterMetaInfo(pMeterMetaInfo, true); - + tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime); return; } if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful. 
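A note on the query window set up in `tscProcessStreamTimer` above: projection streams scan forward from the last consumed timestamp up to now (clamped to the stream's end time), while aggregation streams close exactly one interval ending just before `stime`. A hedged sketch, with `nextWindow` as a hypothetical helper and `now` standing in for `taosGetTimestamp(precision)`:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct Window { int64_t stime, etime; } Window;

/* Two window shapes, as in the diff above (hypothetical helper). */
static Window nextWindow(int projection, int64_t streamStime,
                         int64_t streamEtime, int64_t interval, int64_t now) {
  Window w;
  if (projection) {
    /* projection streams scan forward from the last consumed timestamp */
    w.stime = streamStime;
    w.etime = (now > streamEtime) ? streamEtime : now;
  } else {
    /* aggregation streams close one fixed interval ending before stime */
    w.stime = streamStime - interval;
    w.etime = streamStime - 1;
  }
  return w;
}

int main(void) {
  Window w = nextWindow(0, 10000, 1000000, 1000, 10500);
  printf("aggregation window: [%lld, %lld]\n",
         (long long)w.stime, (long long)w.etime);  /* [9000, 9999] */
  return 0;
}
```

This is also why a projection stream advances `stime` to the last event timestamp it saw (plus one), while an aggregation stream advances it by `slidingTime`.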
pStream->numOfRes += numOfRows; - + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + for(int32_t i = 0; i < numOfRows; ++i) { TAOS_ROW row = taos_fetch_row(res); tscTrace("%p stream:%p fetch result", pSql, pStream); - if (isProjectStream(&pSql->cmd)) { + if (isProjectStream(pQueryInfo)) { pStream->stime = *(TSKEY *)row[0]; } else { tscSetTimestampForRes(pStream, pSql); @@ -197,9 +205,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf } else { // numOfRows == 0, all data has been retrieved pStream->useconds += pSql->res.useconds; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if (pStream->numOfRes == 0) { - if (pSql->cmd.interpoType == TSDB_INTERPO_SET_VALUE || pSql->cmd.interpoType == TSDB_INTERPO_NULL) { - SSqlCmd *pCmd = &pSql->cmd; + if (pQueryInfo->interpoType == TSDB_INTERPO_SET_VALUE || pQueryInfo->interpoType == TSDB_INTERPO_NULL) { SSqlRes *pRes = &pSql->res; /* failed to retrieve any result in this retrieve */ @@ -209,12 +218,12 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf void *oldPtr = pSql->res.data; pSql->res.data = tmpRes; + + for (int32_t i = 1; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); - for (int32_t i = 1; i < pSql->cmd.fieldsInfo.numOfOutputCols; ++i) { - int16_t offset = tscFieldInfoGetOffset(pCmd, i); - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); - - assignVal(pSql->res.data + offset, (char *)(&pCmd->defaultVal[i]), pField->bytes, pField->type); + assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->defaultVal[i]), pField->bytes, pField->type); row[i] = pSql->res.data + offset; } @@ -222,7 +231,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf row[0] = pRes->data; // char result[512] = {0}; - // taos_print_row(result, row, pSql->cmd.fieldsInfo.pFields, pSql->cmd.fieldsInfo.numOfOutputCols); + // taos_print_row(result, row, pQueryInfo->fieldsInfo.pFields, pQueryInfo->fieldsInfo.numOfOutputCols); // tscPrint("%p stream:%p query result: %s", pSql, pStream, result); tscTrace("%p stream:%p fetch result", pSql, pStream); @@ -231,18 +240,19 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf pRes->numOfRows = 0; pRes->data = oldPtr; - } else if (isProjectStream(&pSql->cmd)) { + } else if (isProjectStream(pQueryInfo)) { /* no resuls in the query range, retry */ // todo set retry dynamic time int32_t retry = tsProjectExecInterval; tscError("%p stream:%p, retrieve no data, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retry); tscClearSqlMetaInfoForce(&(pStream->pSql->cmd)); + tscSetRetryTimer(pStream, pStream->pSql, retry); return; } } else { - if (isProjectStream(&pSql->cmd)) { + if (isProjectStream(pQueryInfo)) { pStream->stime += 1; } } @@ -257,7 +267,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf } static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) { - if (isProjectStream(&pSql->cmd)) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + int64_t delay = getDelayValueAfterTimewindowClosed(pStream, timer); + + if (isProjectStream(pQueryInfo)) { int64_t now = taosGetTimestamp(pStream->precision); int64_t etime = now > pStream->etime ? 
pStream->etime : now; @@ -268,19 +281,19 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) tscTrace("%p stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here - taos_close_stream(pStream); if (pStream->callback) { // Callback function from upper level pStream->callback(pStream->param); } + taos_close_stream(pStream); return; } - - tscTrace("%p stream:%p, next query start at %" PRId64 ", in %" PRId64 "ms. query range %" PRId64 "-%" PRId64 "", pStream->pSql, pStream, - now + timer, timer, pStream->stime, etime); + + tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream, + now + timer, timer, delay, pStream->stime, etime); } else { - tscTrace("%p stream:%p, next query start at %" PRId64 ", in %" PRId64 "ms. query range %" PRId64 "-%" PRId64 "", pStream->pSql, pStream, - pStream->stime, timer, pStream->stime - pStream->interval, pStream->stime - 1); + tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream, + pStream->stime, timer, delay, pStream->stime - pStream->interval, pStream->stime - 1); } pSql->cmd.command = TSDB_SQL_SELECT; @@ -289,10 +302,34 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) taosTmrReset(tscProcessStreamTimer, timer, pStream, tscTmr, &pStream->pTimer); } +static int64_t getLaunchTimeDelay(const SSqlStream* pStream) { + int64_t delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio); + + int64_t maxDelay = + (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? 
tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay; + + if (delayDelta > maxDelay) { + delayDelta = maxDelay; + } + + int64_t remainTimeWindow = pStream->slidingTime - delayDelta; + if (maxDelay > remainTimeWindow) { + maxDelay = (remainTimeWindow / 1.5); + } + + int64_t currentDelay = (rand() % maxDelay); // a random number + currentDelay += delayDelta; + assert(currentDelay < pStream->slidingTime); + + return currentDelay; +} + + static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { int64_t timer = 0; - - if (isProjectStream(&pSql->cmd)) { + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if (isProjectStream(pQueryInfo)) { /* * for project query, no mater fetch data successfully or not, next launch will issue * more than the sliding time window @@ -302,44 +339,35 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here - taos_close_stream(pStream); if (pStream->callback) { // Callback function from upper level pStream->callback(pStream->param); } + taos_close_stream(pStream); return; } } else { pStream->stime += pStream->slidingTime; if ((pStream->stime - pStream->interval) >= pStream->etime) { - tscTrace("%p stream:%p, stime:%ld is larger than end time: %ld, stop the stream", pStream->pSql, pStream, + tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here - taos_close_stream(pStream); if (pStream->callback) { // Callback function from upper level pStream->callback(pStream->param); } + taos_close_stream(pStream); return; } - + timer = pStream->stime - taosGetTimestamp(pStream->precision); if (timer < 0) { timer = 0; } } - int64_t delayDelta = (int64_t)(pStream->slidingTime * 0.1); - delayDelta = (rand() % delayDelta); - - int64_t maxDelay = - (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay; - - if (delayDelta > maxDelay) { - delayDelta = maxDelay; - } - - timer += delayDelta; // a random number + timer += getLaunchTimeDelay(pStream); + if (pStream->precision == TSDB_TIME_PRECISION_MICRO) { timer = timer / 1000L; } @@ -348,56 +376,59 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { } static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { - SSqlCmd *pCmd = &pSql->cmd; - int64_t minIntervalTime = (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? 
tsMinIntervalTime * 1000L : tsMinIntervalTime; - if (pCmd->nAggTimeInterval < minIntervalTime) { + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + if (pQueryInfo->nAggTimeInterval < minIntervalTime) { tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64 "", pSql, pStream, - pCmd->nAggTimeInterval, minIntervalTime); - pCmd->nAggTimeInterval = minIntervalTime; + pQueryInfo->nAggTimeInterval, minIntervalTime); + pQueryInfo->nAggTimeInterval = minIntervalTime; } - pStream->interval = pCmd->nAggTimeInterval; // it shall be derived from sql string + pStream->interval = pQueryInfo->nAggTimeInterval; // it shall be derived from sql string - if (pCmd->nSlidingTime == 0) { - pCmd->nSlidingTime = pCmd->nAggTimeInterval; + if (pQueryInfo->nSlidingTime == 0) { + pQueryInfo->nSlidingTime = pQueryInfo->nAggTimeInterval; } int64_t minSlidingTime = (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime; - if (pCmd->nSlidingTime < minSlidingTime) { - tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream, pCmd->nSlidingTime, - minSlidingTime); + if (pQueryInfo->nSlidingTime < minSlidingTime) { + tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream, + pQueryInfo->nSlidingTime, minSlidingTime); - pCmd->nSlidingTime = minSlidingTime; + pQueryInfo->nSlidingTime = minSlidingTime; } - if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { + if (pQueryInfo->nSlidingTime > pQueryInfo->nAggTimeInterval) { tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64 "", pSql, pStream, - pCmd->nSlidingTime, pCmd->nAggTimeInterval); + pQueryInfo->nSlidingTime, pQueryInfo->nAggTimeInterval); - pCmd->nSlidingTime = pCmd->nAggTimeInterval; + pQueryInfo->nSlidingTime = pQueryInfo->nAggTimeInterval; } - pStream->slidingTime = pCmd->nSlidingTime; - pCmd->nAggTimeInterval = 0; // clear the interval value to avoid the force time window split by query processor + pStream->slidingTime = pQueryInfo->nSlidingTime; + + pQueryInfo->nAggTimeInterval = 0; // clear the interval value to avoid the force time window split by query processor + pQueryInfo->nSlidingTime = 0; } static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) { - SSqlCmd *pCmd = &pSql->cmd; - - if (isProjectStream(pCmd)) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + if (isProjectStream(pQueryInfo)) { // no data in table, flush all data till now to destination meter, 10sec delay pStream->interval = tsProjectExecInterval; pStream->slidingTime = tsProjectExecInterval; if (stime != 0) { // first projection start from the latest event timestamp - assert(stime >= pCmd->stime); + assert(stime >= pQueryInfo->stime); stime += 1; // exclude the last records from table } else { - stime = pCmd->stime; + stime = pQueryInfo->stime; } } else { // timewindow based aggregation stream if (stime == 0) { // no data in meter till now @@ -419,24 +450,12 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) { int64_t timer = pStream->stime - taosGetTimestamp(pStream->precision); if (timer < 0) timer = 0; - int64_t delayDelta = (int64_t)(pStream->interval * 0.1); - - int64_t maxDelay = - (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? 
tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay; - if (delayDelta > maxDelay) { - delayDelta = maxDelay; - } - int64_t startDelay = (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsStreamCompStartDelay * 1000L : tsStreamCompStartDelay; - - srand(time(NULL)); - timer += (rand() % delayDelta); // a random number - - if (timer < startDelay || timer > maxDelay) { - timer = (timer % startDelay) + startDelay; - } - + + timer += getLaunchTimeDelay(pStream); + timer += startDelay; + return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer; } @@ -499,8 +518,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p return NULL; } - // TODO later refactor use enum - pSql->cmd.count = 1; // 1 means sql in stream, allowed the sliding clause. + pSql->cmd.inStream = 1; // 1 means sql in stream, allowed the sliding clause. pRes->code = tscToSQLCmd(pSql, &SQLInfo); SQLInfoDestroy(&SQLInfo); @@ -521,7 +539,8 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p return NULL; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); pStream->fp = fp; pStream->callback = callback; @@ -530,7 +549,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p pStream->precision = pMeterMetaInfo->pMeterMeta->precision; pStream->ctime = taosGetTimestamp(pStream->precision); - pStream->etime = pCmd->etime; + pStream->etime = pQueryInfo->etime; pSql->pStream = pStream; tscAddIntoStreamList(pStream); diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index f2e9395c68b0dfc5a057b331cf00d38dbd9cb311..610c119e6d327c9b8b372136959b07098ffaef2e 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -22,125 +22,410 @@ #include "tsclient.h" #include "tsocket.h" #include "ttime.h" +#include "ttimer.h" #include "tutil.h" +#include "tscUtil.h" +#include "tcache.h" +#include "tscProfile.h" -typedef struct { - void * signature; - char name[TSDB_METER_ID_LEN]; - int mseconds; - TSKEY lastKey; - uint64_t stime; - TAOS_FIELD fields[TSDB_MAX_COLUMNS]; - int numOfFields; - TAOS * taos; - TAOS_RES * result; +typedef struct SSubscriptionProgress { + int64_t uid; + TSKEY key; +} SSubscriptionProgress; + +typedef struct SSub { + void * signature; + char topic[32]; + int64_t lastSyncTime; + int64_t lastConsumeTime; + TAOS * taos; + void * pTimer; + SSqlObj * pSql; + int interval; + TAOS_SUBSCRIBE_CALLBACK fp; + void * param; + int numOfMeters; + SSubscriptionProgress * progress; } SSub; -TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, const char *db, const char *name, int64_t time, int mseconds) { - SSub *pSub; - pSub = (SSub *)malloc(sizeof(SSub)); - if (pSub == NULL) return NULL; - memset(pSub, 0, sizeof(SSub)); +static int tscCompareSubscriptionProgress(const void* a, const void* b) { + const SSubscriptionProgress* x = (const SSubscriptionProgress*)a; + const SSubscriptionProgress* y = (const SSubscriptionProgress*)b; + if (x->uid > y->uid) return 1; + if (x->uid < y->uid) return -1; + return 0; +} + +TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid) { + if (sub == NULL) + return 0; + + SSub* pSub = (SSub*)sub; + for (int s = 0, e = pSub->numOfMeters; s < e;) { + int m = (s + e) / 2; + SSubscriptionProgress* p = pSub->progress + m; + if (p->uid > uid) + e = m; + else if (p->uid < uid) + s = m + 1; + else + 
return p->key; + } + + return 0; +} + +void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts) { + if( sub == NULL) + return; + + SSub* pSub = (SSub*)sub; + for (int s = 0, e = pSub->numOfMeters; s < e;) { + int m = (s + e) / 2; + SSubscriptionProgress* p = pSub->progress + m; + if (p->uid > uid) + e = m; + else if (p->uid < uid) + s = m + 1; + else { + if (ts >= p->key) p->key = ts; + break; + } + } +} + +static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* sql) { + SSub* pSub = calloc(1, sizeof(SSub)); + if (pSub == NULL) { + globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; + tscError("failed to allocate memory for subscription"); + return NULL; + } + + SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); + if (pSql == NULL) { + globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; + tscError("failed to allocate SSqlObj for subscription"); + goto failed; + } + + pSql->signature = pSql; + pSql->pTscObj = pObj; + + char* sqlstr = (char*)malloc(strlen(sql) + 1); + if (sqlstr == NULL) { + tscError("failed to allocate sql string for subscription"); + goto failed; + } + strcpy(sqlstr, sql); + strtolower(sqlstr, sqlstr); + pSql->sqlstr = sqlstr; + + tsem_init(&pSql->rspSem, 0, 0); + tsem_init(&pSql->emptyRspSem, 0, 1); + + SSqlRes *pRes = &pSql->res; + pRes->numOfRows = 1; + pRes->numOfTotal = 0; + + pSql->pSubscription = pSub; + pSub->pSql = pSql; pSub->signature = pSub; - strcpy(pSub->name, name); - pSub->mseconds = mseconds; - pSub->lastKey = time; - if (pSub->lastKey == 0) { - pSub->lastKey = taosGetTimestampMs(); + strncpy(pSub->topic, topic, sizeof(pSub->topic)); + pSub->topic[sizeof(pSub->topic) - 1] = 0; + return pSub; + +failed: + if (sqlstr != NULL) { + free(sqlstr); + } + if (pSql != NULL) { + free(pSql); + } + free(pSub); + return NULL; +} + + +static void tscProcessSubscriptionTimer(void *handle, void *tmrId) { + SSub *pSub = (SSub *)handle; + if (pSub == NULL || pSub->pTimer != tmrId) return; + + TAOS_RES* res = taos_consume(pSub); + if (res != NULL) { + pSub->fp(pSub, res, pSub->param, 0); + } + + taosTmrReset(tscProcessSubscriptionTimer, pSub->interval, pSub, tscTmr, &pSub->pTimer); +} + + +int tscUpdateSubscription(STscObj* pObj, SSub* pSub) { + int code = (uint8_t)tsParseSql(pSub->pSql, false); + if (code != TSDB_CODE_SUCCESS) { + tscError("failed to parse sql statement: %s", pSub->topic); + return 0; + } + + SSqlCmd* pCmd = &pSub->pSql->cmd; + if (pCmd->command != TSDB_SQL_SELECT) { + tscError("only 'select' statement is allowed in subscription: %s", pSub->topic); + return 0; + } + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0, 0); + int numOfMeters = 0; + if (!UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { + SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta; + for (int32_t i = 0; i < pMetricMeta->numOfVnodes; i++) { + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, i); + numOfMeters += pVnodeSidList->numOfSids; + } } - taos_init(); - pSub->taos = taos_connect(host, user, pass, NULL, 0); - if (pSub->taos == NULL) { - tfree(pSub); + SSubscriptionProgress* progress = (SSubscriptionProgress*)calloc(numOfMeters, sizeof(SSubscriptionProgress)); + if (progress == NULL) { + tscError("failed to allocate memory for progress: %s", pSub->topic); + return 0; + } + + if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { + numOfMeters = 1; + int64_t uid = pMeterMetaInfo->pMeterMeta->uid; + progress[0].uid = uid; + progress[0].key = tscGetSubscriptionProgress(pSub, uid); } else { - char qstr[256] = {0}; - sprintf(qstr, "use %s", db); - int res = 
taos_query(pSub->taos, qstr); - if (res != 0) { - tscError("failed to open DB:%s", db); - taos_close(pSub->taos); - tfree(pSub); - } else { - snprintf(qstr, tListLen(qstr), "select * from %s where _c0 > now+1000d", pSub->name); - if (taos_query(pSub->taos, qstr)) { - tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos)); - taos_close(pSub->taos); - tfree(pSub); - return NULL; + SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta; + numOfMeters = 0; + for (int32_t i = 0; i < pMetricMeta->numOfVnodes; i++) { + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, i); + for (int32_t j = 0; j < pVnodeSidList->numOfSids; j++) { + SMeterSidExtInfo *pMeterInfo = tscGetMeterSidInfo(pVnodeSidList, j); + int64_t uid = pMeterInfo->uid; + progress[numOfMeters].uid = uid; + progress[numOfMeters++].key = tscGetSubscriptionProgress(pSub, uid); } - pSub->result = taos_use_result(pSub->taos); - pSub->numOfFields = taos_num_fields(pSub->result); - memcpy(pSub->fields, taos_fetch_fields(pSub->result), sizeof(TAOS_FIELD) * pSub->numOfFields); } + qsort(progress, numOfMeters, sizeof(SSubscriptionProgress), tscCompareSubscriptionProgress); } - return pSub; + free(pSub->progress); + pSub->numOfMeters = numOfMeters; + pSub->progress = progress; + + pSub->lastSyncTime = taosGetTimestampMs(); + + return 1; } -TAOS_ROW taos_consume(TAOS_SUB *tsub) { - SSub * pSub = (SSub *)tsub; - TAOS_ROW row; - char qstr[256]; - if (pSub == NULL) return NULL; - if (pSub->signature != pSub) return NULL; - - while (1) { - if (pSub->result != NULL) { - row = taos_fetch_row(pSub->result); - if (row != NULL) { - pSub->lastKey = *((uint64_t *)row[0]); - return row; - } +static int tscLoadSubscriptionProgress(SSub* pSub) { + char buf[TSDB_MAX_SQL_LEN]; + sprintf(buf, "%s/subscribe/%s", dataDir, pSub->topic); - taos_free_result(pSub->result); - pSub->result = NULL; - uint64_t etime = taosGetTimestampMs(); - int64_t mseconds = pSub->mseconds - etime + pSub->stime; - if (mseconds < 0) mseconds = 0; - taosMsleep((int)mseconds); - } + FILE* fp = fopen(buf, "r"); + if (fp == NULL) { + tscTrace("subscription progress file does not exist: %s", pSub->topic); + return 1; + } - pSub->stime = taosGetTimestampMs(); + if (fgets(buf, sizeof(buf), fp) == NULL) { + tscTrace("invalid subscription progress file: %s", pSub->topic); + fclose(fp); + return 0; + } - sprintf(qstr, "select * from %s where _c0 > %" PRId64 " order by _c0 asc", pSub->name, pSub->lastKey); - if (taos_query(pSub->taos, qstr)) { - tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos)); - return NULL; + for (int i = 0; i < sizeof(buf); i++) { + if (buf[i] == 0) + break; + if (buf[i] == '\r' || buf[i] == '\n') { + buf[i] = 0; + break; } + } + if (strcmp(buf, pSub->pSql->sqlstr) != 0) { + tscTrace("subscription sql statement mismatch: %s", pSub->topic); + fclose(fp); + return 0; + } - pSub->result = taos_use_result(pSub->taos); + if (fgets(buf, sizeof(buf), fp) == NULL || atoi(buf) < 0) { + tscTrace("invalid subscription progress file: %s", pSub->topic); + fclose(fp); + return 0; + } - if (pSub->result == NULL) { - tscTrace("failed to get result, reason:%s", taos_errstr(pSub->taos)); - return NULL; + int numOfMeters = atoi(buf); + SSubscriptionProgress* progress = calloc(numOfMeters, sizeof(SSubscriptionProgress)); + for (int i = 0; i < numOfMeters; i++) { + if (fgets(buf, sizeof(buf), fp) == NULL) { + fclose(fp); + free(progress); + return 0; } + int64_t uid, key; + sscanf(buf, "%" SCNd64 ":%" SCNd64, &uid, &key); + progress[i].uid = uid; + 
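The progress file parsed here holds the subscription's SQL text on the first line, the table count on the second, and one `uid:key` pair per table after that. A toy parser for just the pair lines (`Progress` and `parsePairs` are illustrative names; the real loader also checks the SQL line against `pSql->sqlstr` and sorts the result by uid):

```c
#include <inttypes.h>
#include <stdio.h>

typedef struct Progress {
  int64_t uid;  /* table unique id            */
  int64_t key;  /* last consumed timestamp    */
} Progress;

/* Read n "uid:key" lines; assumes the SQL and count lines were already
 * consumed from fp. Returns 1 on success, 0 on a truncated or malformed
 * file -- in which case the caller should discard the progress entirely. */
static int parsePairs(FILE *fp, Progress *out, int n) {
  char buf[256];
  for (int i = 0; i < n; ++i) {
    if (fgets(buf, sizeof(buf), fp) == NULL) return 0;  /* truncated */
    if (sscanf(buf, "%" SCNd64 ":%" SCNd64,
               &out[i].uid, &out[i].key) != 2) {
      return 0;                                         /* malformed */
    }
  }
  return 1;
}
```

Storing per-table keys (rather than a single timestamp) is what lets the subscription resume correctly after the set of tables under a super table changes.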
progress[i].key = key; } - return NULL; + fclose(fp); + + qsort(progress, numOfMeters, sizeof(SSubscriptionProgress), tscCompareSubscriptionProgress); + pSub->numOfMeters = numOfMeters; + pSub->progress = progress; + tscTrace("subscription progress loaded, %d tables: %s", numOfMeters, pSub->topic); + return 1; } -void taos_unsubscribe(TAOS_SUB *tsub) { - SSub *pSub = (SSub *)tsub; +void tscSaveSubscriptionProgress(void* sub) { + SSub* pSub = (SSub*)sub; - if (pSub == NULL) return; - if (pSub->signature != pSub) return; + char path[256]; + sprintf(path, "%s/subscribe", dataDir); + if (access(path, 0) != 0) { + mkdir(path, 0777); + } - taos_close(pSub->taos); - free(pSub); + sprintf(path, "%s/subscribe/%s", dataDir, pSub->topic); + FILE* fp = fopen(path, "w+"); + if (fp == NULL) { + tscError("failed to create progress file for subscription: %s", pSub->topic); + return; + } + + fputs(pSub->pSql->sqlstr, fp); + fprintf(fp, "\n%d\n", pSub->numOfMeters); + for (int i = 0; i < pSub->numOfMeters; i++) { + int64_t uid = pSub->progress[i].uid; + TSKEY key = pSub->progress[i].key; + fprintf(fp, "%" PRId64 ":%" PRId64 "\n", uid, key); + } + + fclose(fp); } -int taos_subfields_count(TAOS_SUB *tsub) { +TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval) { + STscObj* pObj = (STscObj*)taos; + if (pObj == NULL || pObj->signature != pObj) { + globalCode = TSDB_CODE_DISCONNECTED; + tscError("connection disconnected"); + return NULL; + } + + SSub* pSub = tscCreateSubscription(pObj, topic, sql); + if (pSub == NULL) { + return NULL; + } + pSub->taos = taos; + + if (restart) { + tscTrace("restart subscription: %s", topic); + } else { + tscLoadSubscriptionProgress(pSub); + } + + if (!tscUpdateSubscription(pObj, pSub)) { + taos_unsubscribe(pSub, 1); + return NULL; + } + + pSub->interval = interval; + if (fp != NULL) { + tscTrace("asynchronize subscription, create new timer", topic); + pSub->fp = fp; + pSub->param = param; + taosTmrReset(tscProcessSubscriptionTimer, interval, pSub, tscTmr, &pSub->pTimer); + } + + return pSub; +} + +void taos_free_result_imp(SSqlObj* pSql, int keepCmd); + +TAOS_RES *taos_consume(TAOS_SUB *tsub) { SSub *pSub = (SSub *)tsub; + if (pSub == NULL) return NULL; - return pSub->numOfFields; + tscSaveSubscriptionProgress(pSub); + + SSqlObj* pSql = pSub->pSql; + SSqlRes *pRes = &pSql->res; + + if (pSub->pTimer == NULL) { + int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime; + if (duration < (int64_t)(pSub->interval)) { + tscTrace("subscription consume too frequently, blocking..."); + taosMsleep(pSub->interval - (int32_t)duration); + } + } + + for (int retry = 0; retry < 3; retry++) { + tscRemoveFromSqlList(pSql); + + if (taosGetTimestampMs() - pSub->lastSyncTime > 10 * 60 * 1000) { + tscTrace("begin meter synchronization"); + char* sqlstr = pSql->sqlstr; + pSql->sqlstr = NULL; + taos_free_result_imp(pSql, 0); + pSql->sqlstr = sqlstr; + taosClearDataCache(tscCacheHandle); + if (!tscUpdateSubscription(pSub->taos, pSub)) return NULL; + tscTrace("meter synchronization completed"); + } else { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + uint16_t type = pQueryInfo->type; + taos_free_result_imp(pSql, 1); + pRes->numOfRows = 1; + pRes->numOfTotal = 0; + pRes->qhandle = 0; + pSql->thandle = NULL; + pSql->cmd.command = TSDB_SQL_SELECT; + pQueryInfo->type = type; + + tscGetMeterMetaInfo(&pSql->cmd, 0, 0)->vnodeIndex = 0; + } + + tscDoQuery(pSql); + if (pRes->code != 
TSDB_CODE_NOT_ACTIVE_TABLE) { + break; + } + // meter was removed, make sync time zero, so that next retry will + // do synchronization first + pSub->lastSyncTime = 0; + } + + if (pRes->code != TSDB_CODE_SUCCESS) { + tscError("failed to query data, error code=%d", pRes->code); + tscRemoveFromSqlList(pSql); + return NULL; + } + + pSub->lastConsumeTime = taosGetTimestampMs(); + return pSql; } -TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub) { +void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) { SSub *pSub = (SSub *)tsub; + if (pSub == NULL || pSub->signature != pSub) return; - return pSub->fields; + if (pSub->pTimer != NULL) { + taosTmrStop(pSub->pTimer); + } + + if (keepProgress) { + tscSaveSubscriptionProgress(pSub); + } else { + char path[256]; + sprintf(path, "%s/subscribe/%s", dataDir, pSub->topic); + remove(path); + } + + tscFreeSqlObj(pSub->pSql); + free(pSub->progress); + memset(pSub, 0, sizeof(*pSub)); + free(pSub); } diff --git a/src/client/src/tscSyntaxtreefunction.c b/src/client/src/tscSyntaxtreefunction.c index 914053f2f17461e0f5e7ffbd56691e7fd206cd49..1d82b0f239572676c204025fe535704c192e4da6 100644 --- a/src/client/src/tscSyntaxtreefunction.c +++ b/src/client/src/tscSyntaxtreefunction.c @@ -26,7 +26,7 @@ int32_t step = ((_ord) == TSQL_SO_ASC) ? 1 : -1; \ \ if ((len1) == (len2)) { \ - for (; i < (len2) && i >= 0; i += step, (out) += step) { \ + for (; i < (len2) && i >= 0; i += step, (out) += 1) { \ if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -34,7 +34,7 @@ *(out) = (double)(left)[i] op(right)[i]; \ } \ } else if ((len1) == 1) { \ - for (; i >= 0 && i < (len2); i += step, (out) += step) { \ + for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -42,7 +42,7 @@ *(out) = (double)(left)[0] op(right)[i]; \ } \ } else if ((len2) == 1) { \ - for (; i >= 0 && i < (len1); i += step, (out) += step) { \ + for (; i >= 0 && i < (len1); i += step, (out) += 1) { \ if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -58,7 +58,7 @@ int32_t step = (_ord == TSQL_SO_ASC) ? 
1 : -1; \ \ if (len1 == (len2)) { \ - for (; i >= 0 && i < (len2); i += step, (out) += step) { \ + for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ if (isNull((char *)&(left[i]), _left_type) || isNull((char *)&(right[i]), _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -66,7 +66,7 @@ *(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[i])) * (right)[i]; \ } \ } else if (len1 == 1) { \ - for (; i >= 0 && i < (len2); i += step, (out) += step) { \ + for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ if (isNull((char *)(left), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -74,7 +74,7 @@ *(out) = (double)(left)[0] - ((int64_t)(((double)(left)[0]) / (right)[i])) * (right)[i]; \ } \ } else if ((len2) == 1) { \ - for (; i >= 0 && i < len1; i += step, (out) += step) { \ + for (; i >= 0 && i < len1; i += step, (out) += 1) { \ if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)(right), _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -112,7 +112,7 @@ void calc_fn_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRi int32_t step = (order == TSQL_SO_ASC) ? 1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -121,7 +121,7 @@ void calc_fn_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[i] + pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -130,7 +130,7 @@ void calc_fn_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[0] + pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -310,7 +310,7 @@ void calc_fn_i32_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRi int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)&(pOutput[i]), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -318,7 +318,7 @@ void calc_fn_i32_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[i] - pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -326,7 +326,7 @@ void calc_fn_i32_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[0] - pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -521,7 +521,7 @@ void calc_fn_i32_i32_multi(void *left, void *right, int32_t numLeft, int32_t num int32_t step = (order == TSQL_SO_ASC) ? 1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -530,7 +530,7 @@ void calc_fn_i32_i32_multi(void *left, void *right, int32_t numLeft, int32_t num *pOutput = (double)pLeft[i] * pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -539,7 +539,7 @@ void calc_fn_i32_i32_multi(void *left, void *right, int32_t numLeft, int32_t num *pOutput = (double)pLeft[0] * pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -719,7 +719,7 @@ void calc_fn_i32_i32_div(void *left, void *right, int32_t numLeft, int32_t numRi int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -728,7 +728,7 @@ void calc_fn_i32_i32_div(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[i] / pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -737,7 +737,7 @@ void calc_fn_i32_i32_div(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[0] / pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -933,7 +933,7 @@ void calc_fn_i32_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRi int32_t step = (order == TSQL_SO_ASC) ? 1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -942,7 +942,7 @@ void calc_fn_i32_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[i])) * pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -951,7 +951,7 @@ void calc_fn_i32_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[0] - ((int64_t)(((double)pLeft[0]) / pRight[i])) * pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -991,7 +991,7 @@ void calc_fn_i32_d_rem(void *left, void *right, int32_t numLeft, int32_t numRigh int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -1000,7 +1000,7 @@ void calc_fn_i32_d_rem(void *left, void *right, int32_t numLeft, int32_t numRigh *pOutput = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[i])) * pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -1009,7 +1009,7 @@ void calc_fn_i32_d_rem(void *left, void *right, int32_t numLeft, int32_t numRigh *pOutput = (double)pLeft[0] - ((int64_t)(((double)pLeft[0]) / pRight[i])) * pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 6efe3447196d384548a97442419f0aa91c1b3c16..6c685b06b4e109cee43262a2ae382f56c495d9ce 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -48,6 +48,7 @@ static pthread_once_t tscinit = PTHREAD_ONCE_INIT; extern int tsTscEnableRecordSql; extern int tsNumOfLogLines; void taosInitNote(int numOfNoteLines, int maxNotes, char* lable); +void deltaToUtcInitOnce(); void tscCheckDiskUsage(void *para, void *unused) { taosGetDisk(); @@ -60,6 +61,7 @@ void taos_init_imp() { SRpcInit rpcInit; srand(taosGetTimestampSec()); + deltaToUtcInitOnce(); if (tscEmbedded == 0) { /* @@ -93,7 +95,6 @@ void taos_init_imp() { taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note"); } -#ifdef CLUSTER tscMgmtIpList.numOfIps = 2; strcpy(tscMgmtIpList.ipstr[0], tsMasterIp); tscMgmtIpList.ip[0] = inet_addr(tsMasterIp); @@ -106,7 +107,6 @@ void taos_init_imp() { strcpy(tscMgmtIpList.ipstr[2], tsSecondIp); tscMgmtIpList.ip[2] = inet_addr(tsSecondIp); } -#endif tscInitMsgs(); slaveIndex = rand(); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3362af2343e4eedf1493e59a383dcede46f2040d..e9395d7dde46e478ae5058e6842579eb33079b57 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -14,7 +14,8 @@ */ #include "os.h" -#include "ihash.h" +#include "tscUtil.h" +#include "hash.h" #include "taosmsg.h" #include "tcache.h" #include "tkey.h" @@ -23,7 +24,6 @@ #include "tscProfile.h" #include "tscSecondaryMerge.h" #include "tschemautil.h" -#include "tscUtil.h" #include "tsclient.h" #include "tsqldef.h" #include "ttimer.h" @@ -37,9 +37,9 @@ * fullmetername + '.' + '(nil)' + '.' + '(nil)' + relation + '.' + [tagId1, * tagId2,...] + '.' 
+ group_orderType */ -void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) { +void tscGetMetricMetaCacheKey(SQueryInfo* pQueryInfo, char* str, uint64_t uid) { int32_t index = -1; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoByUid(pCmd, uid, &index); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoByUid(pQueryInfo, uid, &index); int32_t len = 0; char tagIdBuf[128] = {0}; @@ -47,7 +47,7 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) { len += sprintf(&tagIdBuf[len], "%d,", pMeterMetaInfo->tagColumnIndex[i]); } - STagCond* pTagCond = &pCmd->tagCond; + STagCond* pTagCond = &pQueryInfo->tagCond; assert(len < tListLen(tagIdBuf)); const int32_t maxKeySize = TSDB_MAX_TAGS_LEN; // allowed max key size @@ -73,7 +73,7 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) { int32_t keyLen = snprintf(tmp, bufSize, "%s,%s,%s,%d,%s,[%s],%d", pMeterMetaInfo->name, (cond != NULL ? cond->cond : NULL), (tbnameCondLen > 0 ? pTagCond->tbnameCond.cond : NULL), - pTagCond->relType, join, tagIdBuf, pCmd->groupbyExpr.orderType); + pTagCond->relType, join, tagIdBuf, pQueryInfo->groupbyExpr.orderType); assert(keyLen <= bufSize); @@ -115,13 +115,15 @@ void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str) { } bool tscQueryOnMetric(SSqlCmd* pCmd) { - return ((pCmd->type & TSDB_QUERY_TYPE_STABLE_QUERY) == TSDB_QUERY_TYPE_STABLE_QUERY) && + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + return ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) == TSDB_QUERY_TYPE_STABLE_QUERY) && (pCmd->msgType == TSDB_MSG_TYPE_QUERY); } -bool tscQueryMetricTags(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - if (tscSqlExprGet(pCmd, i)->functionId != TSDB_FUNC_TAGPRJ) { +bool tscQueryMetricTags(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + if (tscSqlExprGet(pQueryInfo, i)->functionId != TSDB_FUNC_TAGPRJ) { return false; } } @@ -133,8 +135,10 @@ bool tscIsSelectivityWithTagQuery(SSqlCmd* pCmd) { bool hasTags = false; int32_t numOfSelectivity = 0; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int32_t functId = tscSqlExprGet(pCmd, i)->functionId; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int32_t functId = tscSqlExprGet(pQueryInfo, i)->functionId; if (functId == TSDB_FUNC_TAG_DUMMY) { hasTags = true; continue; @@ -201,61 +205,84 @@ SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx) { return (SMeterSidExtInfo*)(pSidList->pSidExtInfoList[idx] + (char*)pSidList); } -bool tscIsTwoStageMergeMetricQuery(SSqlCmd* pCmd) { - assert(pCmd != NULL); +bool tscIsTwoStageMergeMetricQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) { + if (pQueryInfo == NULL) { + return false; + } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); if (pMeterMetaInfo == NULL || pMeterMetaInfo->pMetricMeta == NULL) { return false; } + + if ((pQueryInfo->type & TSDB_QUERY_TYPE_FREE_RESOURCE) == TSDB_QUERY_TYPE_FREE_RESOURCE) { + return false; + } - // for projection query, iterate all qualified vnodes sequentially - if (tscProjectionQueryOnMetric(pCmd)) { + // for ordered projection query, iterate all qualified vnodes sequentially + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, tableIndex)) { return false; } - if 
(((pCmd->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) != TSDB_QUERY_TYPE_STABLE_SUBQUERY) &&
-      pCmd->command == TSDB_SQL_SELECT) {
-    return UTIL_METER_IS_METRIC(pMeterMetaInfo);
+  if (((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) != TSDB_QUERY_TYPE_STABLE_SUBQUERY) &&
+      pQueryInfo->command == TSDB_SQL_SELECT) {
+    return UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo);
   }
 
   return false;
 }
 
-bool tscProjectionQueryOnMetric(SSqlCmd* pCmd) {
-  assert(pCmd != NULL);
-
-  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
-
+bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex);
+
   /*
-   * In following cases, return false for project query on metric
-   * 1. failed to get metermeta from server; 2. not a metric; 3. limit 0; 4. show query, instead of a select query
+   * In the following cases, return false for a projection query on a super table:
+   * 1. failed to get metermeta from server; 2. not a super table; 3. the limit is 0;
+   * 4. show queries, instead of a select query
    */
-  if (pMeterMetaInfo == NULL || !UTIL_METER_IS_METRIC(pMeterMetaInfo) ||
-      pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pCmd->exprsInfo.numOfExprs == 0) {
+  if (pMeterMetaInfo == NULL || !UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo) ||
+      pQueryInfo->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pQueryInfo->exprsInfo.numOfExprs == 0) {
     return false;
   }
-
+
   // only query on tag, not a projection query
-  if (tscQueryMetricTags(pCmd)) {
+  if (tscQueryMetricTags(pQueryInfo)) {
     return false;
   }
-
+
   // for project query, only the following two function is allowed
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    int32_t functionId = tscSqlExprGet(pCmd, i)->functionId;
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
     if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && functionId != TSDB_FUNC_TAG &&
         functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) {
       return false;
     }
   }
-
+
   return true;
 }
 
-bool tscProjectionQueryOnTable(SSqlCmd* pCmd) {
-  for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
-    int32_t functionId = tscSqlExprGet(pCmd, i)->functionId;
+bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
+  if (!tscIsProjectionQueryOnSTable(pQueryInfo, tableIndex)) {
+    return false;
+  }
+
+  // no order-by column is specified, so this is a non-ordered projection query
+  return pQueryInfo->order.orderColId < 0;
+}
+
+bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
+  if (!tscIsProjectionQueryOnSTable(pQueryInfo, tableIndex)) {
+    return false;
+  }
+
+  // an order-by column exists, so this is an ordered projection query
+  return pQueryInfo->order.orderColId >= 0;
+}
+
+bool tscProjectionQueryOnTable(SQueryInfo* pQueryInfo) {
+  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
+    int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
     if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TS) {
       return false;
     }
@@ -264,9 +291,9 @@ bool tscProjectionQueryOnTable(SSqlCmd* pCmd) {
   return true;
 }
 
-bool tscIsPointInterpQuery(SSqlCmd* pCmd) {
-  for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) {
-    SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
+bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) {
+  for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
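The hunk above splits the old `tscProjectionQueryOnMetric` check into an ordered and a non-ordered variant, distinguished purely by `order.orderColId` (negative means the statement has no ORDER BY column). A minimal sketch of how a caller could dispatch on the two predicates, assuming the declarations from the patched tscUtil header; the `STRATEGY_*` constants are illustrative and not part of this patch:

```c
// Illustrative only: the real dispatch lives in the tscProcessSql code path.
enum { STRATEGY_SEQUENTIAL_VNODE_SCAN, STRATEGY_TWO_STAGE_MERGE, STRATEGY_DEFAULT };

static int pickSuperTableStrategy(SQueryInfo* pQueryInfo) {
  // no ORDER BY: qualified vnodes can simply be scanned one after another
  if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
    return STRATEGY_SEQUENTIAL_VNODE_SCAN;
  }
  // aggregation or ordered projection: per-vnode subqueries plus a merge stage
  if (tscIsTwoStageMergeMetricQuery(pQueryInfo, 0)) {
    return STRATEGY_TWO_STAGE_MERGE;
  }
  return STRATEGY_DEFAULT;
}
```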
if (pExpr == NULL) { return false; } @@ -283,9 +310,9 @@ bool tscIsPointInterpQuery(SSqlCmd* pCmd) { return true; } -bool tscIsTWAQuery(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); +bool tscIsTWAQuery(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr == NULL) { continue; } @@ -299,81 +326,122 @@ bool tscIsTWAQuery(SSqlCmd* pCmd) { return false; } -void tscClearInterpInfo(SSqlCmd* pCmd) { - if (!tscIsPointInterpQuery(pCmd)) { +void tscClearInterpInfo(SQueryInfo* pQueryInfo) { + if (!tscIsPointInterpQuery(pQueryInfo)) { return; } - pCmd->interpoType = TSDB_INTERPO_NONE; - memset(pCmd->defaultVal, 0, sizeof(pCmd->defaultVal)); + pQueryInfo->interpoType = TSDB_INTERPO_NONE; + tfree(pQueryInfo->defaultVal); } void tscClearSqlMetaInfoForce(SSqlCmd* pCmd) { /* remove the metermeta/metricmeta in cache */ - // taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMeterMeta), - // true); - // taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMetricMeta), - // true); + // taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMeterMeta), true); + // taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMetricMeta), true); } -int32_t tscCreateResPointerInfo(SSqlCmd* pCmd, SSqlRes* pRes) { +int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) { if (pRes->tsrow == NULL) { pRes->numOfnchar = 0; - int32_t numOfOutputCols = pCmd->fieldsInfo.numOfOutputCols; - + + int32_t numOfOutputCols = pQueryInfo->fieldsInfo.numOfOutputCols; for (int32_t i = 0; i < numOfOutputCols; ++i) { - TAOS_FIELD* pField = tscFieldInfoGetField(pCmd, i); + TAOS_FIELD* pField = tscFieldInfoGetField(pQueryInfo, i); if (pField->type == TSDB_DATA_TYPE_NCHAR) { pRes->numOfnchar++; } } - + pRes->tsrow = calloc(1, (POINTER_BYTES + sizeof(short)) * numOfOutputCols + POINTER_BYTES * pRes->numOfnchar); - if (pRes->tsrow == NULL) { + pRes->bytes = calloc(numOfOutputCols, sizeof(short)); + + if (pRes->numOfnchar > 0) { + pRes->buffer = calloc(POINTER_BYTES, pRes->numOfnchar); + } + + // not enough memory + if (pRes->tsrow == NULL || pRes->bytes == NULL || (pRes->buffer == NULL && pRes->numOfnchar > 0)) { + tfree(pRes->tsrow); + tfree(pRes->bytes); + tfree(pRes->buffer); + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; return pRes->code; } - - pRes->bytes = (short*)((char*)pRes->tsrow + POINTER_BYTES * numOfOutputCols); - if (pRes->numOfnchar > 0) { - pRes->buffer = (char**)((char*)pRes->bytes + sizeof(short) * numOfOutputCols); - } } return TSDB_CODE_SUCCESS; } void tscDestroyResPointerInfo(SSqlRes* pRes) { - // free all buffers containing the multibyte string - for (int i = 0; i < pRes->numOfnchar; i++) { - if (pRes->buffer[i] != NULL) { + if (pRes->buffer != NULL) { + assert(pRes->numOfnchar > 0); + // free all buffers containing the multibyte string + for (int i = 0; i < pRes->numOfnchar; i++) { tfree(pRes->buffer[i]); } + + pRes->numOfnchar = 0; } - + + tfree(pRes->pRsp); tfree(pRes->tsrow); - - pRes->numOfnchar = 0; - pRes->buffer = NULL; - pRes->bytes = NULL; + + tfree(pRes->pGroupRec); + tfree(pRes->pColumnIndex); + tfree(pRes->buffer); + tfree(pRes->bytes); + + pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free } void tscFreeSqlCmdData(SSqlCmd* pCmd) { pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); + tscFreeSubqueryInfo(pCmd); +} - tscTagCondRelease(&pCmd->tagCond); - 
tscClearFieldInfo(&pCmd->fieldsInfo); +/* + * this function must not change the pRes->code value, since it may be used later. + */ +void tscFreeResData(SSqlObj* pSql) { + SSqlRes* pRes = &pSql->res; + + pRes->row = 0; + + pRes->rspType = 0; + pRes->rspLen = 0; + pRes->row = 0; + + pRes->numOfRows = 0; + pRes->numOfTotal = 0; + pRes->numOfTotalInCurrentClause = 0; + + pRes->numOfGroups = 0; + pRes->precision = 0; + pRes->qhandle = 0; + + pRes->offset = 0; + pRes->useconds = 0; + + tscDestroyLocalReducer(pSql); + + tscDestroyResPointerInfo(pRes); +} - tfree(pCmd->exprsInfo.pExprs); - memset(&pCmd->exprsInfo, 0, sizeof(pCmd->exprsInfo)); +void tscFreeSqlResult(SSqlObj* pSql) { + tfree(pSql->res.pRsp); + pSql->res.row = 0; + pSql->res.numOfRows = 0; + pSql->res.numOfTotal = 0; - tscColumnBaseInfoDestroy(&pCmd->colList); - memset(&pCmd->colList, 0, sizeof(pCmd->colList)); + pSql->res.numOfGroups = 0; + tfree(pSql->res.pGroupRec); - if (pCmd->tsBuf != NULL) { - tsBufDestory(pCmd->tsBuf); - pCmd->tsBuf = NULL; - } + tscDestroyLocalReducer(pSql); + + tscDestroyResPointerInfo(&pSql->res); + tfree(pSql->res.pColumnIndex); } void tscFreeSqlObjPartial(SSqlObj* pSql) { @@ -382,8 +450,6 @@ void tscFreeSqlObjPartial(SSqlObj* pSql) { } SSqlCmd* pCmd = &pSql->cmd; - SSqlRes* pRes = &pSql->res; - STscObj* pObj = pSql->pTscObj; int32_t cmd = pCmd->command; @@ -392,30 +458,18 @@ void tscFreeSqlObjPartial(SSqlObj* pSql) { tscRemoveFromSqlList(pSql); } - pCmd->command = -1; + pCmd->command = 0; // pSql->sqlstr will be used by tscBuildQueryStreamDesc pthread_mutex_lock(&pObj->mutex); tfree(pSql->sqlstr); pthread_mutex_unlock(&pObj->mutex); - tfree(pSql->res.pRsp); - pSql->res.row = 0; - pSql->res.numOfRows = 0; - pSql->res.numOfTotal = 0; - - pSql->res.numOfGroups = 0; - tfree(pSql->res.pGroupRec); - - tscDestroyLocalReducer(pSql); - + tscFreeSqlResult(pSql); tfree(pSql->pSubs); pSql->numOfSubs = 0; - tscDestroyResPointerInfo(pRes); - tfree(pSql->res.pColumnIndex); tscFreeSqlCmdData(pCmd); - tscRemoveAllMeterMetaInfo(pCmd, false); } void tscFreeSqlObj(SSqlObj* pSql) { @@ -426,6 +480,7 @@ void tscFreeSqlObj(SSqlObj* pSql) { pSql->signature = NULL; pSql->fp = NULL; + SSqlCmd* pCmd = &pSql->cmd; memset(pCmd->payload, 0, (size_t)pCmd->allocSize); @@ -433,16 +488,6 @@ void tscFreeSqlObj(SSqlObj* pSql) { pCmd->allocSize = 0; - if (pSql->res.buffer != NULL) { - for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; i++) { - if (pSql->res.buffer[i] != NULL) { - tfree(pSql->res.buffer[i]); - } - } - - tfree(pSql->res.buffer); - } - if (pSql->fp == NULL) { tsem_destroy(&pSql->rspSem); tsem_destroy(&pSql->emptyRspSem); @@ -535,16 +580,17 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) { SSqlCmd* pCmd = &pSql->cmd; assert(pDataBlock->pMeterMeta != NULL); - pCmd->count = pDataBlock->numOfMeters; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + pCmd->numOfTablesInSubmit = pDataBlock->numOfMeters; + + assert(pCmd->numOfClause == 1); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); // set the correct metermeta object, the metermeta has been locked in pDataBlocks, so it must be in the cache if (pMeterMetaInfo->pMeterMeta != pDataBlock->pMeterMeta) { strcpy(pMeterMetaInfo->name, pDataBlock->meterId); taosRemoveDataFromCache(tscCacheHandle, (void**)&(pMeterMetaInfo->pMeterMeta), false); - pMeterMetaInfo->pMeterMeta = pDataBlock->pMeterMeta; - pDataBlock->pMeterMeta = NULL; // delegate the ownership of metermeta to pMeterMetaInfo + 
pMeterMetaInfo->pMeterMeta = taosTransferDataInCache(tscCacheHandle, (void**)&pDataBlock->pMeterMeta);
   } else {
     assert(strncmp(pMeterMetaInfo->name, pDataBlock->meterId, tListLen(pDataBlock->meterId)) == 0);
   }
@@ -590,7 +636,7 @@ void tscFreeUnusedDataBlocks(SDataBlockList* pList) {
  * @return
  */
 int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
-                           STableDataBlocks** dataBlocks) {
+                           SMeterMeta* pMeterMeta, STableDataBlocks** dataBlocks) {
   STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks));
   if (dataBuf == NULL) {
     tscError("failed to allocated memory, reason:%s", strerror(errno));
@@ -598,6 +644,11 @@
   }
 
   dataBuf->nAllocSize = (uint32_t)initialSize;
+  dataBuf->headerSize = startOffset;  // the header size will always be the startOffset value, reserved for the submit block header
+  if (dataBuf->nAllocSize <= dataBuf->headerSize) {
+    dataBuf->nAllocSize = dataBuf->headerSize*2;
+  }
+
   dataBuf->pData = calloc(1, dataBuf->nAllocSize);
   dataBuf->ordered = true;
   dataBuf->prevTS = INT64_MIN;
@@ -610,37 +661,33 @@
 
   /*
    * The metermeta may be released since the metermeta cache are completed clean by other thread
-   * due to operation such as drop database.
+   * due to operation such as drop database. So here we add the reference count directly instead of invoking
+   * taosGetDataFromCache, which may return a NULL value.
    */
-  dataBuf->pMeterMeta = taosGetDataFromCache(tscCacheHandle, dataBuf->meterId);
-  assert(initialSize > 0);
-
-  if (dataBuf->pMeterMeta == NULL) {
-    tfree(dataBuf);
-    return TSDB_CODE_QUERY_CACHE_ERASED;
-  } else {
-    *dataBlocks = dataBuf;
-    return TSDB_CODE_SUCCESS;
-  }
+  dataBuf->pMeterMeta = taosGetDataFromExists(tscCacheHandle, pMeterMeta);
+  assert(initialSize > 0 && pMeterMeta != NULL && dataBuf->pMeterMeta != NULL);
+
+  *dataBlocks = dataBuf;
+  return TSDB_CODE_SUCCESS;
 }
 
 int32_t tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size,
-                                int32_t startOffset, int32_t rowSize, const char* tableId,
+                                int32_t startOffset, int32_t rowSize, const char* tableId, SMeterMeta* pMeterMeta,
                                 STableDataBlocks** dataBlocks) {
   *dataBlocks = NULL;
-  STableDataBlocks** t1 = (STableDataBlocks**) taosGetIntHashData(pHashList, id);
+  STableDataBlocks** t1 = (STableDataBlocks**)taosGetDataFromHash(pHashList, (const char*)&id, sizeof(id));
   if (t1 != NULL) {
     *dataBlocks = *t1;
   }
 
   if (*dataBlocks == NULL) {
-    int32_t ret = tscCreateDataBlock((size_t) size, rowSize, startOffset, tableId, dataBlocks);
+    int32_t ret = tscCreateDataBlock((size_t)size, rowSize, startOffset, tableId, pMeterMeta, dataBlocks);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
     }
 
-    *dataBlocks = *(STableDataBlocks**)taosAddIntHash(pHashList, id, (char*)dataBlocks);
+    taosAddToHashTable(pHashList, (const char*)&id, sizeof(int64_t), (char*)dataBlocks, POINTER_BYTES);
     tscAppendDataBlock(pDataBlockList, *dataBlocks);
   }
 
@@ -650,19 +697,20 @@
 int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockList) {
   SSqlCmd* pCmd = &pSql->cmd;
 
-  void* pVnodeDataBlockHashList = taosInitIntHash(8, POINTER_BYTES, taosHashInt);
+  void* pVnodeDataBlockHashList = taosInitHashTable(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
   SDataBlockList* pVnodeDataBlockList =
tscCreateBlockArrayList(); for (int32_t i = 0; i < pTableDataBlockList->nSize; ++i) { STableDataBlocks* pOneTableBlock = pTableDataBlockList->pData[i]; STableDataBlocks* dataBuf = NULL; - int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pVnodeDataBlockList, pOneTableBlock->vgid, - TSDB_PAYLOAD_SIZE, tsInsertHeadSize, 0, pOneTableBlock->meterId, &dataBuf); + int32_t ret = + tscGetDataBlockFromList(pVnodeDataBlockHashList, pVnodeDataBlockList, pOneTableBlock->vgid, TSDB_PAYLOAD_SIZE, + tsInsertHeadSize, 0, pOneTableBlock->meterId, pOneTableBlock->pMeterMeta, &dataBuf); if (ret != TSDB_CODE_SUCCESS) { - tscError("%p failed to allocate the data buffer block for merging table data", pSql); - tscDestroyBlockArrayList(pTableDataBlockList); - + tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret); + taosCleanUpHashTable(pVnodeDataBlockHashList); + tscDestroyBlockArrayList(pVnodeDataBlockList); return ret; } @@ -679,7 +727,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockLi } else { // failed to allocate memory, free already allocated memory and return error code tscError("%p failed to allocate memory for merging submit block, size:%d", pSql, dataBuf->nAllocSize); - taosCleanUpIntHash(pVnodeDataBlockHashList); + taosCleanUpHashTable(pVnodeDataBlockHashList); tfree(dataBuf->pData); tscDestroyBlockArrayList(pVnodeDataBlockList); @@ -690,8 +738,10 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockLi SShellSubmitBlock* pBlocks = (SShellSubmitBlock*)pOneTableBlock->pData; sortRemoveDuplicates(pOneTableBlock); - tscTrace("%p meterId:%s, sid:%d, rows:%d, sversion:%d", pSql, pOneTableBlock->meterId, pBlocks->sid, - pBlocks->numOfRows, pBlocks->sversion); + char* e = (char*)pBlocks->payLoad + pOneTableBlock->rowSize*(pBlocks->numOfRows-1); + + tscTrace("%p meterId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->meterId, pBlocks->sid, + pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->payLoad), GET_INT64_VAL(e)); pBlocks->sid = htonl(pBlocks->sid); pBlocks->uid = htobe64(pBlocks->uid); @@ -710,7 +760,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockLi pCmd->pDataBlocks = pVnodeDataBlockList; tscFreeUnusedDataBlocks(pCmd->pDataBlocks); - taosCleanUpIntHash(pVnodeDataBlockHashList); + taosCleanUpHashTable(pVnodeDataBlockHashList); return TSDB_CODE_SUCCESS; } @@ -729,9 +779,14 @@ void tscCloseTscObj(STscObj* pObj) { } bool tscIsInsertOrImportData(char* sqlstr) { - int32_t index = 0; - SSQLToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL); - return t0.type == TK_INSERT || t0.type == TK_IMPORT; + int32_t index = 0; + + do { + SSQLToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL); + if (t0.type != TK_LP) { + return t0.type == TK_INSERT || t0.type == TK_IMPORT; + } + } while (1); } int tscAllocPayload(SSqlCmd* pCmd, int size) { @@ -818,7 +873,7 @@ void tscFieldInfoSetValFromField(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIE } void tscFieldInfoUpdateVisible(SFieldInfo* pFieldInfo, int32_t index, bool visible) { - if (index < 0 || index > pFieldInfo->numOfOutputCols) { + if (index < 0 || index >= pFieldInfo->numOfOutputCols) { return; } @@ -847,8 +902,8 @@ void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, co pFieldInfo->numOfOutputCols++; } -void tscFieldInfoCalOffset(SSqlCmd* pCmd) { - SFieldInfo* pFieldInfo = &pCmd->fieldsInfo; +void tscFieldInfoCalOffset(SQueryInfo* 
pQueryInfo) { + SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; pFieldInfo->pOffset[0] = 0; for (int32_t i = 1; i < pFieldInfo->numOfOutputCols; ++i) { @@ -856,8 +911,8 @@ void tscFieldInfoCalOffset(SSqlCmd* pCmd) { } } -void tscFieldInfoUpdateOffset(SSqlCmd* pCmd) { - SFieldInfo* pFieldInfo = &pCmd->fieldsInfo; +void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo) { + SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; if (pFieldInfo->numOfOutputCols == 0) { return; } @@ -869,7 +924,7 @@ void tscFieldInfoUpdateOffset(SSqlCmd* pCmd) { * for potential secondary merge exists */ for (int32_t i = 1; i < pFieldInfo->numOfOutputCols; ++i) { - pFieldInfo->pOffset[i] = pFieldInfo->pOffset[i - 1] + tscSqlExprGet(pCmd, i - 1)->resBytes; + pFieldInfo->pOffset[i] = pFieldInfo->pOffset[i - 1] + tscSqlExprGet(pQueryInfo, i - 1)->resBytes; } } @@ -880,7 +935,7 @@ void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList if (size <= 0) { *dst = *src; - tscFieldInfoCopyAll(src, dst); + tscFieldInfoCopyAll(dst, src); } else { // only copy the required column for (int32_t i = 0; i < size; ++i) { assert(indexList[i] >= 0 && indexList[i] <= src->numOfOutputCols); @@ -889,7 +944,7 @@ void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList } } -void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst) { +void tscFieldInfoCopyAll(SFieldInfo* dst, SFieldInfo* src) { *dst = *src; dst->pFields = malloc(sizeof(TAOS_FIELD) * dst->numOfAlloc); @@ -901,24 +956,46 @@ void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst) { memcpy(dst->pVisibleCols, src->pVisibleCols, sizeof(bool) * dst->numOfOutputCols); } -TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index) { - if (index >= pCmd->fieldsInfo.numOfOutputCols) { +TAOS_FIELD* tscFieldInfoGetField(SQueryInfo* pQueryInfo, int32_t index) { + if (index >= pQueryInfo->fieldsInfo.numOfOutputCols) { return NULL; } - return &pCmd->fieldsInfo.pFields[index]; + return &pQueryInfo->fieldsInfo.pFields[index]; } -int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index) { - if (index >= pCmd->fieldsInfo.numOfOutputCols) { +int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutputCols; } + +int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) { + if (index >= pQueryInfo->fieldsInfo.numOfOutputCols) { return 0; } - return pCmd->fieldsInfo.pOffset[index]; + return pQueryInfo->fieldsInfo.pOffset[index]; } -int32_t tscGetResRowLength(SSqlCmd* pCmd) { - SFieldInfo* pFieldInfo = &pCmd->fieldsInfo; +int32_t tscFieldInfoCompare(SFieldInfo* pFieldInfo1, SFieldInfo* pFieldInfo2) { + assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL); + + if (pFieldInfo1->numOfOutputCols != pFieldInfo2->numOfOutputCols) { + return pFieldInfo1->numOfOutputCols - pFieldInfo2->numOfOutputCols; + } + + for (int32_t i = 0; i < pFieldInfo1->numOfOutputCols; ++i) { + TAOS_FIELD* pField1 = &pFieldInfo1->pFields[i]; + TAOS_FIELD* pField2 = &pFieldInfo2->pFields[i]; + + if (pField1->type != pField2->type || pField1->bytes != pField2->bytes || + strcasecmp(pField1->name, pField2->name) != 0) { + return 1; + } + } + + return 0; +} + +int32_t tscGetResRowLength(SQueryInfo* pQueryInfo) { + SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; if (pFieldInfo->numOfOutputCols <= 0) { return 0; } @@ -968,8 +1045,8 @@ static void _exprEvic(SSqlExprInfo* pExprInfo, int32_t index) { } } -SSqlExpr* tscSqlExprInsertEmpty(SSqlCmd* pCmd, int32_t index, int16_t functionId) { - SSqlExprInfo* pExprInfo = 
&pCmd->exprsInfo; +SSqlExpr* tscSqlExprInsertEmpty(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId) { + SSqlExprInfo* pExprInfo = &pQueryInfo->exprsInfo; _exprCheckSpace(pExprInfo, pExprInfo->numOfExprs + 1); _exprEvic(pExprInfo, index); @@ -981,11 +1058,11 @@ SSqlExpr* tscSqlExprInsertEmpty(SSqlCmd* pCmd, int32_t index, int16_t functionId return pExpr; } -SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pColIndex->tableIndex); +SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, + int16_t type, int16_t size, int16_t interSize) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, pColIndex->tableIndex); - SSqlExprInfo* pExprInfo = &pCmd->exprsInfo; + SSqlExprInfo* pExprInfo = &pQueryInfo->exprsInfo; _exprCheckSpace(pExprInfo, pExprInfo->numOfExprs + 1); _exprEvic(pExprInfo, index); @@ -1025,10 +1102,10 @@ SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, SCo return pExpr; } -SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, - int16_t size) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - SSqlExprInfo* pExprInfo = &pCmd->exprsInfo; +SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, + int16_t type, int16_t size) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + SSqlExprInfo* pExprInfo = &pQueryInfo->exprsInfo; if (index > pExprInfo->numOfExprs) { return NULL; } @@ -1059,14 +1136,45 @@ void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, assert(pExpr->numOfParams <= 3); } -SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index) { - if (pCmd->exprsInfo.numOfExprs <= index) { +SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index) { + if (pQueryInfo->exprsInfo.numOfExprs <= index) { return NULL; } - return &pCmd->exprsInfo.pExprs[index]; + return &pQueryInfo->exprsInfo.pExprs[index]; +} + +void* tscSqlExprDestroy(SSqlExpr* pExpr) { + if (pExpr == NULL) { + return NULL; + } + + for(int32_t i = 0; i < tListLen(pExpr->param); ++i) { + tVariantDestroy(&pExpr->param[i]); + } + + return NULL; } +/* + * NOTE: Does not release SSqlExprInfo here. 
+ */ +void tscSqlExprInfoDestroy(SSqlExprInfo* pExprInfo) { + if (pExprInfo->numOfAlloc == 0) { + return; + } + + for(int32_t i = 0; i < pExprInfo->numOfAlloc; ++i) { + tscSqlExprDestroy(&pExprInfo->pExprs[i]); + } + + tfree(pExprInfo->pExprs); + + pExprInfo->numOfAlloc = 0; + pExprInfo->numOfExprs = 0; +} + + void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t tableuid) { if (src == NULL) { return; @@ -1074,7 +1182,7 @@ void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t tableui *dst = *src; - dst->pExprs = malloc(sizeof(SSqlExpr) * dst->numOfAlloc); + dst->pExprs = calloc(dst->numOfAlloc, sizeof(SSqlExpr)); int16_t num = 0; for (int32_t i = 0; i < src->numOfExprs; ++i) { if (src->pExprs[i].uid == tableuid) { @@ -1143,8 +1251,8 @@ void tscColumnBaseInfoUpdateTableIndex(SColumnBaseInfo* pColList, int16_t tableI } // todo refactor -SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* pColIndex) { - SColumnBaseInfo* pcolList = &pCmd->colList; +SColumnBase* tscColumnBaseInfoInsert(SQueryInfo* pQueryInfo, SColumnIndex* pColIndex) { + SColumnBaseInfo* pcolList = &pQueryInfo->colList; // ignore the tbname column to be inserted into source list if (pColIndex->columnIndex < 0) { @@ -1172,7 +1280,6 @@ SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* pColIndex) { pcolList->pColList[i].colIndex = *pColIndex; pcolList->numOfCols++; - pCmd->numOfCols++; } return &pcolList->pColList[i]; @@ -1390,13 +1497,12 @@ void tscIncStreamExecutionCount(void* pStream) { ps->num += 1; } -bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +bool tscValidateColumnId(SMeterMetaInfo* pMeterMetaInfo, int32_t colId) { if (pMeterMetaInfo->pMeterMeta == NULL) { return false; } - if (colId == -1 && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (colId == -1 && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { return true; } @@ -1444,12 +1550,12 @@ void tscTagCondRelease(STagCond* pCond) { memset(pCond, 0, sizeof(STagCond)); } -void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); pColInfo[i].functionId = pExpr->functionId; if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { @@ -1472,18 +1578,20 @@ void tscSetFreeHeatBeat(STscObj* pObj) { assert(pHeatBeat == pHeatBeat->signature); // to denote the heart-beat timer close connection and free all allocated resources - pHeatBeat->cmd.type = TSDB_QUERY_TYPE_FREE_RESOURCE; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pHeatBeat->cmd, 0); + pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE; } bool tscShouldFreeHeatBeat(SSqlObj* pHb) { assert(pHb == pHb->signature); - return pHb->cmd.type == TSDB_QUERY_TYPE_FREE_RESOURCE; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pHb->cmd, 0); + return pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE; } void tscCleanSqlCmd(SSqlCmd* pCmd) { - tscFreeSqlCmdData(pCmd); - - assert(pCmd->pMeterInfo == NULL); + pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); + tscFreeSubqueryInfo(pCmd); 
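`tscSqlExprCopy` in the hunk above switches the expression array from `malloc` to `calloc`, and the new `tscSqlExprInfoDestroy` walks every allocated slot (`numOfAlloc`, not just `numOfExprs`), calling `tVariantDestroy` on each parameter. A self-contained sketch of why the zero-initialized allocation matters; the types here are stand-ins, not the real definitions:

```c
#include <stdlib.h>

typedef struct { void* pz; } tVariant;           // stand-in for the real tVariant
typedef struct { tVariant param[3]; } SSqlExpr;  // stand-in for the real SSqlExpr

static void tVariantDestroy(tVariant* v) {  // free(NULL) is a no-op, so zeroed slots are safe
  free(v->pz);
  v->pz = NULL;
}

int main(void) {
  int numOfAlloc = 8;  // capacity; only some slots ever get populated
  // calloc (not malloc): unpopulated slots stay zeroed, so the destroy pass
  // below is harmless on them instead of freeing garbage pointers
  SSqlExpr* pExprs = calloc(numOfAlloc, sizeof(SSqlExpr));
  for (int i = 0; i < numOfAlloc; ++i) {
    for (int j = 0; j < 3; ++j) tVariantDestroy(&pExprs[i].param[j]);
  }
  free(pExprs);
  return 0;
}
```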
uint32_t allocSize = pCmd->allocSize; char* allocPtr = pCmd->payload; @@ -1537,9 +1645,10 @@ bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) { * data blocks have been submit to vnode. */ SDataBlockList* pDataBlocks = pCmd->pDataBlocks; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - assert(pSql->cmd.numOfTables == 1); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + assert(pQueryInfo->numOfTables == 1 || pQueryInfo->numOfTables == 2); if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) { tscTrace("%p object should be release since all data blocks have been submit", pSql); @@ -1553,19 +1662,68 @@ bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) { } } -SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index) { - if (pCmd == NULL || pCmd->numOfTables == 0) { +/** + * + * @param pCmd + * @param clauseIndex denote the index of the union sub clause, usually are 0, if no union query exists. + * @param tableIndex denote the table index for join query, where more than one table exists + * @return + */ +SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t clauseIndex, int32_t tableIndex) { + if (pCmd == NULL || pCmd->numOfClause == 0) { + return NULL; + } + + assert(clauseIndex >= 0 && clauseIndex < pCmd->numOfClause); + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + return tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); +} + +SMeterMetaInfo* tscGetMeterMetaInfoFromQueryInfo(SQueryInfo* pQueryInfo, int32_t tableIndex) { + assert(pQueryInfo != NULL); + + if (pQueryInfo->pMeterInfo == NULL) { + assert(pQueryInfo->numOfTables == 0); + return NULL; + } + + assert(tableIndex >= 0 && tableIndex <= pQueryInfo->numOfTables && pQueryInfo->pMeterInfo != NULL); + + return pQueryInfo->pMeterInfo[tableIndex]; +} + +SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) { + assert(pCmd != NULL && subClauseIndex >= 0 && subClauseIndex < TSDB_MAX_UNION_CLAUSE); + + if (pCmd->pQueryInfo == NULL || subClauseIndex >= pCmd->numOfClause) { return NULL; } - assert(index >= 0 && index <= pCmd->numOfTables && pCmd->pMeterInfo != NULL); - return pCmd->pMeterInfo[index]; + return pCmd->pQueryInfo[subClauseIndex]; +} + +int32_t tscGetQueryInfoDetailSafely(SSqlCmd* pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo) { + int32_t ret = TSDB_CODE_SUCCESS; + + *pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex); + + while ((*pQueryInfo) == NULL) { + if ((ret = tscAddSubqueryInfo(pCmd)) != TSDB_CODE_SUCCESS) { + return ret; + } + + (*pQueryInfo) = tscGetQueryInfoDetail(pCmd, subClauseIndex); + } + + return TSDB_CODE_SUCCESS; } -SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* index) { +SMeterMetaInfo* tscGetMeterMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index) { int32_t k = -1; - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - if (pCmd->pMeterInfo[i]->pMeterMeta->uid == uid) { + + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + if (pQueryInfo->pMeterInfo[i]->pMeterMeta->uid == uid) { k = i; break; } @@ -1575,20 +1733,80 @@ SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* i *index = k; } - return tscGetMeterMetaInfo(pCmd, k); + assert(k != -1); + return tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, k); +} + +int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) { + assert(pCmd != NULL); + + size_t s = pCmd->numOfClause + 1; + char* tmp = 
realloc(pCmd->pQueryInfo, s * POINTER_BYTES); + if (tmp == NULL) { + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } + + pCmd->pQueryInfo = (SQueryInfo**)tmp; + + SQueryInfo* pQueryInfo = calloc(1, sizeof(SQueryInfo)); + pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer + + pCmd->pQueryInfo[pCmd->numOfClause++] = pQueryInfo; + return TSDB_CODE_SUCCESS; } -SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta, - int16_t numOfTags, int16_t* tags) { - void* pAlloc = realloc(pCmd->pMeterInfo, (pCmd->numOfTables + 1) * POINTER_BYTES); +static void doClearSubqueryInfo(SQueryInfo* pQueryInfo) { + tscTagCondRelease(&pQueryInfo->tagCond); + tscClearFieldInfo(&pQueryInfo->fieldsInfo); + + tscSqlExprInfoDestroy(&pQueryInfo->exprsInfo); + memset(&pQueryInfo->exprsInfo, 0, sizeof(pQueryInfo->exprsInfo)); + + tscColumnBaseInfoDestroy(&pQueryInfo->colList); + memset(&pQueryInfo->colList, 0, sizeof(pQueryInfo->colList)); + + pQueryInfo->tsBuf = tsBufDestory(pQueryInfo->tsBuf); + + tfree(pQueryInfo->defaultVal); +} + +void tscClearSubqueryInfo(SSqlCmd* pCmd) { + for (int32_t i = 0; i < pCmd->numOfClause; ++i) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i); + doClearSubqueryInfo(pQueryInfo); + } +} + +void tscFreeSubqueryInfo(SSqlCmd* pCmd) { + if (pCmd == NULL || pCmd->numOfClause == 0) { + return; + } + + for (int32_t i = 0; i < pCmd->numOfClause; ++i) { + char* addr = (char*)pCmd - offsetof(SSqlObj, cmd); + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i); + + doClearSubqueryInfo(pQueryInfo); + tscRemoveAllMeterMetaInfo(pQueryInfo, (const char*)addr, false); + tfree(pQueryInfo); + } + + pCmd->numOfClause = 0; + tfree(pCmd->pQueryInfo); +} + +SMeterMetaInfo* tscAddMeterMetaInfo(SQueryInfo* pQueryInfo, const char* name, SMeterMeta* pMeterMeta, + SMetricMeta* pMetricMeta, int16_t numOfTags, int16_t* tags) { + void* pAlloc = realloc(pQueryInfo->pMeterInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES); if (pAlloc == NULL) { return NULL; } - pCmd->pMeterInfo = pAlloc; - pCmd->pMeterInfo[pCmd->numOfTables] = calloc(1, sizeof(SMeterMetaInfo)); + pQueryInfo->pMeterInfo = pAlloc; + pQueryInfo->pMeterInfo[pQueryInfo->numOfTables] = calloc(1, sizeof(SMeterMetaInfo)); - SMeterMetaInfo* pMeterMetaInfo = pCmd->pMeterInfo[pCmd->numOfTables]; + SMeterMetaInfo* pMeterMetaInfo = pQueryInfo->pMeterInfo[pQueryInfo->numOfTables]; assert(pMeterMetaInfo != NULL); if (name != NULL) { @@ -1604,41 +1822,41 @@ SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* memcpy(pMeterMetaInfo->tagColumnIndex, tags, sizeof(pMeterMetaInfo->tagColumnIndex[0]) * numOfTags); } - pCmd->numOfTables += 1; - + pQueryInfo->numOfTables += 1; return pMeterMetaInfo; } -SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SSqlCmd* pCmd) { return tscAddMeterMetaInfo(pCmd, NULL, NULL, NULL, 0, NULL); } +SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SQueryInfo* pQueryInfo) { + return tscAddMeterMetaInfo(pQueryInfo, NULL, NULL, NULL, 0, NULL); +} -void tscRemoveMeterMetaInfo(SSqlCmd* pCmd, int32_t index, bool removeFromCache) { - if (index < 0 || index >= pCmd->numOfTables) { +void doRemoveMeterMetaInfo(SQueryInfo* pQueryInfo, int32_t index, bool removeFromCache) { + if (index < 0 || index >= pQueryInfo->numOfTables) { return; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index); tscClearMeterMetaInfo(pMeterMetaInfo, removeFromCache); 
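`tscAddSubqueryInfo` and `tscAddMeterMetaInfo` above share the same grow-by-one pattern: `realloc` the pointer array, bail out before touching the old list on failure, then append a freshly allocated element. A generic sketch of that pattern, with illustrative names:

```c
#include <stdlib.h>

// Append one owned pointer to a growable array; returns 0 on success.
// On allocation failure the original array is left untouched, mirroring
// the TSDB_CODE_CLI_OUT_OF_MEMORY early return above.
static int appendPointer(void*** pList, int* pCount, void* item) {
  void** tmp = realloc(*pList, (*pCount + 1) * sizeof(void*));
  if (tmp == NULL) {
    return -1;
  }
  *pList = tmp;
  (*pList)[(*pCount)++] = item;
  return 0;
}
```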
free(pMeterMetaInfo); - int32_t after = pCmd->numOfTables - index - 1; + int32_t after = pQueryInfo->numOfTables - index - 1; if (after > 0) { - memmove(&pCmd->pMeterInfo[index], &pCmd->pMeterInfo[index + 1], after * sizeof(void*)); + memmove(&pQueryInfo->pMeterInfo[index], &pQueryInfo->pMeterInfo[index + 1], after * POINTER_BYTES); } - pCmd->numOfTables -= 1; + pQueryInfo->numOfTables -= 1; } -void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache) { - int64_t addr = offsetof(SSqlObj, cmd); +void tscRemoveAllMeterMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache) { + tscTrace("%p deref the metric/meter meta in cache, numOfTables:%d", address, pQueryInfo->numOfTables); - tscTrace("%p deref the metric/meter meta in cache, numOfTables:%d", ((char*)pCmd - addr), pCmd->numOfTables); - - while (pCmd->numOfTables > 0) { - tscRemoveMeterMetaInfo(pCmd, pCmd->numOfTables - 1, removeFromCache); + int32_t index = pQueryInfo->numOfTables; + while (index >= 0) { + doRemoveMeterMetaInfo(pQueryInfo, --index, removeFromCache); } - tfree(pCmd->pMeterInfo); + tfree(pQueryInfo->pMeterInfo); } void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache) { @@ -1651,13 +1869,17 @@ void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache) } void tscResetForNextRetrieve(SSqlRes* pRes) { + if (pRes == NULL) { + return; + } + pRes->row = 0; pRes->numOfRows = 0; } SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql) { SSqlCmd* pCmd = &pSql->cmd; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, tableIndex); SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); if (pNew == NULL) { @@ -1682,17 +1904,34 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void pNew->cmd.payload = NULL; pNew->cmd.allocSize = 0; - pNew->cmd.pMeterInfo = NULL; + pNew->cmd.pQueryInfo = NULL; + pNew->cmd.numOfClause = 0; + pNew->cmd.clauseIndex = 0; + + if (tscAddSubqueryInfo(&pNew->cmd) != TSDB_CODE_SUCCESS) { + tscFreeSqlObj(pNew); + return NULL; + } + + SQueryInfo* pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - pNew->cmd.colList.pColList = NULL; - pNew->cmd.colList.numOfAlloc = 0; - pNew->cmd.colList.numOfCols = 0; + memcpy(pNewQueryInfo, pQueryInfo, sizeof(SQueryInfo)); - pNew->cmd.numOfTables = 0; - pNew->cmd.tsBuf = NULL; + memset(&pNewQueryInfo->colList, 0, sizeof(pNewQueryInfo->colList)); + memset(&pNewQueryInfo->fieldsInfo, 0, sizeof(SFieldInfo)); - memset(&pNew->cmd.fieldsInfo, 0, sizeof(SFieldInfo)); - tscTagCondCopy(&pNew->cmd.tagCond, &pCmd->tagCond); + pNewQueryInfo->pMeterInfo = NULL; + pNewQueryInfo->defaultVal = NULL; + pNewQueryInfo->numOfTables = 0; + pNewQueryInfo->tsBuf = NULL; + + tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond); + + if (pQueryInfo->interpoType != TSDB_INTERPO_NONE) { + pNewQueryInfo->defaultVal = malloc(pQueryInfo->fieldsInfo.numOfOutputCols * sizeof(int64_t)); + memcpy(pNewQueryInfo->defaultVal, pQueryInfo->defaultVal, pQueryInfo->fieldsInfo.numOfOutputCols * sizeof(int64_t)); + } if (tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) { tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex); @@ -1700,40 +1939,41 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t 
tableIndex, void (*fp)(), void return NULL; } - tscColumnBaseInfoCopy(&pNew->cmd.colList, &pCmd->colList, (int16_t)tableIndex); + tscColumnBaseInfoCopy(&pNewQueryInfo->colList, &pQueryInfo->colList, (int16_t)tableIndex); // set the correct query type if (pPrevSql != NULL) { - pNew->cmd.type = pPrevSql->cmd.type; + SQueryInfo* pPrevQueryInfo = tscGetQueryInfoDetail(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex); + pNewQueryInfo->type = pPrevQueryInfo->type; } else { - pNew->cmd.type |= TSDB_QUERY_TYPE_SUBQUERY; // it must be the subquery + pNewQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY; // it must be the subquery } uint64_t uid = pMeterMetaInfo->pMeterMeta->uid; - tscSqlExprCopy(&pNew->cmd.exprsInfo, &pCmd->exprsInfo, uid); + tscSqlExprCopy(&pNewQueryInfo->exprsInfo, &pQueryInfo->exprsInfo, uid); - int32_t numOfOutputCols = pNew->cmd.exprsInfo.numOfExprs; + int32_t numOfOutputCols = pNewQueryInfo->exprsInfo.numOfExprs; if (numOfOutputCols > 0) { int32_t* indexList = calloc(1, numOfOutputCols * sizeof(int32_t)); - for (int32_t i = 0, j = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0, j = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->uid == uid) { indexList[j++] = i; } } - tscFieldInfoCopy(&pCmd->fieldsInfo, &pNew->cmd.fieldsInfo, indexList, numOfOutputCols); + tscFieldInfoCopy(&pQueryInfo->fieldsInfo, &pNewQueryInfo->fieldsInfo, indexList, numOfOutputCols); free(indexList); - tscFieldInfoUpdateOffset(&pNew->cmd); + tscFieldInfoUpdateOffsetForInterResult(pNewQueryInfo); } pNew->fp = fp; pNew->param = param; char key[TSDB_MAX_TAGS_LEN + 1] = {0}; - tscGetMetricMetaCacheKey(pCmd, key, uid); + tscGetMetricMetaCacheKey(pQueryInfo, key, uid); #ifdef _DEBUG_VIEW printf("the metricmeta key is:%s\n", key); @@ -1746,31 +1986,41 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void SMeterMeta* pMeterMeta = taosGetDataFromCache(tscCacheHandle, name); SMetricMeta* pMetricMeta = taosGetDataFromCache(tscCacheHandle, key); - pFinalInfo = tscAddMeterMetaInfo(&pNew->cmd, name, pMeterMeta, pMetricMeta, pMeterMetaInfo->numOfTags, + pFinalInfo = tscAddMeterMetaInfo(pNewQueryInfo, name, pMeterMeta, pMetricMeta, pMeterMetaInfo->numOfTags, pMeterMetaInfo->tagColumnIndex); - } else { - SMeterMetaInfo* pPrevInfo = tscGetMeterMetaInfo(&pPrevSql->cmd, 0); - pFinalInfo = tscAddMeterMetaInfo(&pNew->cmd, name, pPrevInfo->pMeterMeta, pPrevInfo->pMetricMeta, - pMeterMetaInfo->numOfTags, pMeterMetaInfo->tagColumnIndex); + } else { // transfer the ownership of pMeterMeta/pMetricMeta to the newly create sql object. 
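The `else` branch continued below replaces the old pattern of stealing the raw `pMeterMeta`/`pMetricMeta` pointers and NULL-ing them out with `taosTransferDataInCache`. A sketch of the intent only; the real helper lives in the cache module and, presumably, also moves the cache reference count along with the pointer:

```c
// Conceptual sketch, not the actual cache implementation.
static void* transferOwnership(void** ppSrc) {
  void* p = *ppSrc;  // the new owner takes over the reference
  *ppSrc = NULL;     // the old owner must not release it again
  return p;
}
```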
+ SMeterMetaInfo* pPrevInfo = tscGetMeterMetaInfo(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0); - pPrevInfo->pMeterMeta = NULL; - pPrevInfo->pMetricMeta = NULL; + SMeterMeta* pPrevMeterMeta = taosTransferDataInCache(tscCacheHandle, (void**)&pPrevInfo->pMeterMeta); + SMetricMeta* pPrevMetricMeta = taosTransferDataInCache(tscCacheHandle, (void**)&pPrevInfo->pMetricMeta); + + pFinalInfo = tscAddMeterMetaInfo(pNewQueryInfo, name, pPrevMeterMeta, pPrevMetricMeta, pMeterMetaInfo->numOfTags, + pMeterMetaInfo->tagColumnIndex); } - assert(pFinalInfo->pMeterMeta != NULL); - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + assert(pFinalInfo->pMeterMeta != NULL && pNewQueryInfo->numOfTables == 1); + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { assert(pFinalInfo->pMetricMeta != NULL); } + + tscTrace( + "%p new subquery: %p, tableIndex:%d, vnodeIdx:%d, type:%d, exprInfo:%d, colList:%d," + "fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64, + pSql, pNew, tableIndex, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type, pNewQueryInfo->exprsInfo.numOfExprs, + pNewQueryInfo->colList.numOfCols, pNewQueryInfo->fieldsInfo.numOfOutputCols, pFinalInfo->name, pNewQueryInfo->stime, + pNewQueryInfo->etime, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit); + + tscPrintSelectClause(pNew, 0); - tscTrace("%p new subquery %p, tableIndex:%d, vnodeIdx:%d, type:%d", pSql, pNew, tableIndex, - pMeterMetaInfo->vnodeIndex, pNew->cmd.type); return pNew; } void tscDoQuery(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; void* fp = pSql->fp; - + + pSql->res.code = TSDB_CODE_SUCCESS; + if (pCmd->command > TSDB_SQL_LOCAL) { tscProcessLocalCmd(pSql); } else { @@ -1778,8 +2028,8 @@ void tscDoQuery(SSqlObj* pSql) { tscAddIntoSqlList(pSql); } - if (pCmd->isInsertFromFile == 1) { - tscProcessMultiVnodesInsertForFile(pSql); + if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) { + tscProcessMultiVnodesInsertFromFile(pSql); } else { // pSql may be released in this function if it is a async insertion. tscProcessSql(pSql); @@ -1834,11 +2084,152 @@ int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* s return TSDB_CODE_INVALID_SQL; } -bool tscHasReachLimitation(SSqlObj* pSql) { - assert(pSql != NULL && pSql->cmd.globalLimit != 0); +bool tscHasReachLimitation(SQueryInfo* pQueryInfo, SSqlRes* pRes) { + assert(pQueryInfo != NULL && pQueryInfo->clauseLimit != 0); + return (pQueryInfo->clauseLimit > 0 && pRes->numOfTotalInCurrentClause >= pQueryInfo->clauseLimit); +} + +char* tscGetErrorMsgPayload(SSqlCmd* pCmd) { return pCmd->payload; } + +/** + * If current vnode query does not return results anymore (pRes->numOfRows == 0), try the next vnode if exists, + * in case of multi-vnode super table projection query and the result does not reach the limitation. 
+ */
+bool hasMoreVnodesToTry(SSqlObj* pSql) {
+  SSqlCmd* pCmd = &pSql->cmd;
+  SSqlRes* pRes = &pSql->res;
+
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
+  if (!UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo) || (pMeterMetaInfo->pMetricMeta == NULL)) {
+    return false;
+  }
+
+  int32_t totalVnode = pMeterMetaInfo->pMetricMeta->numOfVnodes;
+  return pRes->numOfRows == 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) &&
+         (!tscHasReachLimitation(pQueryInfo, pRes)) && (pMeterMetaInfo->vnodeIndex < totalVnode - 1);
+}
+void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
   SSqlCmd* pCmd = &pSql->cmd;
   SSqlRes* pRes = &pSql->res;
 
-  return (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit);
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
+  /*
+   * no more results are returned from the current virtual node, so try the next vnode if it exists,
+   * in case of a multi-vnode super table projection query
+   */
+  assert(pRes->numOfRows == 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && !tscHasReachLimitation(pQueryInfo, pRes));
+
+  SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
+  int32_t totalVnode = pMeterMetaInfo->pMetricMeta->numOfVnodes;
+
+  while (++pMeterMetaInfo->vnodeIndex < totalVnode) {
+    tscTrace("%p current vnode:%d exhausted, try next:%d. total vnode:%d. current numOfRes:%d", pSql,
+             pMeterMetaInfo->vnodeIndex - 1, pMeterMetaInfo->vnodeIndex, totalVnode, pRes->numOfTotalInCurrentClause);
+
+    /*
+     * update the limit and offset value for the query on the next vnode,
+     * according to current retrieval results
+     *
+     * NOTE:
+     * if pRes->offset is larger than 0, the starting position of the returned results has not been reached yet.
+     * Therefore, pRes->numOfRows, as well as pRes->numOfTotalInCurrentClause, must be 0.
+     * The pRes->offset value will be updated by the virtual node during query execution.
+     */
+    if (pQueryInfo->clauseLimit >= 0) {
+      pQueryInfo->limit.limit = pQueryInfo->clauseLimit - pRes->numOfTotalInCurrentClause;
+    }
+
+    pQueryInfo->limit.offset = pRes->offset;
+
+    assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0));
+    tscTrace("%p new query to next vnode, vnode index:%d, limit:%" PRId64 ", offset:%" PRId64 ", glimit:%" PRId64, pSql,
+             pMeterMetaInfo->vnodeIndex, pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->clauseLimit);
+
+    /*
+     * For a projection query with super table join, numOfSubs equals the number of all subqueries.
+     * Therefore, we need to reset the value of numOfSubs to 0.
+     *
+     * For a super table join with projection query, if any one of the subqueries is exhausted, the whole query is completed.
+     */
+    pSql->numOfSubs = 0;
+    pCmd->command = TSDB_SQL_SELECT;
+
+    tscResetForNextRetrieve(pRes);
+
+    // in case of async query, set the callback function
+    void* fp1 = pSql->fp;
+    pSql->fp = fp;
+
+    if (fp1 != NULL) {
+      assert(fp != NULL);
+    }
+
+    int32_t ret = tscProcessSql(pSql);  // todo check for failure
+
+    // in case of async query, return now
+    if (fp != NULL) {
+      return;
+    }
+
+    if (ret != TSDB_CODE_SUCCESS) {
+      pSql->res.code = ret;
+      return;
+    }
+
+    // retrieve data
+    assert(pCmd->command == TSDB_SQL_SELECT);
+    pCmd->command = TSDB_SQL_FETCH;
+
+    if ((ret = tscProcessSql(pSql)) != TSDB_CODE_SUCCESS) {
+      pSql->res.code = ret;
+      return;
+    }
+
+    // if the results from the current virtual node are empty, try the next vnode if one exists; otherwise, return the results.
+    if (pRes->numOfRows > 0) {
+      break;
+    }
+  }
+
+  if (pRes->numOfRows == 0) {
+    tscTrace("%p all vnodes exhausted, prj query completed. total vnode:%d, total res:%" PRId64, pSql, totalVnode,
+             pRes->numOfTotal);
+  }
+}
+
+void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) {
+  SSqlCmd* pCmd = &pSql->cmd;
+  SSqlRes* pRes = &pSql->res;
+
+  // the current subclause is completed, try the next subclause
+  assert(pCmd->clauseIndex < pCmd->numOfClause - 1);
+
+  pCmd->clauseIndex++;
+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
+  pSql->cmd.command = pQueryInfo->command;
+
+  // back up the total number of results first
+  int64_t num = pRes->numOfTotal + pRes->numOfTotalInCurrentClause;
+  tscFreeResData(pSql);
+
+  pRes->numOfTotal = num;
+
+  tfree(pSql->pSubs);
+  pSql->numOfSubs = 0;
+
+  if (pSql->fp != NULL) {
+    pSql->fp = queryFp;
+    assert(queryFp != NULL);
+  }
+
+  tscTrace("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause);
+  if (pCmd->command > TSDB_SQL_LOCAL) {
+    tscProcessLocalCmd(pSql);
+  } else {
+    tscProcessSql(pSql);
+  }
 }
diff --git a/src/connector/grafana/tdengine/dist/plugin.json b/src/connector/grafana/tdengine/dist/plugin.json
index 3734fbfc2d9e18850a859d30f47f5b9579c5e65f..e9954ce6ce16c7b943f3002896144891c9dbc629 100644
--- a/src/connector/grafana/tdengine/dist/plugin.json
+++ b/src/connector/grafana/tdengine/dist/plugin.json
@@ -1,6 +1,6 @@
 {
   "name": "TDengine",
-  "id": "tdengine",
+  "id": "taosdata-tdengine-datasource",
   "type": "datasource",
 
   "partials": {
@@ -24,8 +24,8 @@
     {"name": "GitHub", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"},
     {"name": "AGPL 3.0", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine/LICENSE"}
   ],
-  "version": "1.6.0",
-  "updated": "2019-11-12"
+  "version": "1.0.0",
+  "updated": "2020-01-13"
 },
 
 "dependencies": {
diff --git a/src/connector/grafana/tdengine/package.json b/src/connector/grafana/tdengine/package.json
index 83d29b78ced97502b8fa1c20c4a568bb0f46ba19..8e542bef2647aa6bb4fe2f404665a4ce9c707345 100644
--- a/src/connector/grafana/tdengine/package.json
+++ b/src/connector/grafana/tdengine/package.json
@@ -1,9 +1,8 @@
 {
   "name": "TDengine",
-  "private": true,
+  "private": false,
   "version": "1.0.0",
   "description": "grafana datasource plugin for tdengine",
-  "main": "index.js",
   "scripts": {
     "build": "./node_modules/grunt-cli/bin/grunt",
     "test": "./node_modules/grunt-cli/bin/grunt mochaTest"
@@ -12,7 +11,7 @@
     "type": "git",
     "url": "git+https://github.com/taosdata/TDengine.git"
   },
-  "author": "",
+  "author": "https://www.taosdata.com",
   "license": "AGPL 3.0",
   "bugs": {
     "url": "https://github.com/taosdata/TDengine/issues"
diff --git a/src/connector/grafana/tdengine/src/plugin.json b/src/connector/grafana/tdengine/src/plugin.json
index 3734fbfc2d9e18850a859d30f47f5b9579c5e65f..e9954ce6ce16c7b943f3002896144891c9dbc629 100644
--- a/src/connector/grafana/tdengine/src/plugin.json
+++ b/src/connector/grafana/tdengine/src/plugin.json
@@ -1,6 +1,6 @@
 {
   "name": "TDengine",
-  "id": "tdengine",
+  "id": "taosdata-tdengine-datasource",
   "type": "datasource",
 
   "partials": {
@@ -24,8 +24,8 @@
     {"name": "GitHub", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"},
     {"name": "AGPL 3.0", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine/LICENSE"}
   ],
-  "version": "1.6.0",
-  "updated": "2019-11-12"
+  "version": "1.0.0",
+  "updated":
"2020-01-13" }, "dependencies": { diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py index 86a3489d0789d19a73f5318563569d4527845313..505619436cc1ad5d01a4134aede29477c6f6ae48 100644 --- a/src/connector/python/linux/python2/taos/cinterface.py +++ b/src/connector/python/linux/python2/taos/cinterface.py @@ -13,14 +13,14 @@ def _convert_microsecond_to_datetime(micro): def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ - _timstamp_converter = _convert_millisecond_to_datetime + _timestamp_converter = _convert_millisecond_to_datetime if micro: - _timstamp_converter = _convert_microsecond_to_datetime + _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1])) else: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -144,6 +144,8 @@ class CTaosInterface(object): libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p def __init__(self, config=None): ''' @@ -252,6 +254,41 @@ class CTaosInterface(object): """ return CTaosInterface.libtaos.taos_affected_rows(connection) + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. 
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + @staticmethod def useResult(connection): '''Use result after calling self.query @@ -275,8 +312,8 @@ class CTaosInterface(object): if num_of_rows == 0: return None, 0 - blocks = [None] * len(fields) isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] @@ -351,4 +388,20 @@ class CTaosInterface(object): def errStr(connection): """Return the error styring """ - return CTaosInterface.libtaos.taos_errstr(connection) \ No newline at end of file + return CTaosInterface.libtaos.taos_errstr(connection) + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + + print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) + print('Affected rows: {}'.format(cinter.affectedRows(conn))) + + result, des = CTaosInterface.useResult(conn) + + data, num_of_rows = CTaosInterface.fetchBlock(result, des) + + print(data) + + cinter.close(conn) \ No newline at end of file diff --git a/src/connector/python/linux/python2/taos/connection.py b/src/connector/python/linux/python2/taos/connection.py index ba24209552600d6ee75258f929eeff829dd7b675..04fbbdec04144624a0b1f4ba25083a91ade21cce 100644 --- a/src/connector/python/linux/python2/taos/connection.py +++ b/src/connector/python/linux/python2/taos/connection.py @@ -1,5 +1,5 @@ -# from .cursor import TDengineCursor from .cursor import TDengineCursor +from .subscription import TDengineSubscription from .cinterface import CTaosInterface class TDengineConnection(object): @@ -50,6 +50,14 @@ class TDengineConnection(object): """ return CTaosInterface.close(self._conn) + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + def cursor(self): """Return a new Cursor object using the connection. 
""" diff --git a/src/connector/python/linux/python2/taos/subscription.py b/src/connector/python/linux/python2/taos/subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..2d01395532820c3bd0e068ef7eb3d425eaaa6d78 --- /dev/null +++ b/src/connector/python/linux/python2/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. + """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py index 259c8bbd060b44f7c1b60b5c015519ed862c8ec2..7fcedc9fe9400cc8db007897906d4568c2eb234f 100644 --- a/src/connector/python/linux/python3/taos/cinterface.py +++ b/src/connector/python/linux/python3/taos/cinterface.py @@ -144,6 +144,8 @@ class CTaosInterface(object): libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p def __init__(self, config=None): ''' @@ -252,6 +254,41 @@ class CTaosInterface(object): """ return CTaosInterface.libtaos.taos_affected_rows(connection) + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. 
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + @staticmethod def useResult(connection): '''Use result after calling self.query diff --git a/src/connector/python/linux/python3/taos/connection.py b/src/connector/python/linux/python3/taos/connection.py index ba24209552600d6ee75258f929eeff829dd7b675..04fbbdec04144624a0b1f4ba25083a91ade21cce 100644 --- a/src/connector/python/linux/python3/taos/connection.py +++ b/src/connector/python/linux/python3/taos/connection.py @@ -1,5 +1,5 @@ -# from .cursor import TDengineCursor from .cursor import TDengineCursor +from .subscription import TDengineSubscription from .cinterface import CTaosInterface class TDengineConnection(object): @@ -50,6 +50,14 @@ class TDengineConnection(object): """ return CTaosInterface.close(self._conn) + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + def cursor(self): """Return a new Cursor object using the connection. """ diff --git a/src/connector/python/linux/python3/taos/subscription.py b/src/connector/python/linux/python3/taos/subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..d3cf10d5ada578687689b94454378dd543368e3e --- /dev/null +++ b/src/connector/python/linux/python3/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. 
+ """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py index 8e3b7019290153a8bce475a5f2db43bc8ab04399..f8cdfcc51ea1ea9ae5789c47f2b9e54879a53934 100644 --- a/src/connector/python/windows/python2/taos/cinterface.py +++ b/src/connector/python/windows/python2/taos/cinterface.py @@ -13,14 +13,14 @@ def _convert_microsecond_to_datetime(micro): def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ - _timstamp_converter = _convert_millisecond_to_datetime + _timestamp_converter = _convert_millisecond_to_datetime if micro: - _timstamp_converter = _convert_microsecond_to_datetime + _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) else: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -144,6 +144,8 @@ class CTaosInterface(object): libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p def __init__(self, config=None): ''' @@ -252,6 +254,41 @@ class CTaosInterface(object): """ return CTaosInterface.libtaos.taos_affected_rows(connection) + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. 
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + @staticmethod def useResult(connection): '''Use result after calling self.query @@ -275,8 +312,8 @@ class CTaosInterface(object): if num_of_rows == 0: return None, 0 - blocks = [None] * len(fields) isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] @@ -351,4 +388,20 @@ class CTaosInterface(object): def errStr(connection): """Return the error styring """ - return CTaosInterface.libtaos.taos_errstr(connection) \ No newline at end of file + return CTaosInterface.libtaos.taos_errstr(connection) + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + + print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) + print('Affected rows: {}'.format(cinter.affectedRows(conn))) + + result, des = CTaosInterface.useResult(conn) + + data, num_of_rows = CTaosInterface.fetchBlock(result, des) + + print(data) + + cinter.close(conn) \ No newline at end of file diff --git a/src/connector/python/windows/python2/taos/connection.py b/src/connector/python/windows/python2/taos/connection.py index ba24209552600d6ee75258f929eeff829dd7b675..e2783975d9c0f63c82a90b41a11fbaa0a3ffb5ac 100644 --- a/src/connector/python/windows/python2/taos/connection.py +++ b/src/connector/python/windows/python2/taos/connection.py @@ -1,5 +1,5 @@ -# from .cursor import TDengineCursor from .cursor import TDengineCursor +from .subscription import TDengineSubscription from .cinterface import CTaosInterface class TDengineConnection(object): @@ -15,7 +15,8 @@ class TDengineConnection(object): self._config = None self._chandle = None - self.config(**kwargs) + if len(kwargs) > 0: + self.config(**kwargs) def config(self, **kwargs): # host @@ -50,6 +51,14 @@ class TDengineConnection(object): """ return CTaosInterface.close(self._conn) + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + def cursor(self): """Return a new Cursor object using the connection. 
""" diff --git a/src/connector/python/windows/python2/taos/subscription.py b/src/connector/python/windows/python2/taos/subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..d3cf10d5ada578687689b94454378dd543368e3e --- /dev/null +++ b/src/connector/python/windows/python2/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. + """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py index 2cddf5fccf606be3d6dc60a538636204ee59565c..b4b44e199c37cf90c9beddb16433591bc0713b23 100644 --- a/src/connector/python/windows/python3/taos/cinterface.py +++ b/src/connector/python/windows/python3/taos/cinterface.py @@ -1,370 +1,407 @@ -import ctypes -from .constants import FieldType -from .error import * -import math -import datetime - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli/1000.0) - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) - else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, 
ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] - else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] - else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] - else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] - else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - - res = [] - - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - res.append( (ctypes.cast(data+nbytes*(abs(num_of_rows - i -1)), ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) - else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) - except ValueError: - res.append(None) - - return res - # if num_of_rows > 0: - # for i in range(abs(num_of_rows)): - # try: - # 
res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) - # except ValueError: - # res.append(None) - # return res - # # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)][::-1]] - # else: - # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)]] - -_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python -} - -# Corresponding TAOS_FIELD structure in C -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 64), - ('bytes', ctypes.c_short), - ('type', ctypes.c_char)] - -# C interface class -class CTaosInterface(object): - - libtaos = ctypes.windll.LoadLibrary('taos') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except AttributeError: - raise AttributeError("config is expected as a str") - - if config != None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect(self, host=None, user="root", password="taosdata", db=None, port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value == None: - print('connect to TDengine failed') - # sys.exit(1) - else: - print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - 
CTaosInterface.libtaos.taos_close(connection) - print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - @rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(connection): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(connection) - - @staticmethod - def useResult(connection): - '''Use result after calling self.query - ''' - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(connection)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return result, fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - - if num_of_rows == 0: - return None, 0 - - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") - - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fields[i]['bytes'], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(connection): - return CTaosInterface.libtaos.taos_field_count(connection) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # @staticmethod - # def getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return 
ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - @staticmethod - def errno(connection): - """Return the error number. - """ - return CTaosInterface.libtaos.taos_errno(connection) - - @staticmethod - def errStr(connection): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(connection).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - - print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) - print('Affected rows: {}'.format(cinter.affectedRows(conn))) - - result, des = CTaosInterface.useResult(conn) - - data, num_of_rows = CTaosInterface.fetchBlock(result, des) - - print(data) - +import ctypes +from .constants import FieldType +from .error import * +import math +import datetime + +def _convert_millisecond_to_datetime(milli): + return datetime.datetime.fromtimestamp(milli/1000.0) + +def _convert_microsecond_to_datetime(micro): + return datetime.datetime.fromtimestamp(micro/1000000.0) + +def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + _timestamp_converter = _convert_millisecond_to_datetime + if micro: + _timestamp_converter = _convert_microsecond_to_datetime + + if num_of_rows > 0: + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) + else: + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + +def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + +def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + +def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] + else: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + +def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + +def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): + 
"""Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + +def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C float row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + +def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C double row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + +def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + if num_of_rows > 0: + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] + else: + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + +def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not None) + + res = [] + + for i in range(abs(num_of_rows)): + try: + if num_of_rows >= 0: + res.append( (ctypes.cast(data+nbytes*(abs(num_of_rows - i -1)), ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + else: + res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + except ValueError: + res.append(None) + + return res + # if num_of_rows > 0: + # for i in range(abs(num_of_rows)): + # try: + # res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + # except ValueError: + # res.append(None) + # return res + # # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)][::-1]] + # else: + # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)]] + +_CONVERT_FUNC = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT : _crow_tinyint_to_python, + FieldType.C_SMALLINT : _crow_smallint_to_python, + FieldType.C_INT : _crow_int_to_python, + FieldType.C_BIGINT : _crow_bigint_to_python, + FieldType.C_FLOAT : _crow_float_to_python, + FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python, + FieldType.C_TIMESTAMP : _crow_timestamp_to_python, + FieldType.C_NCHAR : _crow_nchar_to_python +} + +# Corresponding TAOS_FIELD structure in C +class TaosField(ctypes.Structure): + _fields_ = [('name', ctypes.c_char * 64), + ('bytes', ctypes.c_short), + ('type', ctypes.c_char)] + +# C interface class +class CTaosInterface(object): + + libtaos = ctypes.windll.LoadLibrary('taos') + + 
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
+    libtaos.taos_init.restype = None
+    libtaos.taos_connect.restype = ctypes.c_void_p
+    libtaos.taos_use_result.restype = ctypes.c_void_p
+    libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
+    libtaos.taos_errstr.restype = ctypes.c_char_p
+    libtaos.taos_subscribe.restype = ctypes.c_void_p
+    libtaos.taos_consume.restype = ctypes.c_void_p
+
+    def __init__(self, config=None):
+        '''
+        Function to initialize the class
+        @host     : str, hostname to connect
+        @user     : str, username to connect to server
+        @password : str, password to connect to server
+        @db       : str, default db to use when log in
+        @config   : str, config directory
+
+        @rtype    : None
+        '''
+        if config is None:
+            self._config = ctypes.c_char_p(None)
+        else:
+            try:
+                self._config = ctypes.c_char_p(config.encode('utf-8'))
+            except AttributeError:
+                raise AttributeError("config is expected as a str")
+
+        if config != None:
+            CTaosInterface.libtaos.taos_options(3, self._config)
+
+        CTaosInterface.libtaos.taos_init()
+
+    @property
+    def config(self):
+        """ Get current config
+        """
+        return self._config
+
+    def connect(self, host=None, user="root", password="taosdata", db=None, port=0):
+        '''
+        Function to connect to server
+
+        @rtype: c_void_p, TDengine handle
+        '''
+        # host
+        try:
+            _host = ctypes.c_char_p(host.encode(
+                "utf-8")) if host != None else ctypes.c_char_p(None)
+        except AttributeError:
+            raise AttributeError("host is expected as a str")
+
+        # user
+        try:
+            _user = ctypes.c_char_p(user.encode("utf-8"))
+        except AttributeError:
+            raise AttributeError("user is expected as a str")
+
+        # password
+        try:
+            _password = ctypes.c_char_p(password.encode("utf-8"))
+        except AttributeError:
+            raise AttributeError("password is expected as a str")
+
+        # db
+        try:
+            _db = ctypes.c_char_p(
+                db.encode("utf-8")) if db != None else ctypes.c_char_p(None)
+        except AttributeError:
+            raise AttributeError("db is expected as a str")
+
+        # port
+        try:
+            _port = ctypes.c_int(port)
+        except TypeError:
+            raise TypeError("port is expected as an int")
+
+        connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
+            _host, _user, _password, _db, _port))
+
+        if connection.value == None:
+            print('connect to TDengine failed')
+            # sys.exit(1)
+        else:
+            print('connect to TDengine success')
+
+        return connection
+
+    @staticmethod
+    def close(connection):
+        '''Close the TDengine handle
+        '''
+        CTaosInterface.libtaos.taos_close(connection)
+        print('connection is closed')
+
+    @staticmethod
+    def query(connection, sql):
+        '''Run SQL
+
+        @sql: str, sql string to run
+
+        @rtype: 0 on success and -1 on failure
+        '''
+        try:
+            return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8')))
+        except AttributeError:
+            raise AttributeError("sql is expected as a string")
+        # finally:
+        #     CTaosInterface.libtaos.close(connection)
+
+    @staticmethod
+    def affectedRows(connection):
+        """The affected rows after running the query
+        """
+        return CTaosInterface.libtaos.taos_affected_rows(connection)
+
+    @staticmethod
+    def subscribe(connection, restart, topic, sql, interval):
+        """Create a subscription
+        @restart boolean,
+        @sql string, sql statement for data query, must be a 'select' statement.
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + + @staticmethod + def useResult(connection): + '''Use result after calling self.query + ''' + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.fieldsCount(connection)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + + return result, fields + + @staticmethod + def fetchBlock(result, fields): + pblock = ctypes.c_void_p(0) + num_of_rows = CTaosInterface.libtaos.taos_fetch_block( + result, ctypes.byref(pblock)) + + if num_of_rows == 0: + return None, 0 + + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError("Invalid data type returned from database") + + blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fields[i]['bytes'], isMicro) + + return blocks, abs(num_of_rows) + + @staticmethod + def freeResult(result): + CTaosInterface.libtaos.taos_free_result(result) + result.value = None + + @staticmethod + def fieldsCount(connection): + return CTaosInterface.libtaos.taos_field_count(connection) + + @staticmethod + def fetchFields(result): + return CTaosInterface.libtaos.taos_fetch_fields(result) + + # @staticmethod + # def fetchRow(result, fields): + # l = [] + # row = CTaosInterface.libtaos.taos_fetch_row(result) + # if not row: + # return None + + # for i in range(len(fields)): + # l.append(CTaosInterface.getDataValue( + # row[i], fields[i]['type'], fields[i]['bytes'])) + + # return tuple(l) + + # @staticmethod + # def getDataValue(data, dtype, byte): + # ''' + # ''' + # if not data: + # return None + + # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): + # return ctypes.cast(data, 
ctypes.POINTER(ctypes.c_double))[0]
+    # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
+    #     return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
+    # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
+    #     return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
+    # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
+    #     return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
+
+    @staticmethod
+    def errno(connection):
+        """Return the error number.
+        """
+        return CTaosInterface.libtaos.taos_errno(connection)
+
+    @staticmethod
+    def errStr(connection):
+        """Return the error string
+        """
+        return CTaosInterface.libtaos.taos_errstr(connection).decode('utf-8')
+
+
+if __name__ == '__main__':
+    cinter = CTaosInterface()
+    conn = cinter.connect()
+
+    print('Query return value: {}'.format(cinter.query(conn, 'show databases')))
+    print('Affected rows: {}'.format(cinter.affectedRows(conn)))
+
+    result, des = CTaosInterface.useResult(conn)
+
+    data, num_of_rows = CTaosInterface.fetchBlock(result, des)
+
+    print(data)
+    cinter.close(conn)
\ No newline at end of file
diff --git a/src/connector/python/windows/python3/taos/connection.py b/src/connector/python/windows/python3/taos/connection.py
index a88e25a6db6f187f2a257303189c66851bb260f6..e2783975d9c0f63c82a90b41a11fbaa0a3ffb5ac 100644
--- a/src/connector/python/windows/python3/taos/connection.py
+++ b/src/connector/python/windows/python3/taos/connection.py
@@ -1,81 +1,89 @@
-# from .cursor import TDengineCursor
-from .cursor import TDengineCursor
-from .cinterface import CTaosInterface
-
-class TDengineConnection(object):
-    """ TDengine connection object
-    """
-    def __init__(self, *args, **kwargs):
-        self._conn = None
-        self._host = None
-        self._user = "root"
-        self._password = "taosdata"
-        self._database = None
-        self._port = 0
-        self._config = None
-        self._chandle = None
-
-        if len(kwargs) > 0:
-            self.config(**kwargs)
-
-    def config(self, **kwargs):
-        # host
-        if 'host' in kwargs:
-            self._host = kwargs['host']
-
-        # user
-        if 'user' in kwargs:
-            self._user = kwargs['user']
-
-        # password
-        if 'password' in kwargs:
-            self._password = kwargs['password']
-
-        # database
-        if 'database' in kwargs:
-            self._database = kwargs['database']
-
-        # port
-        if 'port' in kwargs:
-            self._port = kwargs['port']
-
-        # config
-        if 'config' in kwargs:
-            self._config = kwargs['config']
-
-        self._chandle = CTaosInterface(self._config)
-        self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port)
-
-    def close(self):
-        """Close current connection.
-        """
-        return CTaosInterface.close(self._conn)
-
-    def cursor(self):
-        """Return a new Cursor object using the connection.
-        """
-        return TDengineCursor(self)
-
-    def commit(self):
-        """Commit any pending transaction to the database.
-
-        Since TDengine do not support transactions, the implement is void functionality.
-        """
-        pass
-
-    def rollback(self):
-        """Void functionality
-        """
-        pass
-
-    def clear_result_set(self):
-        """Clear unused result set on this connection.
- """ - result = self._chandle.useResult(self._conn)[0] - if result: - self._chandle.freeResult(result) - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() +from .cursor import TDengineCursor +from .subscription import TDengineSubscription +from .cinterface import CTaosInterface + +class TDengineConnection(object): + """ TDengine connection object + """ + def __init__(self, *args, **kwargs): + self._conn = None + self._host = None + self._user = "root" + self._password = "taosdata" + self._database = None + self._port = 0 + self._config = None + self._chandle = None + + if len(kwargs) > 0: + self.config(**kwargs) + + def config(self, **kwargs): + # host + if 'host' in kwargs: + self._host = kwargs['host'] + + # user + if 'user' in kwargs: + self._user = kwargs['user'] + + # password + if 'password' in kwargs: + self._password = kwargs['password'] + + # database + if 'database' in kwargs: + self._database = kwargs['database'] + + # port + if 'port' in kwargs: + self._port = kwargs['port'] + + # config + if 'config' in kwargs: + self._config = kwargs['config'] + + self._chandle = CTaosInterface(self._config) + self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + + def close(self): + """Close current connection. + """ + return CTaosInterface.close(self._conn) + + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + + def cursor(self): + """Return a new Cursor object using the connection. + """ + return TDengineCursor(self) + + def commit(self): + """Commit any pending transaction to the database. + + Since TDengine do not support transactions, the implement is void functionality. + """ + pass + + def rollback(self): + """Void functionality + """ + pass + + def clear_result_set(self): + """Clear unused result set on this connection. + """ + result = self._chandle.useResult(self._conn)[0] + if result: + self._chandle.freeResult(result) + +if __name__ == "__main__": + conn = TDengineConnection(host='192.168.1.107') + conn.close() print("Hello world") \ No newline at end of file diff --git a/src/connector/python/windows/python3/taos/subscription.py b/src/connector/python/windows/python3/taos/subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..d3cf10d5ada578687689b94454378dd543368e3e --- /dev/null +++ b/src/connector/python/windows/python3/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. 
+ """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/inc/hash.h b/src/inc/hash.h new file mode 100644 index 0000000000000000000000000000000000000000..54a43fb6ebc3f692c642e1270a948016b4244194 --- /dev/null +++ b/src/inc/hash.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_HASH_H +#define TDENGINE_HASH_H + +#include "hashutil.h" + +#define HASH_MAX_CAPACITY (1024 * 1024 * 16) +#define HASH_VALUE_IN_TRASH (-1) +#define HASH_DEFAULT_LOAD_FACTOR (0.75) +#define HASH_INDEX(v, c) ((v) & ((c)-1)) + +typedef struct SHashNode { + char *key; // null-terminated string + union { + struct SHashNode * prev; + struct SHashEntry *prev1; + }; + + struct SHashNode *next; + uint32_t hashVal; // the hash value of key, if hashVal == HASH_VALUE_IN_TRASH, this node is moved to trash + uint32_t keyLen; // length of the key + char data[]; +} SHashNode; + +typedef struct SHashEntry { + SHashNode *next; + uint32_t num; +} SHashEntry; + +typedef struct HashObj { + SHashEntry **hashList; + uint32_t capacity; + int size; + _hash_fn_t hashFp; + bool multithreadSafe; // enable lock + +#if defined LINUX + pthread_rwlock_t lock; +#else + pthread_mutex_t lock; +#endif + +} HashObj; + +void *taosInitHashTable(uint32_t capacity, _hash_fn_t fn, bool multithreadSafe); + +int32_t taosAddToHashTable(HashObj *pObj, const char *key, uint32_t keyLen, void *data, uint32_t size); +void taosDeleteFromHashTable(HashObj *pObj, const char *key, uint32_t keyLen); + +char *taosGetDataFromHash(HashObj *pObj, const char *key, uint32_t keyLen); + +void taosCleanUpHashTable(void *handle); + +int32_t taosGetHashMaxOverflowLength(HashObj *pObj); + +int32_t taosCheckHashTable(HashObj *pObj); + +#endif // TDENGINE_HASH_H diff --git a/src/inc/hashutil.h b/src/inc/hashutil.h new file mode 100644 index 0000000000000000000000000000000000000000..047f1889d78d6f8559bd0e320a0e9bae2beaa681 --- /dev/null +++ b/src/inc/hashutil.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
diff --git a/src/inc/hashutil.h b/src/inc/hashutil.h
new file mode 100644
index 0000000000000000000000000000000000000000..047f1889d78d6f8559bd0e320a0e9bae2beaa681
--- /dev/null
+++ b/src/inc/hashutil.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_HASHUTIL_H
+#define TDENGINE_HASHUTIL_H
+
+#include "os.h"
+
+typedef uint32_t (*_hash_fn_t)(const char *, uint32_t);
+
+/**
+ * murmur hash algorithm
+ * @key  usually a string
+ * @len  length of the key
+ * @return a 32-bit hash value
+ */
+uint32_t MurmurHash3_32(const char *key, uint32_t len);
+
+/**
+ * hash function for 32-bit integer keys
+ * @param key  pointer to the integer key
+ * @param len  length of the key in bytes
+ * @return a 32-bit hash value
+ */
+uint32_t taosIntHash_32(const char *key, uint32_t len);
+
+uint32_t taosIntHash_64(const char *key, uint32_t len);
+
+_hash_fn_t taosGetDefaultHashFunction(int32_t type);
+
+#endif  // TDENGINE_HASHUTIL_H
diff --git a/src/inc/sql.y b/src/inc/sql.y
index dd609e5ee380c0239200c36c4d2bdc00e3de26a6..2b7e0b628cb0cf951db287722036523749e0fdd8 100644
--- a/src/inc/sql.y
+++ b/src/inc/sql.y
@@ -31,7 +31,7 @@
 }
 
 %syntax_error {
-  pInfo->validSql = false;
+  pInfo->valid = false;
 
   int32_t outputBufLen = tListLen(pInfo->pzErrMsg);
   int32_t len = 0;
@@ -59,25 +59,25 @@ program ::= cmd. {}
 
 //////////////////////////////////THE SHOW STATEMENT///////////////////////////////////////////
-cmd ::= SHOW DATABASES.  { setDCLSQLElems(pInfo, SHOW_DATABASES, 0);}
-cmd ::= SHOW MNODES.     { setDCLSQLElems(pInfo, SHOW_MNODES, 0);}
-cmd ::= SHOW DNODES.     { setDCLSQLElems(pInfo, SHOW_DNODES, 0);}
-cmd ::= SHOW ACCOUNTS.   { setDCLSQLElems(pInfo, SHOW_ACCOUNTS, 0);}
-cmd ::= SHOW USERS.      { setDCLSQLElems(pInfo, SHOW_USERS, 0);}
-
-cmd ::= SHOW MODULES.    { setDCLSQLElems(pInfo, SHOW_MODULES, 0); }
-cmd ::= SHOW QUERIES.    { setDCLSQLElems(pInfo, SHOW_QUERIES, 0); }
-cmd ::= SHOW CONNECTIONS.{ setDCLSQLElems(pInfo, SHOW_CONNECTIONS, 0);}
-cmd ::= SHOW STREAMS.    { setDCLSQLElems(pInfo, SHOW_STREAMS, 0); }
-cmd ::= SHOW CONFIGS.    { setDCLSQLElems(pInfo, SHOW_CONFIGS, 0); }
-cmd ::= SHOW SCORES.     { setDCLSQLElems(pInfo, SHOW_SCORES, 0); }
-cmd ::= SHOW GRANTS.     { setDCLSQLElems(pInfo, SHOW_GRANTS, 0); }
-
-cmd ::= SHOW VNODES.     { setDCLSQLElems(pInfo, SHOW_VNODES, 0); }
-cmd ::= SHOW VNODES IPTOKEN(X). { setDCLSQLElems(pInfo, SHOW_VNODES, 1, &X); }
+cmd ::= SHOW DATABASES.  { setShowOptions(pInfo, TSDB_MGMT_TABLE_DB, 0, 0);}
+cmd ::= SHOW MNODES.     { setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);}
+cmd ::= SHOW DNODES.     { setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);}
+cmd ::= SHOW ACCOUNTS.   { setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);}
+cmd ::= SHOW USERS.      { setShowOptions(pInfo, TSDB_MGMT_TABLE_USER, 0, 0);}
+
+cmd ::= SHOW MODULES.    { setShowOptions(pInfo, TSDB_MGMT_TABLE_MODULE, 0, 0); }
+cmd ::= SHOW QUERIES.    { setShowOptions(pInfo, TSDB_MGMT_TABLE_QUERIES, 0, 0); }
+cmd ::= SHOW CONNECTIONS.{ setShowOptions(pInfo, TSDB_MGMT_TABLE_CONNS, 0, 0);}
+cmd ::= SHOW STREAMS.    { setShowOptions(pInfo, TSDB_MGMT_TABLE_STREAMS, 0, 0); }
+cmd ::= SHOW CONFIGS.    { setShowOptions(pInfo, TSDB_MGMT_TABLE_CONFIGS, 0, 0); }
+cmd ::= SHOW SCORES.     { setShowOptions(pInfo, TSDB_MGMT_TABLE_SCORES, 0, 0); }
+cmd ::= SHOW GRANTS.     { setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0); }
+
+cmd ::= SHOW VNODES.     { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); }
+cmd ::= SHOW VNODES IPTOKEN(X). { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &X, 0); }
 
 %type dbPrefix {SSQLToken}
-dbPrefix(A) ::=. {A.n = 0;}
+dbPrefix(A) ::=. {A.n = 0; A.type = 0;}
 dbPrefix(A) ::= ids(X) DOT. {A = X; }
 
 %type cpxName {SSQLToken}
@@ -85,66 +85,66 @@ cpxName(A) ::= . {A.n = 0; }
 cpxName(A) ::= DOT ids(Y). {A = Y; A.n += 1; }
 
 cmd ::= SHOW dbPrefix(X) TABLES.
{ - setDCLSQLElems(pInfo, SHOW_TABLES, 1, &X); + setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &X, 0); } cmd ::= SHOW dbPrefix(X) TABLES LIKE ids(Y). { - setDCLSQLElems(pInfo, SHOW_TABLES, 2, &X, &Y); + setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &X, &Y); } cmd ::= SHOW dbPrefix(X) STABLES. { - setDCLSQLElems(pInfo, SHOW_STABLES, 1, &X); + setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &X, 0); } cmd ::= SHOW dbPrefix(X) STABLES LIKE ids(Y). { SSQLToken token; setDBName(&token, &X); - setDCLSQLElems(pInfo, SHOW_STABLES, 2, &token, &Y); + setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &Y); } cmd ::= SHOW dbPrefix(X) VGROUPS. { SSQLToken token; setDBName(&token, &X); - setDCLSQLElems(pInfo, SHOW_VGROUPS, 1, &token); + setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); } cmd ::= SHOW dbPrefix(X) VGROUPS ids(Y). { SSQLToken token; setDBName(&token, &X); - setDCLSQLElems(pInfo, SHOW_VGROUPS, 2, &token, &Y); + setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &Y); } //drop configure for tables cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). { X.n += Z.n; - setDCLSQLElems(pInfo, DROP_TABLE, 2, &X, &Y); + setDropDBTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &X, &Y); } -cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDCLSQLElems(pInfo, DROP_DATABASE, 2, &X, &Y); } -cmd ::= DROP DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, DROP_DNODE, 1, &X); } -cmd ::= DROP USER ids(X). { setDCLSQLElems(pInfo, DROP_USER, 1, &X); } -cmd ::= DROP ACCOUNT ids(X). { setDCLSQLElems(pInfo, DROP_ACCOUNT, 1, &X); } +cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDropDBTableInfo(pInfo, TSDB_SQL_DROP_DB, &X, &Y); } +cmd ::= DROP DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &X); } +cmd ::= DROP USER ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_USER, 1, &X); } +cmd ::= DROP ACCOUNT ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &X); } /////////////////////////////////THE USE STATEMENT////////////////////////////////////////// -cmd ::= USE ids(X). { setDCLSQLElems(pInfo, USE_DATABASE, 1, &X);} +cmd ::= USE ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_USE_DB, 1, &X);} /////////////////////////////////THE DESCRIBE STATEMENT///////////////////////////////////// cmd ::= DESCRIBE ids(X) cpxName(Y). { X.n += Y.n; - setDCLSQLElems(pInfo, DESCRIBE_TABLE, 1, &X); + setDCLSQLElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &X); } /////////////////////////////////THE ALTER STATEMENT//////////////////////////////////////// -cmd ::= ALTER USER ids(X) PASS ids(Y). { setDCLSQLElems(pInfo, ALTER_USER_PASSWD, 2, &X, &Y); } -cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setDCLSQLElems(pInfo, ALTER_USER_PRIVILEGES, 2, &X, &Y);} -cmd ::= ALTER DNODE IPTOKEN(X) ids(Y). { setDCLSQLElems(pInfo, ALTER_DNODE, 2, &X, &Y); } -cmd ::= ALTER DNODE IPTOKEN(X) ids(Y) ids(Z). { setDCLSQLElems(pInfo, ALTER_DNODE, 3, &X, &Y, &Z); } -cmd ::= ALTER LOCAL ids(X). { setDCLSQLElems(pInfo, ALTER_LOCAL, 1, &X); } -cmd ::= ALTER LOCAL ids(X) ids(Y). { setDCLSQLElems(pInfo, ALTER_LOCAL, 2, &X, &Y); } -cmd ::= ALTER DATABASE ids(X) alter_db_optr(Y). { SSQLToken t = {0}; setCreateDBSQL(pInfo, ALTER_DATABASE, &X, &Y, &t);} +cmd ::= ALTER USER ids(X) PASS ids(Y). { setAlterUserSQL(pInfo, TSDB_ALTER_USER_PASSWD, &X, &Y, NULL); } +cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setAlterUserSQL(pInfo, TSDB_ALTER_USER_PRIVILEGES, &X, NULL, &Y);} +cmd ::= ALTER DNODE IPTOKEN(X) ids(Y). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &X, &Y); } +cmd ::= ALTER DNODE IPTOKEN(X) ids(Y) ids(Z). 
{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &X, &Y, &Z); } +cmd ::= ALTER LOCAL ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &X); } +cmd ::= ALTER LOCAL ids(X) ids(Y). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &X, &Y); } +cmd ::= ALTER DATABASE ids(X) alter_db_optr(Y). { SSQLToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &X, &Y, &t);} -cmd ::= ALTER ACCOUNT ids(X) acct_optr(Z). { SSQLToken t = {0}; setCreateAcctSQL(pInfo, ALTER_ACCT, &X, &t, &Z);} -cmd ::= ALTER ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSQL(pInfo, ALTER_ACCT, &X, &Y, &Z);} +cmd ::= ALTER ACCOUNT ids(X) acct_optr(Z). { setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &X, NULL, &Z);} +cmd ::= ALTER ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &X, &Y, &Z);} // An IDENTIFIER can be a generic identifier, or one of several keywords. // Any non-standard keyword can also be an identifier. @@ -163,11 +163,11 @@ ifnotexists(X) ::= . {X.n = 0;} /////////////////////////////////THE CREATE STATEMENT/////////////////////////////////////// //create option for dnode/db/user/account -cmd ::= CREATE DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, CREATE_DNODE, 1, &X);} +cmd ::= CREATE DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &X);} cmd ::= CREATE ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). - { setCreateAcctSQL(pInfo, CREATE_ACCOUNT, &X, &Y, &Z);} -cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDBSQL(pInfo, CREATE_DATABASE, &X, &Y, &Z);} -cmd ::= CREATE USER ids(X) PASS ids(Y). { setDCLSQLElems(pInfo, CREATE_USER, 2, &X, &Y);} + { setCreateAcctSQL(pInfo, TSDB_SQL_CREATE_ACCT, &X, &Y, &Z);} +cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &X, &Y, &Z);} +cmd ::= CREATE USER ids(X) PASS ids(Y). { setCreateUserSQL(pInfo, &X, &Y);} pps(Y) ::= . {Y.n = 0; } pps(Y) ::= PPS INTEGER(X). {Y = X; } @@ -198,14 +198,14 @@ state(Y) ::= STATE ids(X). {Y = X; } %type acct_optr {SCreateAcctSQL} acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K) conns(L) state(M). { - Y.users = (K.n>0)?atoi(K.z):-1; - Y.dbs = (E.n>0)?atoi(E.z):-1; - Y.tseries = (D.n>0)?atoi(D.z):-1; - Y.streams = (F.n>0)?atoi(F.z):-1; - Y.pps = (C.n>0)?atoi(C.z):-1; - Y.storage = (P.n>0)?strtoll(P.z, NULL, 10):-1; - Y.qtime = (Q.n>0)?strtoll(Q.z, NULL, 10):-1; - Y.conns = (L.n>0)?atoi(L.z):-1; + Y.maxUsers = (K.n>0)?atoi(K.z):-1; + Y.maxDbs = (E.n>0)?atoi(E.z):-1; + Y.maxTimeSeries = (D.n>0)?atoi(D.z):-1; + Y.maxStreams = (F.n>0)?atoi(F.z):-1; + Y.maxPointsPerSecond = (C.n>0)?atoi(C.z):-1; + Y.maxStorage = (P.n>0)?strtoll(P.z, NULL, 10):-1; + Y.maxQueryTime = (Q.n>0)?strtoll(Q.z, NULL, 10):-1; + Y.maxConnections = (L.n>0)?atoi(L.z):-1; Y.stat = M; } @@ -270,29 +270,29 @@ cmd ::= CREATE TABLE ifnotexists(Y) ids(X) cpxName(Z) create_table_args. { %type create_table_args{SCreateTableSQL*} create_table_args(A) ::= LP columnlist(X) RP. { - A = tSetCreateSQLElems(X, NULL, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METER); - setSQLInfo(pInfo, A, NULL, TSQL_CREATE_NORMAL_METER); + A = tSetCreateSQLElems(X, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); } -// create metric +// create super table create_table_args(A) ::= LP columnlist(X) RP TAGS LP columnlist(Y) RP. 
{ - A = tSetCreateSQLElems(X, Y, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METRIC); - setSQLInfo(pInfo, A, NULL, TSQL_CREATE_NORMAL_METRIC); + A = tSetCreateSQLElems(X, Y, NULL, NULL, NULL, TSQL_CREATE_STABLE); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); } -// create meter by using metric -// create meter meter_name using metric_name tags(tag_values1, tag_values2) +// create table by using super table +// create table table_name using super_table_name tags(tag_values1, tag_values2) create_table_args(A) ::= USING ids(X) cpxName(F) TAGS LP tagitemlist(Y) RP. { X.n += F.n; - A = tSetCreateSQLElems(NULL, NULL, &X, Y, NULL, TSQL_CREATE_METER_FROM_METRIC); - setSQLInfo(pInfo, A, NULL, TSQL_CREATE_METER_FROM_METRIC); + A = tSetCreateSQLElems(NULL, NULL, &X, Y, NULL, TSQL_CREATE_TABLE_FROM_STABLE); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); } // create stream -// create table table_name as select count(*) from metric_name interval(time) +// create table table_name as select count(*) from super_table_name interval(time) create_table_args(A) ::= AS select(S). { A = tSetCreateSQLElems(NULL, NULL, NULL, NULL, S, TSQL_CREATE_STREAM); - setSQLInfo(pInfo, A, NULL, TSQL_CREATE_STREAM); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); } %type column{TAOS_FIELD} @@ -349,16 +349,22 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). { } //////////////////////// The SELECT statement ///////////////////////////////// -cmd ::= select(X). { - setSQLInfo(pInfo, X, NULL, TSQL_QUERY_METER); -} - %type select {SQuerySQL*} -%destructor select {destroyQuerySql($$);} +%destructor select {doDestroyQuerySql($$);} select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). { A = tSetQuerySQLElems(&T, W, X, Y, P, Z, &K, &S, F, &L, &G); } +%type union {SSubclauseInfo*} +%destructor union {destroyAllSelectClause($$);} + +union(Y) ::= select(X). { Y = setSubclause(NULL, X); } +union(Y) ::= LP union(X) RP. { Y = X; } +union(Y) ::= union(Z) UNION ALL select(X). { Y = appendSelectClause(Z, X); } +union(Y) ::= union(Z) UNION ALL LP select(X) RP. { Y = appendSelectClause(Z, X); } + +cmd ::= union(X). { setSQLInfo(pInfo, X, NULL, TSDB_SQL_SELECT); } + // Support for the SQL exprssion without from & where subclauses, e.g., // select current_database(), // select server_version(), select client_version(), @@ -578,34 +584,14 @@ exprlist(A) ::= expritem(X). {A = tSQLExprListAppend(0,X,0);} expritem(A) ::= expr(X). {A = X;} expritem(A) ::= . {A = 0;} -////////////////////////// The INSERT command ///////////////////////////////// -// add support "values() values() values() tags()" operation.... -cmd ::= INSERT INTO cpxName(X) insert_value_list(K). { - tSetInsertSQLElems(pInfo, &X, K); -} - -%type insert_value_list {tSQLExprListList*} -insert_value_list(X) ::= VALUES LP itemlist(Y) RP. {X = tSQLListListAppend(NULL, Y);} -insert_value_list(X) ::= insert_value_list(K) VALUES LP itemlist(Y) RP. -{X = tSQLListListAppend(K, Y);} - -//cmd ::= INSERT INTO cpxName(X) select(S). -// {sqliteInsert(pParse, sqliteSrcListAppend(0,&X,&D), 0, S, F, R);} - -%type itemlist {tSQLExprList*} -%destructor itemlist {tSQLExprListDestroy($$);} - -itemlist(A) ::= itemlist(X) COMMA expr(Y). {A = tSQLExprListAppend(X,Y,0);} -itemlist(A) ::= expr(X). {A = tSQLExprListAppend(0,X,0);} - ///////////////////////////////////reset query cache////////////////////////////////////// -cmd ::= RESET QUERY CACHE. 
{ setDCLSQLElems(pInfo, RESET_QUERY_CACHE, 0);} +cmd ::= RESET QUERY CACHE. { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} ///////////////////////////////////ALTER TABLE statement////////////////////////////////// cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). { X.n += F.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, ALTER_TABLE_ADD_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_ADD_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). { @@ -614,15 +600,15 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). { toTSDBType(A.type); tVariantList* K = tVariantListAppendToken(NULL, &A, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, K, ALTER_TABLE_DROP_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_DROP_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } //////////////////////////////////ALTER TAGS statement///////////////////////////////////// cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). { X.n += Y.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, ALTER_TABLE_TAGS_ADD); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_ADD); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). { X.n += Z.n; @@ -630,8 +616,8 @@ cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). { toTSDBType(Y.type); tVariantList* A = tVariantListAppendToken(NULL, &Y, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_DROP); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_DROP); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). { @@ -643,8 +629,8 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). { toTSDBType(Z.type); A = tVariantListAppendToken(A, &Z, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_CHG); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_CHG); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { @@ -654,17 +640,18 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { tVariantList* A = tVariantListAppendToken(NULL, &Y, -1); A = tVariantListAppend(A, &Z, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_SET); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_SET); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } ////////////////////////////////////////kill statement/////////////////////////////////////// -cmd ::= KILL CONNECTION IPTOKEN(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setDCLSQLElems(pInfo, KILL_CONNECTION, 1, &X);} -cmd ::= KILL STREAM IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). 
{X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_STREAM, 1, &X);} -cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_QUERY, 1, &X);} +cmd ::= KILL CONNECTION IPTOKEN(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &X);} +cmd ::= KILL STREAM IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &X);} +cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &X);} %fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL COUNT SUM AVG MIN MAX FIRST LAST TOP BOTTOM STDDEV PERCENTILE APERCENTILE LEASTSQUARES HISTOGRAM DIFF - SPREAD TWA INTERP LAST_ROW NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT METRIC TBNAME JOIN METRICS STABLE NULL. + SPREAD TWA INTERP LAST_ROW RATE IRATE SUM_RATE SUM_IRATE AVG_RATE AVG_IRATE NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT + METRIC TBNAME JOIN METRICS STABLE NULL INSERT INTO VALUES. diff --git a/src/inc/taos.h b/src/inc/taos.h index 2fd6d8be927a310e0131b62a8f0ecf55ae943ef2..d9db79fbcb74e6935ab052c702b72129cd4cc132 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -57,10 +57,16 @@ typedef struct taosField { char type; } TAOS_FIELD; -void taos_init(); -int taos_options(TSDB_OPTION option, const void *arg, ...); -TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); -void taos_close(TAOS *taos); +#ifdef _TD_GO_DLL_ + #define DLL_EXPORT __declspec(dllexport) +#else + #define DLL_EXPORT +#endif + +DLL_EXPORT void taos_init(); +DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); +DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); +DLL_EXPORT void taos_close(TAOS *taos); typedef struct TAOS_BIND { int buffer_type; @@ -80,18 +86,18 @@ int taos_stmt_execute(TAOS_STMT *stmt); TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt); int taos_stmt_close(TAOS_STMT *stmt); -int taos_query(TAOS *taos, const char *sql); -TAOS_RES *taos_use_result(TAOS *taos); -TAOS_ROW taos_fetch_row(TAOS_RES *res); -int taos_result_precision(TAOS_RES *res); // get the time precision of result -void taos_free_result(TAOS_RES *res); -int taos_field_count(TAOS *taos); -int taos_num_fields(TAOS_RES *res); -int taos_affected_rows(TAOS *taos); -TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); -int taos_select_db(TAOS *taos, const char *db); -int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); -void taos_stop_query(TAOS_RES *res); +DLL_EXPORT int taos_query(TAOS *taos, const char *sql); +DLL_EXPORT TAOS_RES *taos_use_result(TAOS *taos); +DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res); +DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision of result +DLL_EXPORT void taos_free_result(TAOS_RES *res); +DLL_EXPORT int taos_field_count(TAOS *taos); +DLL_EXPORT int taos_num_fields(TAOS_RES *res); +DLL_EXPORT int taos_affected_rows(TAOS *taos); +DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); +DLL_EXPORT int taos_select_db(TAOS *taos, const char *db); +DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); 
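The `DLL_EXPORT` decoration added above is what makes these entry points visible to consumers that load the driver dynamically on Windows. As a minimal sketch of why the export matters — assuming the `-D_TD_GO_DLL_` build produces a `libtaos.dll` whose exported symbols include `taos_init` — a client could resolve the API at runtime like this:

```c
/* Hedged illustration, not part of the patch: resolving an exported symbol
 * from the Windows DLL at runtime. Without the __declspec(dllexport)
 * decoration added above, GetProcAddress would return NULL here. */
#include <windows.h>
#include <stdio.h>

typedef void (*taos_init_fn)(void);

int main(void) {
  HMODULE h = LoadLibraryA("libtaos.dll");  /* DLL name assumed from the Go-DLL build */
  if (h == NULL) {
    fprintf(stderr, "failed to load libtaos.dll\n");
    return 1;
  }

  taos_init_fn init = (taos_init_fn)GetProcAddress(h, "taos_init");
  if (init != NULL) {
    init();  /* initialize the client library through the exported symbol */
  }

  FreeLibrary(h);
  return 0;
}
```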
+DLL_EXPORT void taos_stop_query(TAOS_RES *res); int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); int taos_validate_sql(TAOS *taos, const char *sql); @@ -100,27 +106,26 @@ int taos_validate_sql(TAOS *taos, const char *sql); // TAOS_RES *taos_list_dbs(TAOS *mysql, const char *wild); // TODO: the return value should be `const` -char *taos_get_server_info(TAOS *taos); -char *taos_get_client_info(); -char *taos_errstr(TAOS *taos); +DLL_EXPORT char *taos_get_server_info(TAOS *taos); +DLL_EXPORT char *taos_get_client_info(); +DLL_EXPORT char *taos_errstr(TAOS *taos); -int taos_errno(TAOS *taos); +DLL_EXPORT int taos_errno(TAOS *taos); -void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param); -void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); -void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param); +DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param); +DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); +DLL_EXPORT void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param); -TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, const char *db, const char *table, int64_t time, int mseconds); -TAOS_ROW taos_consume(TAOS_SUB *tsub); -void taos_unsubscribe(TAOS_SUB *tsub); -int taos_subfields_count(TAOS_SUB *tsub); -TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub); +typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code); +DLL_EXPORT TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval); +DLL_EXPORT TAOS_RES *taos_consume(TAOS_SUB *tsub); +DLL_EXPORT void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress); -TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), +DLL_EXPORT TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *)); -void taos_close_stream(TAOS_STREAM *tstr); +DLL_EXPORT void taos_close_stream(TAOS_STREAM *tstr); -int taos_load_table_info(TAOS *taos, const char* tableNameList); +DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList); #ifdef __cplusplus } diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index e1a7eb9b4b9828c82489114bf8689147993bd38a..edf0ab24a169c0a24ab1f8e554e7c0282c92a47f 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -91,7 +91,7 @@ extern "C" { #define TSDB_CODE_INVALID_QHANDLE 70 #define TSDB_CODE_RELATED_TABLES_EXIST 71 #define TSDB_CODE_MONITOR_DB_FORBEIDDEN 72 -#define TSDB_CODE_VG_COMMITLOG_INIT_FAILED 73 +#define TSDB_CODE_NO_DISK_PERMISSIONS 73 #define TSDB_CODE_VG_INIT_FAILED 74 #define TSDB_CODE_DATA_ALREADY_IMPORTED 75 #define TSDB_CODE_OPS_NOT_SUPPORT 76 @@ -125,7 +125,7 @@ extern "C" { #define TSDB_CODE_BATCH_SIZE_TOO_BIG 104 #define TSDB_CODE_TIMESTAMP_OUT_OF_RANGE 105 #define TSDB_CODE_INVALID_QUERY_MSG 106 // failed to validate the sql expression msg by vnode -#define TSDB_CODE_CACHE_BLOCK_TS_DISORDERED 107 // time stamp in cache block is disordered +#define TSDB_CODE_SORTED_RES_TOO_MANY 107 // too many result for ordered super table projection query #define 
TSDB_CODE_FILE_BLOCK_TS_DISORDERED 108 // time stamp in file block is disordered
 #define TSDB_CODE_INVALID_COMMIT_LOG 109 // commit log init failed
 #define TSDB_CODE_SERV_NO_DISKSPACE 110
@@ -137,7 +137,7 @@ extern "C" {
 #define TSDB_CODE_INVALID_VNODE_STATUS 116
 #define TSDB_CODE_FAILED_TO_LOCK_RESOURCES 117
 #define TSDB_CODE_TABLE_ID_MISMATCH 118
-#define TSDB_CODE_QUERY_CACHE_ERASED 119
+#define TSDB_CODE_QUERY_CACHE_ERASED 119

 #define TSDB_CODE_MAX_ERROR_CODE 120
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index 22b10eaa60b1fe871539c9bdbe87db5d3fce2709..883906b617053af79dd7292246a315a5fe13f8b7 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -74,10 +74,10 @@ extern "C" {
 #define TSDB_MSG_TYPE_CREATE_MNODE_RSP 44
 #define TSDB_MSG_TYPE_DROP_MNODE 45
 #define TSDB_MSG_TYPE_DROP_MNODE_RSP 46
-#define TSDB_MSG_TYPE_CREATE_PNODE 47
-#define TSDB_MSG_TYPE_CREATE_PNODE_RSP 48
-#define TSDB_MSG_TYPE_DROP_PNODE 49
-#define TSDB_MSG_TYPE_DROP_PNODE_RSP 50
+#define TSDB_MSG_TYPE_CREATE_DNODE 47
+#define TSDB_MSG_TYPE_CREATE_DNODE_RSP 48
+#define TSDB_MSG_TYPE_DROP_DNODE 49
+#define TSDB_MSG_TYPE_DROP_DNODE_RSP 50
 #define TSDB_MSG_TYPE_CREATE_DB 51
 #define TSDB_MSG_TYPE_CREATE_DB_RSP 52
 #define TSDB_MSG_TYPE_DROP_DB 53
@@ -147,7 +147,7 @@ enum _mgmt_table {
   TSDB_MGMT_TABLE_USER,
   TSDB_MGMT_TABLE_DB,
   TSDB_MGMT_TABLE_TABLE,
-  TSDB_MGMT_TABLE_PNODE,
+  TSDB_MGMT_TABLE_DNODE,
   TSDB_MGMT_TABLE_MNODE,
   TSDB_MGMT_TABLE_VGROUP,
   TSDB_MGMT_TABLE_METRIC,
@@ -312,7 +312,7 @@ typedef struct {
 typedef struct {
   char db[TSDB_METER_ID_LEN];
-  short ignoreNotExists;
+  uint8_t ignoreNotExists;
 } SDropDbMsg, SUseDbMsg;

 typedef struct {
@@ -490,6 +490,7 @@ typedef struct SColumnInfo {
 typedef struct SMeterSidExtInfo {
   int32_t sid;
   int64_t uid;
+  TSKEY key;  // key for subscription
   char tags[];
 } SMeterSidExtInfo;

@@ -506,7 +507,6 @@ typedef struct {
   uint64_t uid;
   TSKEY skey;
   TSKEY ekey;
-  int32_t num;

   int16_t order;
   int16_t orderColId;
@@ -515,7 +515,8 @@
   char intervalTimeUnit;     // time interval type, used to revise interval(1d)
   int64_t nAggTimeInterval;  // time interval for aggregation, in milliseconds
-
+  int64_t slidingTime;       // value for sliding window
+
   // tag schema, used to parse tag information in pSidExtInfo
   uint64_t pTagSchema;
diff --git a/src/inc/tcache.h b/src/inc/tcache.h
index 93bbf22cd3752589731375a32da3da78c635b956..b577c53ea8dbcdc9f069288b94d0244907e77f12 100644
--- a/src/inc/tcache.h
+++ b/src/inc/tcache.h
@@ -86,6 +86,26 @@ void taosCleanUpDataCache(void *handle);
  */
 void taosClearDataCache(void *handle);

+/**
+ * Add one reference count to the existing data and assign the data to a new owner.
+ * The new owner must invoke taosRemoveDataFromCache once it no longer needs the data.
+ * This is a faster variant of taosGetDataFromCache, and it avoids the side effect that
+ * once the data has been moved to the trash, taosGetDataFromCache can no longer
+ * retrieve it.
+ *
+ * @param handle
+ * @param data
+ * @return
+ */
+void* taosGetDataFromExists(void* handle, void* data);
+
+/**
+ * Transfer the ownership of data in the cache to another object without increasing the reference count.
+ * @param handle + * @param data + * @return + */ +void* taosTransferDataInCache(void* handle, void** data); + #ifdef __cplusplus } #endif diff --git a/src/inc/tglobalcfg.h b/src/inc/tglobalcfg.h index c4407f7d55487689c84e5484fbb72749a62834e2..35cf6a42443ef40135c3937867339c6634c32140 100644 --- a/src/inc/tglobalcfg.h +++ b/src/inc/tglobalcfg.h @@ -54,6 +54,7 @@ extern char tsDirectory[]; extern char dataDir[]; extern char logDir[]; extern char scriptDir[]; +extern char osName[]; extern char tsMasterIp[]; extern char tsSecondIp[]; @@ -78,8 +79,8 @@ extern char tsPrivateIp[]; extern short tsNumOfVnodesPerCore; extern short tsNumOfTotalVnodes; extern short tsCheckHeaderFile; -extern uint32_t tsServerIp; extern uint32_t tsPublicIpInt; +extern short tsAffectedRowsMod; extern int tsSessionsPerVnode; extern int tsAverageCacheBlocks; @@ -126,6 +127,7 @@ extern int tsEnableMonitorModule; extern int tsRestRowLimit; extern int tsCompressMsgSize; extern int tsMaxSQLStringLen; +extern int tsMaxNumOfOrderedResults; extern char tsSocketType[4]; @@ -135,6 +137,7 @@ extern int tsMinIntervalTime; extern int tsMaxStreamComputDelay; extern int tsStreamCompStartDelay; extern int tsStreamCompRetryDelay; +extern float tsStreamComputDelayRatio; // the delayed computing ration of the whole time window extern int tsProjectExecInterval; extern int64_t tsMaxRetentWindow; diff --git a/src/inc/tinterpolation.h b/src/inc/tinterpolation.h index 40b8c5cb2f85ad6cc403bce4159bcab12f3c1476..22b4ceb4f06799e826c2711c7317b1ebd3197e9d 100644 --- a/src/inc/tinterpolation.h +++ b/src/inc/tinterpolation.h @@ -38,13 +38,13 @@ typedef struct SPoint { void * val; } SPoint; -typedef void (*__interpo_callback_fn_t)(void *param); - int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t timeRange, char intervalTimeUnit, int16_t precision); void taosInitInterpoInfo(SInterpolationInfo *pInterpoInfo, int32_t order, int64_t startTimeStamp, int32_t numOfTags, int32_t rowSize); +void taosDestoryInterpoInfo(SInterpolationInfo *pInterpoInfo); + void taosInterpoSetStartInfo(SInterpolationInfo *pInterpoInfo, int32_t numOfRawDataInRows, int32_t type); TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int32_t timeInterval, int8_t intervalTimeUnit, int8_t precision); @@ -78,8 +78,8 @@ int32_t taosNumOfRemainPoints(SInterpolationInfo *pInterpoInfo); */ int32_t taosDoInterpoResult(SInterpolationInfo *pInterpoInfo, int16_t interpoType, tFilePage **data, int32_t numOfRawDataInRows, int32_t outputRows, int64_t nInterval, - int64_t *pPrimaryKeyArray, tColModel *pModel, char **srcData, int64_t *defaultVal, - int32_t *functionIDs, int32_t bufSize); + const int64_t *pPrimaryKeyArray, tColModel *pModel, char **srcData, int64_t *defaultVal, + const int32_t *functionIDs, int32_t bufSize); int taosDoLinearInterpolation(int32_t type, SPoint *point1, SPoint *point2, SPoint *point); diff --git a/src/inc/tlog.h b/src/inc/tlog.h index 0d348c27ce8056fd554ab3ea0e2c60e89920b882..7556cc50a1ff23d24ef8eafdcd677f65577c8713 100644 --- a/src/inc/tlog.h +++ b/src/inc/tlog.h @@ -113,7 +113,10 @@ extern uint32_t cdebugFlag; } #define tscPrint(...) \ { tprintf("TSC ", 255, __VA_ARGS__); } - +#define tscDump(...) \ + if (cdebugFlag & DEBUG_TRACE) { \ + taosPrintLongString("TSC ", cdebugFlag, __VA_ARGS__); \ + } #define jniError(...) 
\ if (jnidebugFlag & DEBUG_ERROR) { \ tprintf("ERROR JNI ", jnidebugFlag, __VA_ARGS__); \ diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 953be70709cc1dd9fdcacec7815e02ff877ac9db..aa712ab62296c806b530cea14bacba525530d93d 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -190,6 +190,7 @@ extern "C" { #define TSDB_MAX_TABLES_PER_VNODE 220000 #define TSDB_MAX_JOIN_TABLE_NUM 5 +#define TSDB_MAX_UNION_CLAUSE 5 #define TSDB_MAX_BINARY_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE) #define TSDB_MAX_NCHAR_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE) @@ -211,7 +212,7 @@ extern "C" { #define TSDB_MAX_RPC_THREADS 5 -#define TSDB_QUERY_TYPE_QUERY 0 // normal query +#define TSDB_QUERY_TYPE_NON_TYPE 0x00U // none type #define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01U // free qhandle at vnode /* @@ -227,6 +228,13 @@ extern "C" { #define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40U // select *,columns... query #define TSDB_QUERY_TYPE_JOIN_SEC_STAGE 0x80U // join sub query at the second stage +#define TSDB_QUERY_TYPE_INSERT 0x100U // insert type +#define TSDB_QUERY_TYPE_IMPORT 0x200U // import data + +#define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0) +#define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type)) +#define TSDB_QUERY_RESET_TYPE(x) ((x) = TSDB_QUERY_TYPE_NON_TYPE) + #define TSQL_SO_ASC 1 #define TSQL_SO_DESC 0 diff --git a/src/inc/tsqldef.h b/src/inc/tsqldef.h index ea0500eb866f65dfe3ef77d0f7b318b2bcbdec0f..182860e67cf8df81401e8c065700b9d9fd6fa387 100644 --- a/src/inc/tsqldef.h +++ b/src/inc/tsqldef.h @@ -119,97 +119,106 @@ #define TK_COMMA 101 #define TK_NULL 102 #define TK_SELECT 103 -#define TK_FROM 104 -#define TK_VARIABLE 105 -#define TK_INTERVAL 106 -#define TK_FILL 107 -#define TK_SLIDING 108 -#define TK_ORDER 109 -#define TK_BY 110 -#define TK_ASC 111 -#define TK_DESC 112 -#define TK_GROUP 113 -#define TK_HAVING 114 -#define TK_LIMIT 115 -#define TK_OFFSET 116 -#define TK_SLIMIT 117 -#define TK_SOFFSET 118 -#define TK_WHERE 119 -#define TK_NOW 120 -#define TK_INSERT 121 -#define TK_INTO 122 -#define TK_VALUES 123 -#define TK_RESET 124 -#define TK_QUERY 125 -#define TK_ADD 126 -#define TK_COLUMN 127 -#define TK_TAG 128 -#define TK_CHANGE 129 -#define TK_SET 130 -#define TK_KILL 131 -#define TK_CONNECTION 132 -#define TK_COLON 133 -#define TK_STREAM 134 -#define TK_ABORT 135 -#define TK_AFTER 136 -#define TK_ATTACH 137 -#define TK_BEFORE 138 -#define TK_BEGIN 139 -#define TK_CASCADE 140 -#define TK_CLUSTER 141 -#define TK_CONFLICT 142 -#define TK_COPY 143 -#define TK_DEFERRED 144 -#define TK_DELIMITERS 145 -#define TK_DETACH 146 -#define TK_EACH 147 -#define TK_END 148 -#define TK_EXPLAIN 149 -#define TK_FAIL 150 -#define TK_FOR 151 -#define TK_IGNORE 152 -#define TK_IMMEDIATE 153 -#define TK_INITIALLY 154 -#define TK_INSTEAD 155 -#define TK_MATCH 156 -#define TK_KEY 157 -#define TK_OF 158 -#define TK_RAISE 159 -#define TK_REPLACE 160 -#define TK_RESTRICT 161 -#define TK_ROW 162 -#define TK_STATEMENT 163 -#define TK_TRIGGER 164 -#define TK_VIEW 165 -#define TK_ALL 166 -#define TK_COUNT 167 -#define TK_SUM 168 -#define TK_AVG 169 -#define TK_MIN 170 -#define TK_MAX 171 -#define TK_FIRST 172 -#define TK_LAST 173 -#define TK_TOP 174 -#define TK_BOTTOM 175 -#define TK_STDDEV 176 -#define TK_PERCENTILE 177 -#define TK_APERCENTILE 178 -#define TK_LEASTSQUARES 179 -#define TK_HISTOGRAM 180 -#define TK_DIFF 181 -#define TK_SPREAD 182 -#define TK_TWA 183 -#define TK_INTERP 184 -#define TK_LAST_ROW 185 -#define TK_SEMI 186 -#define TK_NONE 187 -#define TK_PREV 188 -#define TK_LINEAR 189 -#define TK_IMPORT 
190 -#define TK_METRIC 191 -#define TK_TBNAME 192 -#define TK_JOIN 193 -#define TK_METRICS 194 -#define TK_STABLE 195 +#define TK_UNION 104 +#define TK_ALL 105 +#define TK_FROM 106 +#define TK_VARIABLE 107 +#define TK_INTERVAL 108 +#define TK_FILL 109 +#define TK_SLIDING 110 +#define TK_ORDER 111 +#define TK_BY 112 +#define TK_ASC 113 +#define TK_DESC 114 +#define TK_GROUP 115 +#define TK_HAVING 116 +#define TK_LIMIT 117 +#define TK_OFFSET 118 +#define TK_SLIMIT 119 +#define TK_SOFFSET 120 +#define TK_WHERE 121 +#define TK_NOW 122 +#define TK_RESET 123 +#define TK_QUERY 124 +#define TK_ADD 125 +#define TK_COLUMN 126 +#define TK_TAG 127 +#define TK_CHANGE 128 +#define TK_SET 129 +#define TK_KILL 130 +#define TK_CONNECTION 131 +#define TK_COLON 132 +#define TK_STREAM 133 +#define TK_ABORT 134 +#define TK_AFTER 135 +#define TK_ATTACH 136 +#define TK_BEFORE 137 +#define TK_BEGIN 138 +#define TK_CASCADE 139 +#define TK_CLUSTER 140 +#define TK_CONFLICT 141 +#define TK_COPY 142 +#define TK_DEFERRED 143 +#define TK_DELIMITERS 144 +#define TK_DETACH 145 +#define TK_EACH 146 +#define TK_END 147 +#define TK_EXPLAIN 148 +#define TK_FAIL 149 +#define TK_FOR 150 +#define TK_IGNORE 151 +#define TK_IMMEDIATE 152 +#define TK_INITIALLY 153 +#define TK_INSTEAD 154 +#define TK_MATCH 155 +#define TK_KEY 156 +#define TK_OF 157 +#define TK_RAISE 158 +#define TK_REPLACE 159 +#define TK_RESTRICT 160 +#define TK_ROW 161 +#define TK_STATEMENT 162 +#define TK_TRIGGER 163 +#define TK_VIEW 164 +#define TK_COUNT 165 +#define TK_SUM 166 +#define TK_AVG 167 +#define TK_MIN 168 +#define TK_MAX 169 +#define TK_FIRST 170 +#define TK_LAST 171 +#define TK_TOP 172 +#define TK_BOTTOM 173 +#define TK_STDDEV 174 +#define TK_PERCENTILE 175 +#define TK_APERCENTILE 176 +#define TK_LEASTSQUARES 177 +#define TK_HISTOGRAM 178 +#define TK_DIFF 179 +#define TK_SPREAD 180 +#define TK_TWA 181 +#define TK_INTERP 182 +#define TK_LAST_ROW 183 +#define TK_RATE 184 +#define TK_IRATE 185 +#define TK_SUM_RATE 186 +#define TK_SUM_IRATE 187 +#define TK_AVG_RATE 188 +#define TK_AVG_IRATE 189 +#define TK_SEMI 190 +#define TK_NONE 191 +#define TK_PREV 192 +#define TK_LINEAR 193 +#define TK_IMPORT 194 +#define TK_METRIC 195 +#define TK_TBNAME 196 +#define TK_JOIN 197 +#define TK_METRICS 198 +#define TK_STABLE 199 +#define TK_INSERT 200 +#define TK_INTO 201 +#define TK_VALUES 202 #endif + + diff --git a/src/inc/tsqlfunction.h b/src/inc/tsqlfunction.h index 0ed6a9952ec5d8b9c3e006c092b58b6a7d644297..93f50cf4f3862e7d2747c10399858c3be0562072 100644 --- a/src/inc/tsqlfunction.h +++ b/src/inc/tsqlfunction.h @@ -162,8 +162,8 @@ typedef struct SExtTagsInfo { // sql function runtime context typedef struct SQLFunctionCtx { int32_t startOffset; - int32_t size; - int32_t order; + int32_t size; // number of rows + int32_t order; // asc|desc int32_t scanFlag; // TODO merge with currentStage int16_t inputType; diff --git a/src/inc/ttime.h b/src/inc/ttime.h index eae24a56b529a5f3d837cdb2df9d60a3064da69f..34c241cbc0f22afc511660cee475c82d08466599 100644 --- a/src/inc/ttime.h +++ b/src/inc/ttime.h @@ -42,6 +42,7 @@ int64_t taosGetTimestamp(int32_t precision); int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts); int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec); +void deltaToUtcInitOnce(); #ifdef __cplusplus } diff --git a/src/inc/ttypes.h b/src/inc/ttypes.h index 0f8eb2d58c31b5fe8f138bad64f5ff41b70cab9e..db6490f8404f2b9c0be4f83ce0391ec4dad39a81 100644 --- a/src/inc/ttypes.h +++ b/src/inc/ttypes.h @@ -73,7 +73,7 @@ 
void tVariantCreateFromBinary(tVariant *pVar, char *pz, uint32_t len, uint32_t t void tVariantDestroy(tVariant *pV); -void tVariantAssign(tVariant *pDst, tVariant *pSrc); +void tVariantAssign(tVariant *pDst, const tVariant *pSrc); int32_t tVariantToString(tVariant *pVar, char *dst); diff --git a/src/inc/tutil.h b/src/inc/tutil.h index 449e51bc238b802be02d381cee2313bdd4897177..b66da286973521c1e6cd29db2b2923cfc371be58 100644 --- a/src/inc/tutil.h +++ b/src/inc/tutil.h @@ -102,8 +102,8 @@ extern "C" { #define GET_FLOAT_VAL(x) taos_align_get_float(x) #define GET_DOUBLE_VAL(x) taos_align_get_double(x) - float taos_align_get_float(char* pBuf); - double taos_align_get_double(char* pBuf); + float taos_align_get_float(const char* pBuf); + double taos_align_get_double(const char* pBuf); //#define __float_align_declear() float __underlyFloat = 0.0; //#define __float_align_declear() @@ -162,15 +162,6 @@ int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstP int32_t taosInitTimer(void (*callback)(int), int32_t ms); -/** - * murmur hash algorithm - * @key usually string - * @len key length - * @seed hash seed - * @out an int32 value - */ -uint32_t MurmurHash3_32(const void *key, int32_t len); - bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len); int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes); diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index 7442367e91dbd1f972b0bfa703720e6d15be0182..244eafb44ff2b187b0397eda241e5290dad53cd6 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -9,6 +9,7 @@ INCLUDE_DIRECTORIES(inc) IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(./src SRC) LIST(REMOVE_ITEM SRC ./src/shellWindows.c) + LIST(REMOVE_ITEM SRC ./src/shellDarwin.c) ADD_EXECUTABLE(shell ${SRC}) TARGET_LINK_LIBRARIES(shell taos_static) SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) @@ -24,7 +25,9 @@ ELSEIF (TD_WINDOWS_64) ELSEIF (TD_DARWIN_64) LIST(APPEND SRC ./src/shellEngine.c) LIST(APPEND SRC ./src/shellMain.c) - LIST(APPEND SRC ./src/shellWindows.c) + LIST(APPEND SRC ./src/shellDarwin.c) + LIST(APPEND SRC ./src/shellCommand.c) + LIST(APPEND SRC ./src/shellImport.c) ADD_EXECUTABLE(shell ${SRC}) TARGET_LINK_LIBRARIES(shell taos_static) SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c new file mode 100644 index 0000000000000000000000000000000000000000..b624f5ee68535026580af25aa962a8f6a79f963e --- /dev/null +++ b/src/kit/shell/src/shellDarwin.c @@ -0,0 +1,532 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+#define __USE_XOPEN
+
+#include "os.h"
+
+#include "shell.h"
+#include "shellCommand.h"
+#include "tkey.h"
+
+#define OPT_ABORT 1 /* --abort */
+
+int indicator = 1;
+struct termios oldtio;
+
+extern int wcwidth(wchar_t c);
+void insertChar(Command *cmd, char *c, int size);
+
+
+void printHelp() {
+  char indent[10] = " ";
+  printf("taos shell is used to test the TDengine database\n");
+
+  printf("%s%s\n", indent, "-h");
+  printf("%s%s%s\n", indent, indent, "TDengine server IP address to connect. The default host is localhost.");
+  printf("%s%s\n", indent, "-p");
+  printf("%s%s%s\n", indent, indent, "The password to use when connecting to the server.");
+  printf("%s%s\n", indent, "-P");
+  printf("%s%s%s\n", indent, indent, "The TCP/IP port number to use for the connection.");
+  printf("%s%s\n", indent, "-u");
+  printf("%s%s%s\n", indent, indent, "The TDengine user name to use when connecting to the server.");
+  printf("%s%s\n", indent, "-c");
+  printf("%s%s%s\n", indent, indent, "Configuration directory.");
+  printf("%s%s\n", indent, "-s");
+  printf("%s%s%s\n", indent, indent, "Commands to run without entering the shell.");
+  printf("%s%s\n", indent, "-r");
+  printf("%s%s%s\n", indent, indent, "Output time as unsigned long.");
+  printf("%s%s\n", indent, "-f");
+  printf("%s%s%s\n", indent, indent, "Script to run without entering the shell.");
+  printf("%s%s\n", indent, "-d");
+  printf("%s%s%s\n", indent, indent, "Database to use when connecting to the server.");
+  printf("%s%s\n", indent, "-t");
+  printf("%s%s%s\n", indent, indent, "Time zone of the shell, default is local.");
+  printf("%s%s\n", indent, "-D");
+  printf("%s%s%s\n", indent, indent, "Use multi-thread to import all SQL files in the directory separately.");
+  printf("%s%s\n", indent, "-T");
+  printf("%s%s%s\n", indent, indent, "Number of threads when using multi-thread to import data.");
+
+  exit(EXIT_SUCCESS);
+}
+
+void shellParseArgument(int argc, char *argv[], struct arguments *arguments) {
+  wordexp_t full_path;
+  for (int i = 1; i < argc; i++) {
+    // for host
+    if (strcmp(argv[i], "-h") == 0) {
+      if (i < argc - 1) {
+        arguments->host = argv[++i];
+      } else {
+        fprintf(stderr, "option -h requires an argument\n");
+        exit(EXIT_FAILURE);
+      }
+    }
+    // for password
+    else if (strcmp(argv[i], "-p") == 0) {
+      arguments->is_use_passwd = true;
+    }
+    // for management port
+    else if (strcmp(argv[i], "-P") == 0) {
+      if (i < argc - 1) {
+        tsMgmtShellPort = atoi(argv[++i]);
+      } else {
+        fprintf(stderr, "option -P requires an argument\n");
+        exit(EXIT_FAILURE);
+      }
+    }
+    // for user
+    else if (strcmp(argv[i], "-u") == 0) {
+      if (i < argc - 1) {
+        arguments->user = argv[++i];
+      } else {
+        fprintf(stderr, "option -u requires an argument\n");
+        exit(EXIT_FAILURE);
+      }
+    } else if (strcmp(argv[i], "-c") == 0) {
+      if (i < argc - 1) {
+        strcpy(configDir, argv[++i]);
+      } else {
+        fprintf(stderr, "Option -c requires an argument\n");
+        exit(EXIT_FAILURE);
+      }
+    } else if (strcmp(argv[i], "-s") == 0) {
+      if (i < argc - 1) {
+        arguments->commands = argv[++i];
+      } else {
+        fprintf(stderr, "option -s requires an argument\n");
+        exit(EXIT_FAILURE);
+      }
+    } else if (strcmp(argv[i], "-r") == 0) {
+      arguments->is_raw_time = true;
+    }
+    // For running batch commands from a script file, TODO
+    else if (strcmp(argv[i], "-f") == 0) {
+      if (i < argc - 1) {
+        strcpy(arguments->file, argv[++i]);
+      } else {
+        fprintf(stderr, "option -f requires an argument\n");
+        exit(EXIT_FAILURE);
+      }
+    }
+    // for default database
+    else if (strcmp(argv[i], "-d") == 0) {
+      if (i < argc - 1) {
+        arguments->database = argv[++i];
+      } else {
+        fprintf(stderr, "option -d requires an argument\n");
+        exit(EXIT_FAILURE);
+      }
+    }
+    // For time zone
+    else if (strcmp(argv[i], "-t") == 0) {
+      if (i < argc - 1) {
+        arguments->timezone = argv[++i];
+      } else {
+        fprintf(stderr, "option -t requires an argument\n");
+        exit(EXIT_FAILURE);
+      }
+    }
+    // For import directory
+    else if (strcmp(argv[i], "-D") == 0) {
+      if (i < argc - 1) {
+        if (wordexp(argv[++i], &full_path, 0) != 0) {
+          fprintf(stderr, "Invalid path %s\n", argv[i]);
+          exit(EXIT_FAILURE);
+        }
+        strcpy(arguments->dir, full_path.we_wordv[0]);
+        wordfree(&full_path);
+      } else {
+        fprintf(stderr, "option -D requires an argument\n");
+        exit(EXIT_FAILURE);
+      }
+    }
+    // For number of import threads
+    else if (strcmp(argv[i], "-T") == 0) {
+      if (i < argc - 1) {
+        arguments->threadNum = atoi(argv[++i]);
+      } else {
+        fprintf(stderr, "option -T requires an argument\n");
+        exit(EXIT_FAILURE);
+      }
+    }
+    // For the help option
+    else if (strcmp(argv[i], "--help") == 0) {
+      printHelp();
+      exit(EXIT_FAILURE);
+    } else {
+      fprintf(stderr, "wrong options\n");
+      printHelp();
+      exit(EXIT_FAILURE);
+    }
+  }
+}
+
+void shellReadCommand(TAOS *con, char *command) {
+  unsigned hist_counter = history.hend;
+  char utf8_array[10] = "\0";
+  Command cmd;
+  memset(&cmd, 0, sizeof(cmd));
+  cmd.buffer = (char *)calloc(1, MAX_COMMAND_SIZE);
+  cmd.command = (char *)calloc(1, MAX_COMMAND_SIZE);
+  showOnScreen(&cmd);
+
+  // Read input.
+  char c;
+  while (1) {
+    c = getchar();
+
+    if (c < 0) {  // For UTF-8
+      int count = countPrefixOnes(c);
+      utf8_array[0] = c;
+      for (int k = 1; k < count; k++) {
+        c = getchar();
+        utf8_array[k] = c;
+      }
+      insertChar(&cmd, utf8_array, count);
+    } else if (c < '\033') {
+      // Ctrl keys.  TODO: Implement ctrl combinations
+      switch (c) {
+        case 1:  // ctrl A
+          positionCursorHome(&cmd);
+          break;
+        case 3:  // ctrl C
+          printf("\n");
+          resetCommand(&cmd, "");
+          kill(0, SIGINT);
+          break;
+        case 4:  // EOF or Ctrl+D
+          printf("\n");
+          taos_close(con);
+          // write the history
+          write_history();
+          exitShell();
+          break;
+        case 5:  // ctrl E
+          positionCursorEnd(&cmd);
+          break;
+        case 8:  // ctrl H (backspace)
+          backspaceChar(&cmd);
+          break;
+        case '\n':
+        case '\r':
+          printf("\n");
+          if (isReadyGo(&cmd)) {
+            sprintf(command, "%s%s", cmd.buffer, cmd.command);
+            tfree(cmd.buffer);
+            tfree(cmd.command);
+            return;
+          } else {
+            updateBuffer(&cmd);
+          }
+          break;
+        case 12:  // ctrl L, clear the screen
+          system("clear");
+          showOnScreen(&cmd);
+          break;
+      }
+    } else if (c == '\033') {
+      c = getchar();
+      switch (c) {
+        case '[':
+          c = getchar();
+          switch (c) {
+            case 'A':  // Up arrow
+              if (hist_counter != history.hstart) {
+                hist_counter = (hist_counter + MAX_HISTORY_SIZE - 1) % MAX_HISTORY_SIZE;
+                resetCommand(&cmd, (history.hist[hist_counter] == NULL) ? "" : history.hist[hist_counter]);
+              }
+              break;
+            case 'B':  // Down arrow
+              if (hist_counter != history.hend) {
+                int next_hist = (hist_counter + 1) % MAX_HISTORY_SIZE;
+
+                if (next_hist != history.hend) {
+                  resetCommand(&cmd, (history.hist[next_hist] == NULL) ?
"" : history.hist[next_hist]); + } else { + resetCommand(&cmd, ""); + } + hist_counter = next_hist; + } + break; + case 'C': // Right arrow + moveCursorRight(&cmd); + break; + case 'D': // Left arrow + moveCursorLeft(&cmd); + break; + case '1': + if ((c = getchar()) == '~') { + // Home key + positionCursorHome(&cmd); + } + break; + case '2': + if ((c = getchar()) == '~') { + // Insert key + } + break; + case '3': + if ((c = getchar()) == '~') { + // Delete key + deleteChar(&cmd); + } + break; + case '4': + if ((c = getchar()) == '~') { + // End key + positionCursorEnd(&cmd); + } + break; + case '5': + if ((c = getchar()) == '~') { + // Page up key + } + break; + case '6': + if ((c = getchar()) == '~') { + // Page down key + } + break; + case 72: + // Home key + positionCursorHome(&cmd); + break; + case 70: + // End key + positionCursorEnd(&cmd); + break; + } + break; + } + } else if (c == 0x7f) { + // press delete key + backspaceChar(&cmd); + } else { + insertChar(&cmd, &c, 1); + } + } +} + +void *shellLoopQuery(void *arg) { + if (indicator) { + get_old_terminal_mode(&oldtio); + indicator = 0; + } + + TAOS *con = (TAOS *)arg; + + pthread_cleanup_push(cleanup_handler, NULL); + + char *command = malloc(MAX_COMMAND_SIZE); + if (command == NULL){ + tscError("failed to malloc command"); + return NULL; + } + while (1) { + // Read command from shell. + + memset(command, 0, MAX_COMMAND_SIZE); + set_terminal_mode(); + shellReadCommand(con, command); + reset_terminal_mode(); + + // Run the command + shellRunCommand(con, command); + } + + pthread_cleanup_pop(1); + + return NULL; +} + +void shellPrintNChar(char *str, int width, bool printMode) { + int col_left = width; + wchar_t wc; + while (col_left > 0) { + if (*str == '\0') break; + char *tstr = str; + int byte_width = mbtowc(&wc, tstr, MB_CUR_MAX); + if (byte_width <= 0) break; + int col_width = wcwidth(wc); + if (col_width <= 0) { + str += byte_width; + continue; + } + if (col_left < col_width) break; + printf("%lc", wc); + str += byte_width; + col_left -= col_width; + } + + while (col_left > 0) { + printf(" "); + col_left--; + } + + if (!printMode) { + printf("|"); + } else { + printf("\n"); + } +} + +int get_old_terminal_mode(struct termios *tio) { + /* Make sure stdin is a terminal. */ + if (!isatty(STDIN_FILENO)) { + return -1; + } + + // Get the parameter of current terminal + if (tcgetattr(0, &oldtio) != 0) { + return -1; + } + + return 1; +} + +void reset_terminal_mode() { + if (tcsetattr(0, TCSANOW, &oldtio) != 0) { + fprintf(stderr, "Fail to reset the terminal properties!\n"); + exit(EXIT_FAILURE); + } +} + +void set_terminal_mode() { + struct termios newtio; + + /* if (atexit(reset_terminal_mode) != 0) { */ + /* fprintf(stderr, "Error register exit function!\n"); */ + /* exit(EXIT_FAILURE); */ + /* } */ + + memcpy(&newtio, &oldtio, sizeof(oldtio)); + + // Set new terminal attributes. 
+ newtio.c_iflag &= ~(IXON | IXOFF | ICRNL | INLCR | IGNCR | IMAXBEL | ISTRIP); + newtio.c_iflag |= IGNBRK; + + // newtio.c_oflag &= ~(OPOST|ONLCR|OCRNL|ONLRET); + newtio.c_oflag |= OPOST; + newtio.c_oflag |= ONLCR; + newtio.c_oflag &= ~(OCRNL | ONLRET); + + newtio.c_lflag &= ~(IEXTEN | ICANON | ECHO | ECHOE | ECHONL | ECHOCTL | ECHOPRT | ECHOKE | ISIG); + newtio.c_cc[VMIN] = 1; + newtio.c_cc[VTIME] = 0; + + if (tcsetattr(0, TCSANOW, &newtio) != 0) { + fprintf(stderr, "Fail to set terminal properties!\n"); + exit(EXIT_FAILURE); + } +} + +void get_history_path(char *history) { sprintf(history, "%s/%s", getpwuid(getuid())->pw_dir, HISTORY_FILE); } + +void clearScreen(int ecmd_pos, int cursor_pos) { + struct winsize w; + ioctl(0, TIOCGWINSZ, &w); + + int cursor_x = cursor_pos / w.ws_col; + int cursor_y = cursor_pos % w.ws_col; + int command_x = ecmd_pos / w.ws_col; + positionCursor(cursor_y, LEFT); + positionCursor(command_x - cursor_x, DOWN); + fprintf(stdout, "\033[2K"); + for (int i = 0; i < command_x; i++) { + positionCursor(1, UP); + fprintf(stdout, "\033[2K"); + } + fflush(stdout); +} + +void showOnScreen(Command *cmd) { + struct winsize w; + if (ioctl(0, TIOCGWINSZ, &w) < 0 || w.ws_col == 0 || w.ws_row == 0) { + fprintf(stderr, "No stream device\n"); + exit(EXIT_FAILURE); + } + + wchar_t wc; + int size = 0; + + // Print out the command. + char *total_string = malloc(MAX_COMMAND_SIZE); + memset(total_string, '\0', MAX_COMMAND_SIZE); + if (strcmp(cmd->buffer, "") == 0) { + sprintf(total_string, "%s%s", PROMPT_HEADER, cmd->command); + } else { + sprintf(total_string, "%s%s", CONTINUE_PROMPT, cmd->command); + } + + int remain_column = w.ws_col; + /* size = cmd->commandSize + prompt_size; */ + for (char *str = total_string; size < cmd->commandSize + prompt_size;) { + int ret = mbtowc(&wc, str, MB_CUR_MAX); + if (ret < 0) break; + size += ret; + /* assert(size >= 0); */ + int width = wcwidth(wc); + if (remain_column > width) { + printf("%lc", wc); + remain_column -= width; + } else { + if (remain_column == width) { + printf("%lc\n\r", wc); + remain_column = w.ws_col; + } else { + printf("\n\r%lc", wc); + remain_column = w.ws_col - width; + } + } + + str = total_string + size; + } + + free(total_string); + /* for (int i = 0; i < size; i++){ */ + /* char c = total_string[i]; */ + /* if (k % w.ws_col == 0) { */ + /* printf("%c\n\r", c); */ + /* } */ + /* else { */ + /* printf("%c", c); */ + /* } */ + /* k += 1; */ + /* } */ + + // Position the cursor + int cursor_pos = cmd->screenOffset + prompt_size; + int ecmd_pos = cmd->endOffset + prompt_size; + + int cursor_x = cursor_pos / w.ws_col; + int cursor_y = cursor_pos % w.ws_col; + // int cursor_y = cursor % w.ws_col; + int command_x = ecmd_pos / w.ws_col; + int command_y = ecmd_pos % w.ws_col; + // int command_y = (command.size() + prompt_size) % w.ws_col; + positionCursor(command_y, LEFT); + positionCursor(command_x, UP); + positionCursor(cursor_x, DOWN); + positionCursor(cursor_y, RIGHT); + fflush(stdout); +} + +void cleanup_handler(void *arg) { tcsetattr(0, TCSANOW, &oldtio); } + +void exitShell() { + tcsetattr(0, TCSANOW, &oldtio); + exit(EXIT_SUCCESS); +} diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 1ca969a4682f99eb0ca8329eb03bd3c06a8f2438..27a4aaaa0cd24d84b46003ec3195f130d3ee8c85 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -21,28 +21,13 @@ #include "shellCommand.h" #include "ttime.h" #include "tutil.h" +#include "taoserror.h" #include /**************** Global 
variables ****************/ -#ifdef WINDOWS - char CLIENT_VERSION[] = "Welcome to the TDengine shell from windows, client version:%s "; -#elif defined(DARWIN) - char CLIENT_VERSION[] = "Welcome to the TDengine shell from mac, client version:%s "; -#else - #ifdef CLUSTER - char CLIENT_VERSION[] = "Welcome to the TDengine shell from linux, enterprise client version:%s "; - #else - char CLIENT_VERSION[] = "Welcome to the TDengine shell from linux, community client version:%s "; - #endif -#endif - -#ifdef CLUSTER - char SERVER_VERSION[] = "enterprise server version:%s\nCopyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; -#else - char SERVER_VERSION[] = "community server version:%s\nCopyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; -#endif - +char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "taos> "; char CONTINUE_PROMPT[] = " -> "; int prompt_size = 6; @@ -54,7 +39,7 @@ History history; */ TAOS *shellInit(struct arguments *args) { printf("\n"); - printf(CLIENT_VERSION, taos_get_client_info()); + printf(CLIENT_VERSION, osName, taos_get_client_info()); fflush(stdout); // set options before initializing @@ -111,7 +96,7 @@ TAOS *shellInit(struct arguments *args) { exit(EXIT_SUCCESS); } -#ifdef LINUX +#ifndef WINDOWS if (args->dir[0] != 0) { source_dir(con, args); taos_close(con); @@ -119,8 +104,6 @@ TAOS *shellInit(struct arguments *args) { } #endif - printf(SERVER_VERSION, taos_get_server_info(con)); - return con; } @@ -751,7 +734,7 @@ int isCommentLine(char *line) { void source_file(TAOS *con, char *fptr) { wordexp_t full_path; int read_len = 0; - char * cmd = malloc(MAX_COMMAND_SIZE); + char * cmd = calloc(1, MAX_COMMAND_SIZE); size_t cmd_len = 0; char * line = NULL; size_t line_len = 0; @@ -817,11 +800,16 @@ void source_file(TAOS *con, char *fptr) { } void shellGetGrantInfo(void *con) { -#ifdef CLUSTER char sql[] = "show grants"; - if (taos_query(con, sql)) { - fprintf(stdout, "\n"); + int code = taos_query(con, sql); + + if (code != TSDB_CODE_SUCCESS) { + if (code == TSDB_CODE_OPS_NOT_SUPPORT) { + fprintf(stdout, "Server is Community Edition, version is %s\n\n", taos_get_server_info(con)); + } else { + fprintf(stderr, "Failed to check Server Edition, Reason:%d:%s\n\n", taos_errno(con), taos_errstr(con)); + } return; } @@ -843,18 +831,18 @@ void shellGetGrantInfo(void *con) { exit(0); } - char version[32] = {0}; + char serverVersion[32] = {0}; char expiretime[32] = {0}; char expired[32] = {0}; - memcpy(version, row[0], fields[0].bytes); + memcpy(serverVersion, row[0], fields[0].bytes); memcpy(expiretime, row[1], fields[1].bytes); memcpy(expired, row[2], fields[2].bytes); if (strcmp(expiretime, "unlimited") == 0) { - fprintf(stdout, "This is the %s version and will never expire.\n", version); + fprintf(stdout, "Server is Enterprise %s Edition, version is %s and will never expire.\n", serverVersion, taos_get_server_info(con)); } else { - fprintf(stdout, "This is the %s version and will expire at %s.\n", version, expiretime); + fprintf(stdout, "Server is Enterprise %s Edition, version is %s and will expire at %s.\n", serverVersion, taos_get_server_info(con), expiretime); } taos_free_result(result); @@ -862,5 +850,4 @@ void shellGetGrantInfo(void *con) { } fprintf(stdout, "\n"); -#endif } diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index 
10576348038ff898a9e4b9e2b672ad9814aa660d..dd04f935e7a30f6a8775b831c3ec726855f520f4 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -90,20 +90,12 @@ static void shellParseDirectory(const char *directoryName, const char *prefix, c static void shellCheckTablesSQLFile(const char *directoryName) { - char cmd[1024] = { 0 }; - sprintf(cmd, "ls %s/tables.sql", directoryName); + sprintf(shellTablesSQLFile, "%s/tables.sql", directoryName); - FILE *fp = popen(cmd, "r"); - if (fp == NULL) { - fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); - exit(0); + struct stat fstat; + if (stat(shellTablesSQLFile, &fstat) < 0) { + shellTablesSQLFile[0] = 0; } - - while (fscanf(fp, "%s", shellTablesSQLFile)) { - break; - } - - pclose(fp); } static void shellMallocSQLFiles() diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index bcb9c7b4e7d219f98251d6f4111675507fd97eea..081b9eae319f3570ab11b67e075292648dd76161 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -119,13 +119,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { static struct argp argp = {options, parse_opt, args_doc, doc}; void shellParseArgument(int argc, char *argv[], struct arguments *arguments) { - char verType[32] = {0}; - #ifdef CLUSTER - sprintf(verType, "enterprise version: %s\n", version); - #else - sprintf(verType, "community version: %s\n", version); - #endif - + static char verType[32] = {0}; + sprintf(verType, "version: %s\n", version); + argp_program_version = verType; argp_parse(&argp, argc, argv, 0, 0, arguments); diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 4b7b073eed5216d8903e82e52ef89eccaaeb6dae..24855ab8b51a87aed91ac10e8278941fe60bfee1 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -17,6 +17,7 @@ #include #include +#include #ifndef _ALPINE #include @@ -351,6 +352,11 @@ int main(int argc, char *argv[]) { } FILE *fp = fopen(arguments.output_file, "a"); + if (NULL == fp) { + fprintf(stderr, "Failed to open %s for writing\n", arguments.output_file); + return 1; + }; + time_t tTime = time(NULL); struct tm tm = *localtime(&tTime); @@ -570,7 +576,7 @@ void *readTable(void *sarg) { double totalT = 0; int count = 0; for (int i = 0; i < num_of_tables; i++) { - sprintf(command, "select %s from %s%d where ts>= %ld", aggreFunc[j], tb_prefix, i, sTime); + sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime); double t = getCurrentTime(); if (taos_query(taos, command) != 0) { @@ -813,7 +819,7 @@ double getCurrentTime() { void generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary) { memset(res, 0, MAX_DATA_SIZE); char *pstr = res; - pstr += sprintf(pstr, "(%ld", timestamp); + pstr += sprintf(pstr, "(%" PRId64, timestamp); int c = 0; for (; c < MAX_NUM_DATATYPE; c++) { @@ -830,7 +836,7 @@ void generateData(char *res, char **data_type, int num_of_cols, int64_t timestam } else if (strcasecmp(data_type[i % c], "int") == 0) { pstr += sprintf(pstr, ", %d", (int)(rand() % 10)); } else if (strcasecmp(data_type[i % c], "bigint") == 0) { - pstr += sprintf(pstr, ", %ld", rand() % 2147483648); + pstr += sprintf(pstr, ", %" PRId64, rand() % 2147483648); } else if (strcasecmp(data_type[i % c], "float") == 0) { pstr += sprintf(pstr, ", %10.4f", (float)(rand() / 1000)); } else if (strcasecmp(data_type[i % c], "double") == 0) { @@ -842,7 +848,7 @@ void 
generateData(char *res, char **data_type, int num_of_cols, int64_t timestam } else if (strcasecmp(data_type[i % c], "binary") == 0) { char s[len_of_binary]; rand_string(s, len_of_binary); - pstr += sprintf(pstr, ", %s", s); + pstr += sprintf(pstr, ", \"%s\"", s); } } diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index f722d24c26859900692b62d088f5e2a7ddfd9ef7..8cf015b342649ff4e099a51a2c4b7fe841da3db3 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -797,7 +797,10 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI if (metric != NULL && metric[0] != '\0') { // dump metric definition count = taosGetTableDes(metric, tableDes); - if (count < 0) return -1; + if (count < 0) { + free(tableDes); + return -1; + } taosDumpCreateTableClause(tableDes, count, arguments, fp); @@ -805,18 +808,26 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI count = taosGetTableDes(table, tableDes); - if (count < 0) return -1; + if (count < 0) { + free(tableDes); + return -1; + } taosDumpCreateMTableClause(tableDes, metric, count, arguments, fp); } else { // dump table definition count = taosGetTableDes(table, tableDes); - if (count < 0) return -1; + if (count < 0) { + free(tableDes); + return -1; + } taosDumpCreateTableClause(tableDes, count, arguments, fp); } + free(tableDes); + return taosDumpTableData(fp, table, arguments); } @@ -879,7 +890,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { if (arguments->schemaonly) return 0; - sprintf(command, "select * from %s where _c0 >= %ld and _c0 <= %ld order by _c0 asc", tbname, arguments->start_time, + sprintf(command, "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", tbname, arguments->start_time, arguments->end_time); if (taos_query(taos, command) != 0) { fprintf(stderr, "failed to run command %s, reason: %s\n", command, taos_errstr(taos)); @@ -933,13 +944,13 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { pstr += sprintf(pstr, "%d", *((int *)row[col])); break; case TSDB_DATA_TYPE_BIGINT: - pstr += sprintf(pstr, "%ld", *((int64_t *)row[col])); + pstr += sprintf(pstr, "%" PRId64 "", *((int64_t *)row[col])); break; case TSDB_DATA_TYPE_FLOAT: - pstr += sprintf(pstr, "%f", *((float *)row[col])); + pstr += sprintf(pstr, "%f", GET_FLOAT_VAL(row[col])); break; case TSDB_DATA_TYPE_DOUBLE: - pstr += sprintf(pstr, "%f", *((double *)row[col])); + pstr += sprintf(pstr, "%f", GET_DOUBLE_VAL(row[col])); break; case TSDB_DATA_TYPE_BINARY: *(pstr++) = '\''; @@ -952,7 +963,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { pstr += sprintf(pstr, "\'%s\'", tbuf); break; case TSDB_DATA_TYPE_TIMESTAMP: - pstr += sprintf(pstr, "%ld", *(int64_t *)row[col]); + pstr += sprintf(pstr, "%" PRId64 "", *(int64_t *)row[col]); break; default: break; @@ -1134,7 +1145,7 @@ int taosDumpIn(struct arguments *arguments) { } taosReplaceCtrlChar(tcommand); if (taos_query(taos, tcommand) != 0) - fprintf(stderr, "linenu: %ld failed to run command %s reason:%s \ncontinue...\n", linenu, command, + fprintf(stderr, "linenu: %" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command, taos_errstr(taos)); pstr = command; @@ -1182,7 +1193,7 @@ int taosDumpIn(struct arguments *arguments) { } taosReplaceCtrlChar(tcommand); if (taos_query(taos, tcommand) != 0) - fprintf(stderr, "linenu:%ld failed to run command %s reason: %s \ncontinue...\n", linenu, 
command, + fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason: %s \ncontinue...\n", linenu, command, taos_errstr(taos)); } @@ -1205,7 +1216,7 @@ int taosDumpIn(struct arguments *arguments) { } taosReplaceCtrlChar(lcommand); if (taos_query(taos, tcommand) != 0) - fprintf(stderr, "linenu:%ld failed to run command %s reason:%s \ncontinue...\n", linenu, command, + fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command, taos_errstr(taos)); } diff --git a/src/modules/http/src/gcJson.c b/src/modules/http/src/gcJson.c index ecd923564473a534018df4bcfd8269e9da55fb14..1a86c5d24f23ec62bb1c51aabdd0639940edc54a 100644 --- a/src/modules/http/src/gcJson.c +++ b/src/modules/http/src/gcJson.c @@ -79,7 +79,9 @@ void gcStopQueryJson(HttpContext *pContext, HttpSqlCmd *cmd) { if (jsonBuf == NULL) return; // write end of target - gcWriteTargetEndJson(jsonBuf); + if (cmd->numOfRows != 0) { + gcWriteTargetEndJson(jsonBuf); + } } bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows) { @@ -116,8 +118,8 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, if (groupFields == -1 && cmd->numOfRows == 0) { gcWriteTargetStartJson(jsonBuf, refIdBuffer, aliasBuffer); - cmd->numOfRows += numOfRows; } + cmd->numOfRows += numOfRows; for (int k = 0; k < numOfRows; ++k) { TAOS_ROW row = taos_fetch_row(result); @@ -158,7 +160,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, } break; default: - snprintf(target, HTTP_GC_TARGET_SIZE, "%s%s", aliasBuffer, "invalidcol"); + snprintf(target, HTTP_GC_TARGET_SIZE, "%s%s", aliasBuffer, "-"); break; } @@ -217,7 +219,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, } break; default: - httpJsonString(jsonBuf, "invalidcol", 10); + httpJsonString(jsonBuf, "-", 1); break; } } diff --git a/src/modules/http/src/httpJson.c b/src/modules/http/src/httpJson.c index 2bb768e8016d27eb53fe15223e2c70134493176b..5d5d29f4e0fae91c8e30bfb3aaa78fb440b8a188 100644 --- a/src/modules/http/src/httpJson.c +++ b/src/modules/http/src/httpJson.c @@ -119,7 +119,7 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) { return 0; // there is no data to dump. 
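Many of the hunks above replace `%ld`/`%lld` with the `<inttypes.h>` format macros. A minimal standalone sketch (not project code) of why this matters: `long` is 64-bit on LP64 Linux but only 32-bit on Windows (LLP64), so `%ld` paired with an `int64_t` argument is undefined behavior there, whereas `PRId64`/`PRIu64` expand to the correct conversion specifier on every platform.

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t  ts  = 1546300800000LL;  // a millisecond-precision timestamp
  uint64_t len = 4096;

  // Non-portable: "%ld" only matches int64_t where long is 64 bits.
  // printf("ts: %ld\n", ts);      // breaks on Windows/LLP64 targets

  // Portable: PRId64 expands to "ld", "lld", or "I64d" as appropriate.
  printf("ts: %" PRId64 "\n", ts);
  printf("chunkSize: %" PRIu64 "\n", len);
  return 0;
}
```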
} else { int len = sprintf(sLen, "%lx\r\n", srcLen); - httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%ld, response:\n%s", + httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", response:\n%s", buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, buf->buf); httpWriteBufNoTrace(buf->pContext, sLen, len); remain = httpWriteBufNoTrace(buf->pContext, buf->buf, (int) srcLen); @@ -131,7 +131,7 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) { if (ret == 0) { if (compressBufLen > 0) { int len = sprintf(sLen, "%x\r\n", compressBufLen); - httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%ld, compressSize:%d, last:%d, response:\n%s", + httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", compressSize:%d, last:%d, response:\n%s", buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, compressBufLen, isTheLast, buf->buf); httpWriteBufNoTrace(buf->pContext, sLen, len); remain = httpWriteBufNoTrace(buf->pContext, (const char *) compressBuf, (int) compressBufLen); @@ -257,7 +257,7 @@ void httpJsonStringForTransMean(JsonBuf* buf, char* sVal, int maxLen) { void httpJsonInt64(JsonBuf* buf, int64_t num) { httpJsonItemToken(buf); httpJsonTestBuf(buf, MAX_NUM_STR_SZ); - buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%ld", num); + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%" PRId64, num); } void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us) { @@ -310,7 +310,9 @@ void httpJsonInt(JsonBuf* buf, int num) { void httpJsonFloat(JsonBuf* buf, float num) { httpJsonItemToken(buf); httpJsonTestBuf(buf, MAX_NUM_STR_SZ); - if (num > 1E10 || num < -1E10) { + if (isinf(num) || isnan(num)) { + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "null"); + } else if (num > 1E10 || num < -1E10) { buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.5e", num); } else { buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.5f", num); @@ -320,7 +322,9 @@ void httpJsonFloat(JsonBuf* buf, float num) { void httpJsonDouble(JsonBuf* buf, double num) { httpJsonItemToken(buf); httpJsonTestBuf(buf, MAX_NUM_STR_SZ); - if (num > 1E10 || num < -1E10) { + if (isinf(num) || isnan(num)) { + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "null"); + } else if (num > 1E10 || num < -1E10) { buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.9e", num); } else { buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.9f", num); diff --git a/src/modules/http/src/restHandle.c b/src/modules/http/src/restHandle.c index 58509e693d8a76279c85c865825661e9a972efcd..a3077008661590e9813b6f3ec91b6c15d9e42902 100644 --- a/src/modules/http/src/restHandle.c +++ b/src/modules/http/src/restHandle.c @@ -67,10 +67,16 @@ bool restProcessSqlRequest(HttpContext* pContext, int timestampFmt) { return false; } + + /* + * for async test + */ + /* if (httpCheckUsedbSql(sql)) { httpSendErrorResp(pContext, HTTP_NO_EXEC_USEDB); return false; } + */ HttpSqlCmd* cmd = &(pContext->singleCmd); cmd->nativSql = sql; diff --git a/src/modules/http/src/restJson.c b/src/modules/http/src/restJson.c index 6c04d39f45f91e93a94dbe5b71dd0aa606979b2b..7e98472d538b2f3b8733936d98ce182162336a91 100644 --- a/src/modules/http/src/restJson.c +++ b/src/modules/http/src/restJson.c @@ -152,7 +152,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, } if (cmd->numOfRows >= tsRestRowLimit) { - httpTrace("context:%p, fd:%d, ip:%s, user:%s, retrieve rows:%lld larger than limit:%d, abort retrieve", pContext, + httpTrace("context:%p, fd:%d, ip:%s, user:%s, retrieve rows:%d larger than limit:%d,
abort retrieve", pContext, pContext->fd, pContext->ipstr, pContext->user, cmd->numOfRows, tsRestRowLimit); return false; } @@ -163,7 +163,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, return false; } else { - httpTrace("context:%p, fd:%d, ip:%s, user:%s, total rows:%lld retrieved", pContext, pContext->fd, pContext->ipstr, + httpTrace("context:%p, fd:%d, ip:%s, user:%s, total rows:%d retrieved", pContext, pContext->fd, pContext->ipstr, pContext->user, cmd->numOfRows); return true; } diff --git a/src/modules/http/src/tgHandle.c b/src/modules/http/src/tgHandle.c index cec1e40c4c4cd0efc3bb5d1c1ab811ee811c9773..b9adf5416274d8f380d8b00a033f2d83fa249084 100644 --- a/src/modules/http/src/tgHandle.c +++ b/src/modules/http/src/tgHandle.c @@ -572,7 +572,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { orderTagsLen = orderTagsLen < TSDB_MAX_TAGS ? orderTagsLen : TSDB_MAX_TAGS; table_cmd->tagNum = stable_cmd->tagNum = (int8_t)orderTagsLen; - table_cmd->timestamp = stable_cmd->timestamp = httpAddToSqlCmdBuffer(pContext, "%ld", timestamp->valueint); + table_cmd->timestamp = stable_cmd->timestamp = httpAddToSqlCmdBuffer(pContext, "%" PRId64, timestamp->valueint); // stable name char *stname = tgGetStableName(name->valuestring, fields, fieldsSize); @@ -593,7 +593,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { if (tag->type == cJSON_String) stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "'%s'", tag->valuestring); else if (tag->type == cJSON_Number) - stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "%ld", tag->valueint); + stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "%" PRId64, tag->valueint); else if (tag->type == cJSON_True) stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "1"); else if (tag->type == cJSON_False) @@ -614,7 +614,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { if (tag->type == cJSON_String) httpAddToSqlCmdBufferNoTerminal(pContext, "_%s", tag->valuestring); else if (tag->type == cJSON_Number) - httpAddToSqlCmdBufferNoTerminal(pContext, "_%ld", tag->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, "_%" PRId64, tag->valueint); else if (tag->type == cJSON_False) httpAddToSqlCmdBufferNoTerminal(pContext, "_0"); else if (tag->type == cJSON_True) @@ -670,7 +670,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { cJSON *tag = orderedTags[i]; if (i != orderTagsLen - 1) { if (tag->type == cJSON_Number) - httpAddToSqlCmdBufferNoTerminal(pContext, "%ld,", tag->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, "%" PRId64 ",", tag->valueint); else if (tag->type == cJSON_String) httpAddToSqlCmdBufferNoTerminal(pContext, "'%s',", tag->valuestring); else if (tag->type == cJSON_False) @@ -682,7 +682,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { } } else { if (tag->type == cJSON_Number) - httpAddToSqlCmdBufferNoTerminal(pContext, "%ld)", tag->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, "%" PRId64 ")", tag->valueint); else if (tag->type == cJSON_String) httpAddToSqlCmdBufferNoTerminal(pContext, "'%s')", tag->valuestring); else if (tag->type == cJSON_False) @@ -695,7 +695,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { } } - httpAddToSqlCmdBufferNoTerminal(pContext, " values(%ld,", timestamp->valueint); 
+ httpAddToSqlCmdBufferNoTerminal(pContext, " values(%" PRId64 ",", timestamp->valueint); for (int i = 0; i < fieldsSize; ++i) { cJSON *field = cJSON_GetArrayItem(fields, i); if (i != fieldsSize - 1) { diff --git a/src/modules/monitor/src/monitorSystem.c b/src/modules/monitor/src/monitorSystem.c index 9d132e51ce379f64fadabc4a57e9073b7d905904..f403a272935b67db3f8cdded152b41be19f22fd5 100644 --- a/src/modules/monitor/src/monitorSystem.c +++ b/src/modules/monitor/src/monitorSystem.c @@ -217,9 +217,7 @@ void monitorInitDatabaseCb(void *param, TAOS_RES *result, int code) { if (monitor->cmdIndex == MONITOR_CMD_CREATE_TB_LOG) { taosLogFp = monitorSaveLog; taosLogSqlFp = monitorExecuteSQL; -#ifdef CLUSTER taosLogAcctFp = monitorSaveAcctLog; -#endif monitorLPrint("dnode:%s is started", tsPrivateIp); } monitor->cmdIndex++; diff --git a/src/os/darwin/inc/os.h b/src/os/darwin/inc/os.h index ea7a95c4dafd075570417ceaef4ef97c2dc5b207..1aececeec9ad3f3447fe46bb03dc61ddfb9ec8f2 100644 --- a/src/os/darwin/inc/os.h +++ b/src/os/darwin/inc/os.h @@ -1,57 +1,85 @@ /* -* Copyright (c) 2019 TAOS Data, Inc. -* -* This program is free software: you can use, redistribute, and/or modify -* it under the terms of the GNU Affero General Public License, version 3 -* or later ("AGPL"), as published by the Free Software Foundation. -* -* This program is distributed in the hope that it will be useful, but WITHOUT -* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -* FITNESS FOR A PARTICULAR PURPOSE. -* -* You should have received a copy of the GNU Affero General Public License -* along with this program. If not, see . -*/ - - -#ifndef TDENGINE_PLATFORM_DARWIN_H -#define TDENGINE_PLATFORM_DARWIN_H + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_PLATFORM_LINUX_H +#define TDENGINE_PLATFORM_LINUX_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include #include -#include -#include -#include -#include -#include -#include #include #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include +#include #include +#include #include +#include #include +#include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include #include #define htobe64 htonll #define taosCloseSocket(x) \ { \ - if (FD_VALID(x)) { \ + if (FD_VALID(x)) { \ close(x); \ - x = -1; \ + x = FD_INITIALIZER; \ } \ } + #define taosWriteSocket(fd, buf, len) write(fd, buf, len) #define taosReadSocket(fd, buf, len) read(fd, buf, len) @@ -160,7 +188,7 @@ (__a < __b) ? 
__a : __b; \ }) -#define MILLISECOND_PER_SECOND (1000L) +#define MILLISECOND_PER_SECOND ((int64_t)1000L) #define tsem_t dispatch_semaphore_t @@ -197,6 +225,10 @@ bool taosSkipSocketCheck(); bool taosGetDisk(); +int fsendfile(FILE* out_file, FILE* in_file, int64_t* offset, int32_t count); + +void taosSetCoreDump(); + typedef int(*__compar_fn_t)(const void *, const void *); // for send function in tsocket.c @@ -219,4 +251,8 @@ typedef int(*__compar_fn_t)(const void *, const void *); #define BUILDIN_CLZ(val) __builtin_clz(val) #define BUILDIN_CTZ(val) __builtin_ctz(val) -#endif \ No newline at end of file +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/os/darwin/src/tdarwin.c b/src/os/darwin/src/tdarwin.c index 133bb4893cc9aa2f8b561036ffaff6a53e0db3a7..af3b1bd8a50d9f52d5f81f4dd42f483c510866c9 100644 --- a/src/os/darwin/src/tdarwin.c +++ b/src/os/darwin/src/tdarwin.c @@ -33,11 +33,12 @@ #include "tsdb.h" #include "tutil.h" -char configDir[TSDB_FILENAME_LEN] = "~/TDengine/cfg"; -char tsDirectory[TSDB_FILENAME_LEN] = "~/TDengine/data"; -char dataDir[TSDB_FILENAME_LEN] = "~/TDengine/data"; -char logDir[TSDB_FILENAME_LEN] = "~/TDengine/log"; -char scriptDir[TSDB_FILENAME_LEN] = "~/TDengine/script"; +char configDir[TSDB_FILENAME_LEN] = "/etc/taos"; +char tsDirectory[TSDB_FILENAME_LEN] = "/var/lib/taos"; +char dataDir[TSDB_FILENAME_LEN] = "/var/lib/taos"; +char logDir[TSDB_FILENAME_LEN] = "~/TDengineLog"; +char scriptDir[TSDB_FILENAME_LEN] = "/etc/taos"; +char osName[] = "Darwin"; int64_t str2int64(char *str) { char *endptr = NULL; @@ -418,4 +419,43 @@ int32_t __sync_val_load_32(int32_t *ptr) { void __sync_val_restore_32(int32_t *ptr, int32_t newval) { __atomic_store_n(ptr, newval, __ATOMIC_RELEASE); -} \ No newline at end of file +} + +#define _SEND_FILE_STEP_ 1000 + +int fsendfile(FILE* out_file, FILE* in_file, int64_t* offset, int32_t count) { + fseek(in_file, (int32_t)(*offset), 0); + int writeLen = 0; + uint8_t buffer[_SEND_FILE_STEP_] = { 0 }; + + for (int len = 0; len < (count - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) { + size_t rlen = fread(buffer, 1, _SEND_FILE_STEP_, in_file); + if (rlen <= 0) { + return writeLen; + } + else if (rlen < _SEND_FILE_STEP_) { + fwrite(buffer, 1, rlen, out_file); + return (int)(writeLen + rlen); + } + else { + fwrite(buffer, 1, _SEND_FILE_STEP_, out_file); + writeLen += _SEND_FILE_STEP_; + } + } + + int remain = count - writeLen; + if (remain > 0) { + size_t rlen = fread(buffer, 1, remain, in_file); + if (rlen <= 0) { + return writeLen; + } + else { + fwrite(buffer, 1, remain, out_file); + writeLen += remain; + } + } + + return writeLen; +} + +void taosSetCoreDump() {} \ No newline at end of file diff --git a/src/os/linux/src/tlinux.c b/src/os/linux/src/tlinux.c index ccd6fc8a340f8132edc94f8639692eedfb2024c6..b81b98a5f7fb9cea96879c570a05fbca8e4e74b5 100755 --- a/src/os/linux/src/tlinux.c +++ b/src/os/linux/src/tlinux.c @@ -39,6 +39,7 @@ char tsDirectory[TSDB_FILENAME_LEN] = "/var/lib/taos"; char dataDir[TSDB_FILENAME_LEN] = "/var/lib/taos"; char logDir[TSDB_FILENAME_LEN] = "/var/log/taos"; char scriptDir[TSDB_FILENAME_LEN] = "/etc/taos"; +char osName[] = "Linux"; int64_t str2int64(char *str) { char *endptr = NULL; diff --git a/src/os/linux/src/tsystem.c b/src/os/linux/src/tsystem.c index c3b8b41c9d5536972d345efd4c6d1974ae9d77dd..8cd0e6943616f4ecb1f69ed100ca6535968005ae 100644 --- a/src/os/linux/src/tsystem.c +++ b/src/os/linux/src/tsystem.c @@ -517,7 +517,8 @@ bool taosGetProcIO(float *readKB, float *writeKB) { static int64_t lastReadbyte = -1;
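For reference, a small usage sketch for the stdio-based `fsendfile()` fallback added above (hypothetical file names, minimal error handling): it copies `count` bytes from `in_file` to `out_file` starting at `*offset`, reading in `_SEND_FILE_STEP_`-sized chunks and returning the number of bytes written.

```c
#include <stdint.h>
#include <stdio.h>

int fsendfile(FILE *out_file, FILE *in_file, int64_t *offset, int32_t count);

// Copy the first n bytes of src into dst through the fallback above.
int copy_prefix(const char *src, const char *dst, int32_t n) {
  FILE *in = fopen(src, "rb");
  if (in == NULL) return -1;
  FILE *out = fopen(dst, "wb");
  if (out == NULL) { fclose(in); return -1; }

  int64_t off  = 0;                        // start of the range to copy
  int     sent = fsendfile(out, in, &off, n);

  fclose(in);
  fclose(out);
  return (sent == n) ? 0 : -1;             // a short copy means EOF or error
}
```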
static int64_t lastWritebyte = -1; - int64_t curReadbyte, curWritebyte; + int64_t curReadbyte = 0; + int64_t curWritebyte = 0; if (!taosReadProcIO(&curReadbyte, &curWritebyte)) { return false; diff --git a/src/os/windows/inc/os.h b/src/os/windows/inc/os.h index 69ceed18f963c1658bc7334c37fa16401eceeac8..3f957e8abd489abdbe7bb009b3d4fa47aadb4467 100644 --- a/src/os/windows/inc/os.h +++ b/src/os/windows/inc/os.h @@ -17,10 +17,10 @@ #define TDENGINE_PLATFORM_WINDOWS_H #include -#include +#include #include -#include -#include +#include +#include #include #include #include @@ -35,13 +35,15 @@ #include #include #include -#include -#include -#include +#include +#include +#include #include #include "winsock2.h" #include +#include + #ifdef __cplusplus extern "C" { #endif @@ -75,7 +77,13 @@ extern "C" { #define strncasecmp _strnicmp #define wcsncasecmp _wcsnicmp #define strtok_r strtok_s -#define str2int64 _atoi64 +#ifdef _TD_GO_DLL_ + int64_t str2int64(char *str); + uint64_t htonll(uint64_t val); +#else + #define str2int64 _atoi64 +#endif + #define snprintf _snprintf #define in_addr_t unsigned long #define socklen_t int @@ -136,7 +144,12 @@ extern "C" { #define atomic_exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val)) #define atomic_exchange_ptr(ptr, val) _InterlockedExchangePointer((void* volatile*)(ptr), (void*)(val)) -#define atomic_val_compare_exchange_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval)) +#ifdef _TD_GO_DLL_ + #define atomic_val_compare_exchange_8 __sync_val_compare_and_swap +#else + #define atomic_val_compare_exchange_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval)) +#endif + #define atomic_val_compare_exchange_16(ptr, oldval, newval) _InterlockedCompareExchange16((short volatile*)(ptr), (short)(newval), (short)(oldval)) #define atomic_val_compare_exchange_32(ptr, oldval, newval) _InterlockedCompareExchange((long volatile*)(ptr), (long)(newval), (long)(oldval)) #define atomic_val_compare_exchange_64(ptr, oldval, newval) _InterlockedCompareExchange64((__int64 volatile*)(ptr), (__int64)(newval), (__int64)(oldval)) @@ -156,9 +169,14 @@ __int64 interlocked_add_fetch_64(__int64 volatile *ptr, __int64 val); #else #define atomic_add_fetch_ptr atomic_add_fetch_32 #endif +#ifdef _TD_GO_DLL_ + #define atomic_fetch_add_8 __sync_fetch_and_add + #define atomic_fetch_add_16 __sync_fetch_and_add +#else + #define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val)) + #define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val)) +#endif -#define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val)) -#define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val)) #define atomic_fetch_add_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), (long)(val)) #define atomic_fetch_add_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), (__int64)(val)) #ifdef _WIN64 @@ -186,14 +204,17 @@ __int64 interlocked_add_fetch_64(__int64 volatile *ptr, __int64 val); #else #define atomic_fetch_sub_ptr atomic_fetch_sub_32 #endif - -char interlocked_and_fetch_8(char volatile* ptr, char val); -short interlocked_and_fetch_16(short volatile* ptr, short val); +#ifndef _TD_GO_DLL_ + char interlocked_and_fetch_8(char volatile* ptr, char val); + short interlocked_and_fetch_16(short volatile* ptr, short val);
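The `_TD_GO_DLL_` guards above exist because the Go connector DLL is built with MinGW/GCC, which lacks MSVC's 8- and 16-bit interlocked intrinsics, so the GCC `__sync` builtins stand in for them. One detail worth spelling out (illustrative sketch, not project code): `__sync_fetch_and_add` returns the old value, so an add-and-fetch wrapper has to add the operand back to honor the `*_add_fetch_*` contract.

```c
#include <stdint.h>
#ifndef _TD_GO_DLL_
#include <intrin.h>  // MSVC interlocked intrinsics
#endif

// add-and-fetch built on top of a fetch-and-add primitive
char add_fetch_8(char volatile *ptr, char val) {
#ifdef _TD_GO_DLL_
  // GCC builtin: returns the value *before* the addition, so add val
  // once more to produce the post-increment result.
  return __sync_fetch_and_add(ptr, val) + val;
#else
  // MSVC intrinsic: likewise returns the old value.
  return _InterlockedExchangeAdd8(ptr, val) + val;
#endif
}
```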
+#endif long interlocked_and_fetch_32(long volatile* ptr, long val); __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val); -#define atomic_and_fetch_8(ptr, val) interlocked_and_fetch_8((char volatile*)(ptr), (char)(val)) -#define atomic_and_fetch_16(ptr, val) interlocked_and_fetch_16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_and_fetch_8(ptr, val) interlocked_and_fetch_8((char volatile*)(ptr), (char)(val)) + #define atomic_and_fetch_16(ptr, val) interlocked_and_fetch_16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_and_fetch_32(ptr, val) interlocked_and_fetch_32((long volatile*)(ptr), (long)(val)) #define atomic_and_fetch_64(ptr, val) interlocked_and_fetch_64((__int64 volatile*)(ptr), (__int64)(val)) #ifdef _WIN64 @@ -201,9 +222,10 @@ __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val); #else #define atomic_and_fetch_ptr atomic_and_fetch_32 #endif - -#define atomic_fetch_and_8(ptr, val) _InterlockedAnd8((char volatile*)(ptr), (char)(val)) -#define atomic_fetch_and_16(ptr, val) _InterlockedAnd16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_fetch_and_8(ptr, val) _InterlockedAnd8((char volatile*)(ptr), (char)(val)) + #define atomic_fetch_and_16(ptr, val) _InterlockedAnd16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_fetch_and_32(ptr, val) _InterlockedAnd((long volatile*)(ptr), (long)(val)) #ifdef _M_IX86 @@ -218,14 +240,17 @@ __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val); #else #define atomic_fetch_and_ptr atomic_fetch_and_32 #endif - -char interlocked_or_fetch_8(char volatile* ptr, char val); -short interlocked_or_fetch_16(short volatile* ptr, short val); +#ifndef _TD_GO_DLL_ + char interlocked_or_fetch_8(char volatile* ptr, char val); + short interlocked_or_fetch_16(short volatile* ptr, short val); +#endif long interlocked_or_fetch_32(long volatile* ptr, long val); __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val); -#define atomic_or_fetch_8(ptr, val) interlocked_or_fetch_8((char volatile*)(ptr), (char)(val)) -#define atomic_or_fetch_16(ptr, val) interlocked_or_fetch_16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_or_fetch_8(ptr, val) interlocked_or_fetch_8((char volatile*)(ptr), (char)(val)) + #define atomic_or_fetch_16(ptr, val) interlocked_or_fetch_16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_or_fetch_32(ptr, val) interlocked_or_fetch_32((long volatile*)(ptr), (long)(val)) #define atomic_or_fetch_64(ptr, val) interlocked_or_fetch_64((__int64 volatile*)(ptr), (__int64)(val)) #ifdef _WIN64 @@ -233,9 +258,10 @@ __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val); #else #define atomic_or_fetch_ptr atomic_or_fetch_32 #endif - -#define atomic_fetch_or_8(ptr, val) _InterlockedOr8((char volatile*)(ptr), (char)(val)) -#define atomic_fetch_or_16(ptr, val) _InterlockedOr16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_fetch_or_8(ptr, val) _InterlockedOr8((char volatile*)(ptr), (char)(val)) + #define atomic_fetch_or_16(ptr, val) _InterlockedOr16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_fetch_or_32(ptr, val) _InterlockedOr((long volatile*)(ptr), (long)(val)) #ifdef _M_IX86 @@ -251,13 +277,17 @@ __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val); #define atomic_fetch_or_ptr atomic_fetch_or_32 #endif -char interlocked_xor_fetch_8(char volatile* ptr, char val); -short interlocked_xor_fetch_16(short 
volatile* ptr, short val); +#ifndef _TD_GO_DLL_ + char interlocked_xor_fetch_8(char volatile* ptr, char val); + short interlocked_xor_fetch_16(short volatile* ptr, short val); +#endif long interlocked_xor_fetch_32(long volatile* ptr, long val); __int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val); -#define atomic_xor_fetch_8(ptr, val) interlocked_xor_fetch_8((char volatile*)(ptr), (char)(val)) -#define atomic_xor_fetch_16(ptr, val) interlocked_xor_fetch_16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_xor_fetch_8(ptr, val) interlocked_xor_fetch_8((char volatile*)(ptr), (char)(val)) + #define atomic_xor_fetch_16(ptr, val) interlocked_xor_fetch_16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_xor_fetch_32(ptr, val) interlocked_xor_fetch_32((long volatile*)(ptr), (long)(val)) #define atomic_xor_fetch_64(ptr, val) interlocked_xor_fetch_64((__int64 volatile*)(ptr), (__int64)(val)) #ifdef _WIN64 @@ -266,8 +296,10 @@ __int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val); #define atomic_xor_fetch_ptr atomic_xor_fetch_32 #endif -#define atomic_fetch_xor_8(ptr, val) _InterlockedXor8((char volatile*)(ptr), (char)(val)) -#define atomic_fetch_xor_16(ptr, val) _InterlockedXor16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_fetch_xor_8(ptr, val) _InterlockedXor8((char volatile*)(ptr), (char)(val)) + #define atomic_fetch_xor_16(ptr, val) _InterlockedXor16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_fetch_xor_32(ptr, val) _InterlockedXor((long volatile*)(ptr), (long)(val)) #ifdef _M_IX86 @@ -293,7 +325,11 @@ __int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val); #define MAX(a,b) (((a)>(b))?(a):(b)) #define MIN(a,b) (((a)<(b))?(a):(b)) -#define MILLISECOND_PER_SECOND (1000i64) +#ifdef _TD_GO_DLL_ + #define MILLISECOND_PER_SECOND (1000LL) +#else + #define MILLISECOND_PER_SECOND (1000i64) +#endif #define tsem_t sem_t #define tsem_init sem_init diff --git a/src/os/windows/src/twindows.c b/src/os/windows/src/twindows.c index 98be6b60ba16e52b2177971d95930f7f717785aa..30973165dfd03f6179f4fc36a83c4b4a32ed3e55 100644 --- a/src/os/windows/src/twindows.c +++ b/src/os/windows/src/twindows.c @@ -28,11 +28,16 @@ #include "tsdb.h" #include "tglobalcfg.h" +#include +#include +#include + char configDir[TSDB_FILENAME_LEN] = "C:/TDengine/cfg"; char tsDirectory[TSDB_FILENAME_LEN] = "C:/TDengine/data"; char logDir[TSDB_FILENAME_LEN] = "C:/TDengine/log"; char dataDir[TSDB_FILENAME_LEN] = "C:/TDengine/data"; char scriptDir[TSDB_FILENAME_LEN] = "C:/TDengine/script"; +char osName[] = "Windows"; bool taosCheckPthreadValid(pthread_t thread) { return thread.p != NULL; @@ -68,11 +73,19 @@ int taosSetSockOpt(int socketfd, int level, int optname, void *optval, int optle // add char interlocked_add_fetch_8(char volatile* ptr, char val) { - return _InterlockedExchangeAdd8(ptr, val) + val; + #ifdef _TD_GO_DLL_ + return __sync_fetch_and_add(ptr, val) + val; + #else + return _InterlockedExchangeAdd8(ptr, val) + val; + #endif } short interlocked_add_fetch_16(short volatile* ptr, short val) { - return _InterlockedExchangeAdd16(ptr, val) + val; + #ifdef _TD_GO_DLL_ + return __sync_fetch_and_add(ptr, val) + val; + #else + return _InterlockedExchangeAdd16(ptr, val) + val; + #endif } long interlocked_add_fetch_32(long volatile* ptr, long val) { @@ -84,6 +97,7 @@ __int64 interlocked_add_fetch_64(__int64 volatile* ptr, __int64 val) { } // and +#ifndef _TD_GO_DLL_ char interlocked_and_fetch_8(char volatile* ptr, 
char val) { return _InterlockedAnd8(ptr, val) & val; } @@ -91,6 +105,7 @@ char interlocked_and_fetch_8(char volatile* ptr, char val) { short interlocked_and_fetch_16(short volatile* ptr, short val) { return _InterlockedAnd16(ptr, val) & val; } +#endif long interlocked_and_fetch_32(long volatile* ptr, long val) { return _InterlockedAnd(ptr, val) & val; @@ -124,6 +139,7 @@ __int64 interlocked_fetch_and_64(__int64 volatile* ptr, __int64 val) { #endif // or +#ifndef _TD_GO_DLL_ char interlocked_or_fetch_8(char volatile* ptr, char val) { return _InterlockedOr8(ptr, val) | val; } @@ -131,7 +147,7 @@ char interlocked_or_fetch_8(char volatile* ptr, char val) { short interlocked_or_fetch_16(short volatile* ptr, short val) { return _InterlockedOr16(ptr, val) | val; } - +#endif long interlocked_or_fetch_32(long volatile* ptr, long val) { return _InterlockedOr(ptr, val) | val; } @@ -164,6 +180,7 @@ __int64 interlocked_fetch_or_64(__int64 volatile* ptr, __int64 val) { #endif // xor +#ifndef _TD_GO_DLL_ char interlocked_xor_fetch_8(char volatile* ptr, char val) { return _InterlockedXor8(ptr, val) ^ val; } @@ -171,7 +188,7 @@ char interlocked_xor_fetch_8(char volatile* ptr, char val) { short interlocked_xor_fetch_16(short volatile* ptr, short val) { return _InterlockedXor16(ptr, val) ^ val; } - +#endif long interlocked_xor_fetch_32(long volatile* ptr, long val) { return _InterlockedXor(ptr, val) ^ val; } @@ -396,4 +413,16 @@ char *strndup(const char *s, size_t n) { return r; } -void taosSetCoreDump() {} \ No newline at end of file +void taosSetCoreDump() {} + +#ifdef _TD_GO_DLL_ +int64_t str2int64(char *str) { + char *endptr = NULL; + return strtoll(str, &endptr, 10); +} + +uint64_t htonll(uint64_t val) +{ + return (((uint64_t) htonl(val)) << 32) + htonl(val >> 32); +} +#endif \ No newline at end of file diff --git a/src/rpc/src/trpc.c b/src/rpc/src/trpc.c index db1ca33841c502f2b983501880412d79bf02e175..9e0b5dab0f1f2ca587af3dd9f3233b355d1d80af 100755 --- a/src/rpc/src/trpc.c +++ b/src/rpc/src/trpc.c @@ -169,7 +169,7 @@ static int32_t taosCompressRpcMsg(char* pCont, int32_t contLen) { memcpy(pCont + overhead, buf, compLen); pHeader->comp = 1; - tTrace("compress rpc msg, before:%lld, after:%lld", contLen, compLen); + tTrace("compress rpc msg, before:%d, after:%d", contLen, compLen); finalLen = compLen + overhead; //tDump(pCont, contLen); @@ -906,7 +906,10 @@ int taosProcessMsgHeader(STaosHeader *pHeader, SRpcConn **ppConn, STaosRpc *pSer } if (taosAuthenticateMsg((uint8_t *)pHeader, dataLen - TSDB_AUTH_LEN, pDigest->auth, pConn->secret) < 0) { - tTrace("%s cid:%d sid:%d id:%s, authentication failed, msg discarded pConn:%p", pServer->label, chann, sid, + char ipstr[24]; + tinet_ntoa(ipstr, ip); + mLError("user:%s login from %s, authentication failed", pHeader->meterId, ipstr); + tError("%s cid:%d sid:%d id:%s, authentication failed, msg discarded pConn:%p", pServer->label, chann, sid, pConn->meterId, pConn); code = TSDB_CODE_AUTH_FAILURE; goto _exit; diff --git a/src/rpc/src/tstring.c b/src/rpc/src/tstring.c index 8e55cfe3f59818c42cc7db9826796bae08d7c8f9..a254ceecfd1f6ce13b1cc30f8c0c87b6b8edfca9 100644 --- a/src/rpc/src/tstring.c +++ b/src/rpc/src/tstring.c @@ -197,7 +197,7 @@ char *tsError[] = {"success", "invalid query handle", // 70 "tables related to metric exist", "can't drop monitor database or tables", - "commit log init failed", + "no disk permissions", "vgroup init failed", "data is already imported", // 75 "not supported operation", @@ -231,10 +231,10 @@ char *tsError[] = {"success", "batch size 
too big", "timestamp out of range", //105 "invalid query message", - "timestamp disordered in cache block", + "too many results from vnodes for sort", "timestamp disordered in file block", "invalid commit log", - "server no disk space", //110 + "no disk space on server", //110 "only super table has metric meta info", "tags value not unique for join", "invalid submit message", diff --git a/src/rpc/src/tudp.c b/src/rpc/src/tudp.c index fb0b37d93baaf98a6caed35ec980de72090819f2..db3e5e81c43754abe46cd907463737ceeb2b1116 100644 --- a/src/rpc/src/tudp.c +++ b/src/rpc/src/tudp.c @@ -296,7 +296,7 @@ void *taosTransferDataViaTcp(void *argv) { } if (!taosCheckHandleViaTcpValid(&handleViaTcp)) { - tError("%s UDP server read handle via tcp invalid, handle:%ld, hash:%ld", pSet->label, handleViaTcp.handle, + tError("%s UDP server read handle via tcp invalid, handle:%" PRIu64 ", hash:%" PRIu64, pSet->label, handleViaTcp.handle, handleViaTcp.hash); taosCloseSocket(connFd); free(pTransfer); @@ -698,12 +698,17 @@ int taosSendPacketViaTcp(uint32_t ip, uint16_t port, char *data, int dataLen, vo // send a UDP header first to set up the connection pHead = (STaosHeader *)buffer; memcpy(pHead, data, sizeof(STaosHeader)); + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wbitfield-constant-conversion" pHead->tcp = 2; +#pragma GCC diagnostic pop + msgLen = sizeof(STaosHeader); pHead->msgLen = (int32_t)htonl(msgLen); code = taosSendUdpData(ip, port, buffer, msgLen, chandle); - pHead = (STaosHeader *)data; + //pHead = (STaosHeader *)data; tinet_ntoa(ipstr, ip); int fd = taosOpenTcpClientSocket(ipstr, pConn->port, tsLocalIp); diff --git a/src/sdb/src/sdbEngine.c b/src/sdb/src/sdbEngine.c index 9a5e6413f3c6e7671d5a198afc7d5068751628d3..77b6f6d958bce36163adac74550060862f5eb1ec 100644 --- a/src/sdb/src/sdbEngine.c +++ b/src/sdb/src/sdbEngine.c @@ -24,6 +24,8 @@ extern char version[]; const int16_t sdbFileVersion = 0; int sdbExtConns = 0; +SIpList *pSdbIpList = NULL; +SIpList *pSdbPublicIpList = NULL; #ifdef CLUSTER int sdbMaster = 0; @@ -373,22 +375,22 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { */ pTable->id++; sdbVersion++; - sdbPrint("table:%s, record:%s already exist, think it successed, sdbVersion:%ld id:%d", + sdbPrint("table:%s, record:%s already exist, think it successed, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); return 0; } else { switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbError("table:%s, failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, (char *)row, sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record:%s sdbVersion:%" PRId64 " id:%" PRId64 , pTable->name, (char *)row, sdbVersion, pTable->id); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbError("table:%s, failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record:%s sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); break; case SDB_KEYTYPE_AUTO: - sdbError("table:%s, failed to insert record:%d sdbVersion:%ld id:%d", pTable->name, *(int32_t *)row, sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record:%d sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, *(int32_t *)row, sdbVersion, pTable->id); break; default: - sdbError("table:%s, failed to insert record sdbVersion:%ld id:%d", pTable->name, sdbVersion, pTable->id); + sdbError("table:%s, 
failed to insert record sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, sdbVersion, pTable->id); break; } return -1; @@ -452,19 +454,19 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { pTable->numOfRows++; switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, (char *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; case SDB_KEYTYPE_AUTO: - sdbTrace("table:%s, a record is inserted:%d, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted:%d, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, *(int32_t *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; default: - sdbTrace("table:%s, a record is inserted, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; } @@ -555,19 +557,19 @@ int sdbDeleteRow(void *handle, void *row) { sdbAddIntoUpdateList(pTable, SDB_TYPE_DELETE, pMetaRow); switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_AUTO: - sdbTrace("table:%s, a record is deleted:%d, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted:%d, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); break; default: - sdbTrace("table:%s, a record is deleted, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, sdbVersion, pTable->id, pTable->numOfRows); break; } @@ -602,19 +604,19 @@ int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { if (pMeta == NULL) { switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, (char *) row, sdbVersion, pTable->id); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbError("table:%s, failed to update 
record:%s, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, taosIpStr(*(int32_t *) row), sdbVersion, pTable->id); break; case SDB_KEYTYPE_AUTO: - sdbError("table:%s, failed to update record:%d, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%d, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, *(int32_t *) row, sdbVersion, pTable->id); break; default: - sdbError("table:%s, failed to update record, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, sdbVersion, pTable->id); break; } @@ -674,19 +676,19 @@ int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace("table:%s, a record is updated:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is updated:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbTrace("table:%s, a record is updated:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is updated:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_AUTO: - sdbTrace("table:%s, a record is updated:%d, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is updated:%d, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); break; default: - sdbTrace("table:%s, a record is updated, sdbVersion:%ld id:%ld numOfRows:%d", pTable->name, sdbVersion, + sdbTrace("table:%s, a record is updated, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, sdbVersion, pTable->id, pTable->numOfRows); break; } @@ -795,7 +797,7 @@ void sdbCloseTable(void *handle) { pthread_mutex_destroy(&pTable->mutex); sdbNumOfTables--; - sdbTrace("table:%s is closed, id:%ld numOfTables:%d", pTable->name, pTable->id, sdbNumOfTables); + sdbTrace("table:%s is closed, id:%" PRId64 " numOfTables:%d", pTable->name, pTable->id, sdbNumOfTables); tfree(pTable->update); tfree(pTable); @@ -899,7 +901,7 @@ void sdbResetTable(SSdbTable *pTable) { tfree(rowHead); - sdbPrint("table:%s is updated, sdbVerion:%ld id:%ld", pTable->name, sdbVersion, pTable->id); + sdbPrint("table:%s is updated, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, sdbVersion, pTable->id); } // TODO:A problem here :use snapshot file to sync another node will cause diff --git a/src/system/detail/inc/vnode.h b/src/system/detail/inc/vnode.h index 435184463b4a4dd7fccdbebc10b717d883609b26..60449de9f5467ed6341215733156d1d8de3fb4d1 100644 --- a/src/system/detail/inc/vnode.h +++ b/src/system/detail/inc/vnode.h @@ -239,10 +239,19 @@ typedef struct SQuery { int lfd; // only for query in file, last file handle SCompBlock *pBlock; // only for query in file SField ** pFields; + int numOfBlocks; // only for query in file int blockBufferSize; // length of pBlock buffer int currentSlot; int firstSlot; + + /* + * These two fields are used to handle the data-missing situation caused by import operations.
+ * When the commit slot is the first slot and commitPoint != 0, the next commit resumes from commitPoint within that slot. + */ + int32_t commitSlot; // which slot is committed + int32_t commitPoint; // starting point for next commit + int slot; int pos; TSKEY key; @@ -251,6 +260,7 @@ TSKEY skey; TSKEY ekey; int64_t nAggTimeInterval; + int64_t slidingTime; // sliding time for sliding window query char intervalTimeUnit; // interval data type, used for daytime revise int8_t precision; int16_t numOfOutputCols; diff --git a/src/system/detail/inc/vnodeQueryImpl.h b/src/system/detail/inc/vnodeQueryImpl.h index a26e9b6285af39156edf20d331e0afaaab4bf578..dc86f924aa1655291cb9ee97c2b23d8386ae46a7 100644 --- a/src/system/detail/inc/vnodeQueryImpl.h +++ b/src/system/detail/inc/vnodeQueryImpl.h @@ -22,7 +22,8 @@ extern "C" { #include "os.h" -#include "ihash.h" +#include "hash.h" +#include "hashutil.h" #define GET_QINFO_ADDR(x) ((char*)(x)-offsetof(SQInfo, query)) #define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0) @@ -63,7 +64,7 @@ typedef enum { * the next query. * * this status is only exist in group-by clause and - * diff/add/division/mulitply/ query. + * diff/add/division/multiply/ query. */ QUERY_RESBUF_FULL = 0x2, @@ -117,11 +118,9 @@ typedef enum { #define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN) typedef int (*__block_search_fn_t)(char* data, int num, int64_t key, int order); -typedef int32_t (*__read_data_fn_t)(int fd, SQInfo* pQInfo, SQueryFilesInfo* pQueryFile, char* buf, uint64_t offset, - int32_t size); static FORCE_INLINE SMeterObj* getMeterObj(void* hashHandle, int32_t sid) { - return *(SMeterObj**)taosGetIntHashData(hashHandle, sid); + return *(SMeterObj**)taosGetDataFromHash(hashHandle, (const char*) &sid, sizeof(sid)); } bool isQueryKilled(SQuery* pQuery); @@ -130,6 +129,7 @@ bool isPointInterpoQuery(SQuery* pQuery); bool isTopBottomQuery(SQuery* pQuery); bool isFirstLastRowQuery(SQuery* pQuery); bool isTSCompQuery(SQuery* pQuery); +bool notHasQueryTimeRange(SQuery *pQuery); bool needSupplementaryScan(SQuery* pQuery); bool onDemandLoadDatablock(SQuery* pQuery, int16_t queryRangeSet); @@ -149,7 +149,6 @@ void vnodeScanAllData(SQueryRuntimeEnv* pRuntimeEnv); int32_t vnodeQueryResultInterpolate(SQInfo* pQInfo, tFilePage** pDst, tFilePage** pDataSrc, int32_t numOfRows, int32_t* numOfInterpo); void copyResToQueryResultBuf(SMeterQuerySupportObj* pSupporter, SQuery* pQuery); -void moveDescOrderResultsToFront(SQueryRuntimeEnv* pRuntimeEnv); void doSkipResults(SQueryRuntimeEnv* pRuntimeEnv); void doFinalizeResult(SQueryRuntimeEnv* pRuntimeEnv); @@ -159,7 +158,7 @@ void forwardIntervalQueryRange(SMeterQuerySupportObj* pSupporter, SQueryRuntimeE void forwardQueryStartPosition(SQueryRuntimeEnv* pRuntimeEnv); bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySupportObj* pSupporter, - SPointInterpoSupporter* pPointInterpSupporter); + SPointInterpoSupporter* pPointInterpSupporter, int64_t* key); void pointInterpSupporterInit(SQuery* pQuery, SPointInterpoSupporter* pInterpoSupport); void pointInterpSupporterDestroy(SPointInterpoSupporter* pPointInterpSupport); @@ -173,10 +172,10 @@ void enableFunctForMasterScan(SQueryRuntimeEnv* pRuntimeEnv, int32_t order); int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj* pSupporter); void copyFromGroupBuf(SQInfo* pQInfo, SOutputRes* result); -SBlockInfo getBlockBasicInfo(void* pBlock, int32_t blockType); -SCacheBlock* getCacheDataBlock(SMeterObj* pMeterObj, SQuery* pQuery, int32_t slot); +SBlockInfo getBlockBasicInfo(SQueryRuntimeEnv*
pRuntimeEnv, void* pBlock, int32_t blockType); +SCacheBlock* getCacheDataBlock(SMeterObj* pMeterObj, SQueryRuntimeEnv* pRuntimeEnv, int32_t slot); -void queryOnBlock(SMeterQuerySupportObj* pSupporter, int64_t* primaryKeys, int32_t blockStatus, char* data, +void queryOnBlock(SMeterQuerySupportObj* pSupporter, int64_t* primaryKeys, int32_t blockStatus, SBlockInfo* pBlockBasicInfo, SMeterDataInfo* pDataHeadInfoEx, SField* pFields, __block_search_fn_t searchFn); @@ -278,6 +277,13 @@ void displayInterResult(SData** pdata, SQuery* pQuery, int32_t numOfRows); void vnodePrintQueryStatistics(SMeterQuerySupportObj* pSupporter); void clearGroupResultBuf(SOutputRes* pOneOutputRes, int32_t nOutputCols); +void copyGroupResultBuf(SOutputRes* dst, const SOutputRes* src, int32_t nOutputCols); + +void resetSlidingWindowInfo(SSlidingWindowInfo* pSlidingWindowInfo, int32_t numOfCols); +void clearCompletedSlidingWindows(SSlidingWindowInfo* pSlidingWindowInfo, int32_t numOfCols); +int32_t numOfClosedSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo); +void closeSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo, int32_t slot); +void closeAllSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo); #ifdef __cplusplus } diff --git a/src/system/detail/inc/vnodeRead.h b/src/system/detail/inc/vnodeRead.h index 0d749f60cc7ab3bd102ec812c1cc84ac42b65c11..ee88e5e36681a2a9d22faa5f757fa3c4f1f57a59 100644 --- a/src/system/detail/inc/vnodeRead.h +++ b/src/system/detail/inc/vnodeRead.h @@ -35,19 +35,19 @@ typedef struct { int32_t fileId; } SPositionInfo; -typedef struct SQueryLoadBlockInfo { +typedef struct SLoadDataBlockInfo { int32_t fileListIndex; /* index of this file in files list of this vnode */ int32_t fileId; int32_t slotIdx; int32_t sid; bool tsLoaded; // if timestamp column of current block is loaded or not -} SQueryLoadBlockInfo; +} SLoadDataBlockInfo; -typedef struct SQueryLoadCompBlockInfo { +typedef struct SLoadCompBlockInfo { int32_t sid; /* meter sid */ int32_t fileId; int32_t fileListIndex; -} SQueryLoadCompBlockInfo; +} SLoadCompBlockInfo; /* * the header file info for one vnode @@ -112,7 +112,31 @@ typedef struct SQueryFilesInfo { char dbFilePathPrefix[PATH_MAX]; } SQueryFilesInfo; -typedef struct RuntimeEnvironment { +typedef struct STimeWindow { + TSKEY skey; + TSKEY ekey; +} STimeWindow; + +typedef struct SWindowStatus { + STimeWindow window; + bool closed; +} SWindowStatus; + +typedef struct SSlidingWindowInfo { + SOutputRes* pResult; // reference to SQuerySupporter->pResult + SWindowStatus* pStatus; // current query window closed or not? + void* hashList; // hash list for quick access + int16_t type; // data type for hash key + int32_t capacity; // max capacity + int32_t curIndex; // current start active index + int32_t size; + + int64_t startTime; // start time of the first time window for sliding query + int64_t prevSKey; // previous (not completed) sliding window start key + int64_t threshold; // threshold for return completed results. 
+} SSlidingWindowInfo; + +typedef struct SQueryRuntimeEnv { SPositionInfo startPos; /* the start position, used for secondary/third iteration */ SPositionInfo endPos; /* the last access position in query, served as the start pos of reversed order query */ SPositionInfo nextPos; /* start position of the next scan */ @@ -126,20 +150,30 @@ typedef struct RuntimeEnvironment { SQuery* pQuery; SMeterObj* pMeterObj; SQLFunctionCtx* pCtx; - SQueryLoadBlockInfo loadBlockInfo; /* record current block load information */ - SQueryLoadCompBlockInfo loadCompBlockInfo; /* record current compblock information in SQuery */ - SQueryFilesInfo vnodeFileInfo; - int16_t numOfRowsPerPage; - int16_t offset[TSDB_MAX_COLUMNS]; - int16_t scanFlag; // denotes reversed scan of data or not - SInterpolationInfo interpoInfo; - SData** pInterpoBuf; - SOutputRes* pResult; // reference to SQuerySupporter->pResult - void* hashList; - int32_t usedIndex; // assigned SOutputRes in list - STSBuf* pTSBuf; - STSCursor cur; - SQueryCostSummary summary; + SLoadDataBlockInfo loadBlockInfo; /* record current block load information */ + SLoadCompBlockInfo loadCompBlockInfo; /* record current compblock information in SQuery */ + SQueryFilesInfo vnodeFileInfo; + int16_t numOfRowsPerPage; + int16_t offset[TSDB_MAX_COLUMNS]; + int16_t scanFlag; // denotes reversed scan of data or not + SInterpolationInfo interpoInfo; + SData** pInterpoBuf; + + SSlidingWindowInfo swindowResInfo; + + STSBuf* pTSBuf; + STSCursor cur; + SQueryCostSummary summary; + + STimeWindow intervalWindow; // the complete time window, not affected by the actual data distribution + + /* + * Temporarily holds the in-memory cache block info while scanning cache blocks. + * We do not use the cache block info from pMeterObj directly, simply because it may change at any time + * during the query by the submit/insert handling threads. + * So we keep a copy of the support structure as well as the cache block data itself.
+ */ + SCacheBlock cacheBlock; } SQueryRuntimeEnv; /* intermediate result during multimeter query involves interval */ @@ -172,7 +206,7 @@ typedef struct SMeterDataInfo { } SMeterDataInfo; typedef struct SMeterQuerySupportObj { - void* pMeterObj; + void* pMetersHashTable; // meter table hash list SMeterSidExtInfo** pMeterSidExtInfo; int32_t numOfMeters; @@ -229,7 +263,6 @@ typedef struct _qinfo { int killed; struct _qinfo *prev, *next; SQuery query; - int num; int totalPoints; int pointsRead; int pointsReturned; @@ -262,7 +295,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo* pQInfo, SQuery* pQuery, void* param) void vnodeDecMeterRefcnt(SQInfo* pQInfo); /* sql query handle in dnode */ -void vnodeSingleMeterQuery(SSchedMsg* pMsg); +void vnodeSingleTableQuery(SSchedMsg* pMsg); /* * handle multi-meter query process diff --git a/src/system/detail/inc/vnodeStatus.h b/src/system/detail/inc/vnodeStatus.h index 1a28d67e98c815ed7a5b3efd1072cb89c3c231b9..456304370d34e6f4101293e197962b069085689f 100644 --- a/src/system/detail/inc/vnodeStatus.h +++ b/src/system/detail/inc/vnodeStatus.h @@ -16,16 +16,20 @@ #ifndef TDENGINE_TSTATUS_H #define TDENGINE_TSTATUS_H +#include "taoserror.h" + #ifdef __cplusplus extern "C" { #endif enum _TSDB_VG_STATUS { - TSDB_VG_STATUS_READY, - TSDB_VG_STATUS_IN_PROGRESS, - TSDB_VG_STATUS_COMMITLOG_INIT_FAILED, - TSDB_VG_STATUS_INIT_FAILED, - TSDB_VG_STATUS_FULL + TSDB_VG_STATUS_READY = TSDB_CODE_SUCCESS, + TSDB_VG_STATUS_IN_PROGRESS = TSDB_CODE_ACTION_IN_PROGRESS, + TSDB_VG_STATUS_NO_DISK_PERMISSIONS = TSDB_CODE_NO_DISK_PERMISSIONS, + TSDB_VG_STATUS_SERVER_NO_PACE = TSDB_CODE_SERV_NO_DISKSPACE, + TSDB_VG_STATUS_SERV_OUT_OF_MEMORY = TSDB_CODE_SERV_OUT_OF_MEMORY, + TSDB_VG_STATUS_INIT_FAILED = TSDB_CODE_VG_INIT_FAILED, + TSDB_VG_STATUS_FULL = TSDB_CODE_NO_ENOUGH_DNODES, }; enum _TSDB_DB_STATUS { diff --git a/src/system/detail/src/dnodeMgmt.c b/src/system/detail/src/dnodeMgmt.c index 9842e0dad6eab3355c914a34484a58861a88d132..5e2b150cbb1960daa4990bbd7aef81154be3111b 100644 --- a/src/system/detail/src/dnodeMgmt.c +++ b/src/system/detail/src/dnodeMgmt.c @@ -153,7 +153,7 @@ int vnodeProcessAlterStreamRequest(char *pMsg, int msgLen, SMgmtObj *pObj) { } if (pAlter->sid >= pVnode->cfg.maxSessions || pAlter->sid < 0) { - dError("vid:%d sid:%d uid:%ld, sid is out of range", pAlter->vnode, pAlter->sid, pAlter->uid); + dError("vid:%d sid:%d uid:%" PRIu64 ", sid is out of range", pAlter->vnode, pAlter->sid, pAlter->uid); code = TSDB_CODE_INVALID_TABLE_ID; goto _over; } @@ -415,10 +415,10 @@ int vnodeProcessVPeerCfgRsp(char *msg, int msgLen, SMgmtObj *pMgmtObj) { int32_t *pint = (int32_t *)pRsp->more; int vnode = htonl(*pint); if (vnode < TSDB_MAX_VNODES && vnodeList[vnode].lastKey != 0) { - dError("vnode:%d not configured, it shall be empty"); + dError("vnode:%d not configured, it shall be empty, code:%d", vnode, pRsp->code); vnodeRemoveVnode(vnode); } else { - dTrace("vnode:%d is invalid", vnode); + dError("vnode:%d is invalid, code:%d", vnode, pRsp->code); } } diff --git a/src/system/detail/src/mgmtDb.c b/src/system/detail/src/mgmtDb.c index fb449f4761279c1d452092ce5f8f23a505bfa7e7..b935b68425e053a1a4ed466d65c8c790c701f384 100644 --- a/src/system/detail/src/mgmtDb.c +++ b/src/system/detail/src/mgmtDb.c @@ -668,6 +668,11 @@ int mgmtRetrieveDbs(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { pDb = (SDbObj *)pShow->pNode; if (pDb == NULL) break; pShow->pNode = (void *)pDb->next; + if (mgmtCheckIsMonitorDB(pDb->name, tsMonitorDbName)) { + if (strcmp(pConn->pUser->user, "root") != 0 
&& strcmp(pConn->pUser->user, "_root") != 0 && strcmp(pConn->pUser->user, "monitor") != 0 ) { + continue; + } + } cols = 0; diff --git a/src/system/detail/src/mgmtDnodeInt.c b/src/system/detail/src/mgmtDnodeInt.c index 6b6571b06c713bd9b6c41dab690652cf1c6fd639..a1dae7738f8f0f32b6c22666e62aff89e13f2c16 100644 --- a/src/system/detail/src/mgmtDnodeInt.c +++ b/src/system/detail/src/mgmtDnodeInt.c @@ -152,19 +152,15 @@ int mgmtProcessVPeersRsp(char *msg, int msgLen, SDnodeObj *pObj) { return 0; } - if (pRsp->code == 0) { + if (pRsp->code == TSDB_CODE_SUCCESS) { pDb->vgStatus = TSDB_VG_STATUS_READY; mTrace("dnode:%s, db:%s vgroup is created in dnode", taosIpStr(pObj->privateIp), pRsp->more); return 0; } - if (pRsp->code == TSDB_CODE_VG_COMMITLOG_INIT_FAILED) { - pDb->vgStatus = TSDB_VG_STATUS_COMMITLOG_INIT_FAILED; - mError("dnode:%s, db:%s vgroup commit log init failed, code:%d", taosIpStr(pObj->privateIp), pRsp->more, pRsp->code); - } else { - pDb->vgStatus = TSDB_VG_STATUS_INIT_FAILED; - mError("dnode:%s, db:%s vgroup init failed, code:%d", taosIpStr(pObj->privateIp), pRsp->more, pRsp->code); - } + pDb->vgStatus = pRsp->code; + mError("dnode:%s, db:%s vgroup init failed, code:%d %s", + taosIpStr(pObj->privateIp), pRsp->more, pRsp->code, taosGetVgroupStatusStr(pDb->vgStatus)); return 0; } @@ -469,8 +465,11 @@ int mgmtCfgDynamicOptions(SDnodeObj *pDnode, char *msg) { } int mgmtSendCfgDnodeMsg(char *cont) { +#ifdef CLUSTER char * pMsg, *pStart; int msgLen = 0; +#endif + SDnodeObj *pDnode; SCfgMsg * pCfg = (SCfgMsg *)cont; uint32_t ip; @@ -488,6 +487,7 @@ int mgmtSendCfgDnodeMsg(char *cont) { return code; } +#ifdef CLUSTER pStart = taosBuildReqMsg(pDnode->thandle, TSDB_MSG_TYPE_CFG_PNODE); if (pStart == NULL) return TSDB_CODE_NODE_OFFLINE; pMsg = pStart; @@ -497,6 +497,8 @@ int mgmtSendCfgDnodeMsg(char *cont) { msgLen = pMsg - pStart; taosSendMsgToDnode(pDnode, pStart, msgLen); - +#else + (void)tsCfgDynamicOptions(pCfg->config); +#endif return 0; } diff --git a/src/system/detail/src/mgmtMeter.c b/src/system/detail/src/mgmtMeter.c index 6ff9448c09caf49a6d26bb32d22c1eb4ffa98b5e..a2a6ed8a7d3e506ddfcd1926683a0d823906c36a 100644 --- a/src/system/detail/src/mgmtMeter.c +++ b/src/system/detail/src/mgmtMeter.c @@ -657,16 +657,13 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { return TSDB_CODE_NO_ENOUGH_DNODES; } - if (pDb->vgStatus == TSDB_VG_STATUS_COMMITLOG_INIT_FAILED) { + if (pDb->vgStatus == TSDB_VG_STATUS_NO_DISK_PERMISSIONS || + pDb->vgStatus == TSDB_VG_STATUS_SERVER_NO_PACE || + pDb->vgStatus == TSDB_VG_STATUS_SERV_OUT_OF_MEMORY || + pDb->vgStatus == TSDB_VG_STATUS_INIT_FAILED ) { mgmtDestroyMeter(pMeter); - mError("table:%s, commit log init failed", pCreate->meterId); - return TSDB_CODE_VG_COMMITLOG_INIT_FAILED; - } - - if (pDb->vgStatus == TSDB_VG_STATUS_INIT_FAILED) { - mgmtDestroyMeter(pMeter); - mError("table:%s, vgroup init failed", pCreate->meterId); - return TSDB_CODE_VG_INIT_FAILED; + mError("table:%s, vgroup init failed, reason:%d %s", pCreate->meterId, pDb->vgStatus, taosGetVgroupStatusStr(pDb->vgStatus)); + return pDb->vgStatus; } if (pVgroup == NULL) { @@ -691,7 +688,7 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { pMeter->uid = (((uint64_t)pMeter->gid.vgId) << 40) + ((((uint64_t)pMeter->gid.sid) & ((1ul << 24) - 1ul)) << 16) + ((uint64_t)sdbVersion & ((1ul << 16) - 1ul)); - mTrace("table:%s, create table in vgroup, vgId:%d sid:%d vnode:%d uid:%llu db:%s", + mTrace("table:%s, create table in vgroup, vgId:%d sid:%d vnode:%d uid:%" PRIu64 " db:%s", 
pMeter->meterId, pVgroup->vgId, sid, pVgroup->vnodeGid[0].vnode, pMeter->uid, pDb->name); } else { pMeter->uid = (((uint64_t)pMeter->createdTime) << 16) + ((uint64_t)sdbVersion & ((1ul << 16) - 1ul)); @@ -1189,6 +1186,8 @@ int mgmtRetrieveMetricMeta(SConnObj *pConn, char **pStart, SMetricMetaMsg *pMetr int32_t * tagLen = calloc(1, sizeof(int32_t) * pMetricMetaMsg->numOfMeters); if (result == NULL || tagLen == NULL) { + tfree(result); + tfree(tagLen); return -1; } @@ -1270,6 +1269,11 @@ int mgmtRetrieveMeters(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); if (pDb == NULL) return 0; + if (mgmtCheckIsMonitorDB(pDb->name, tsMonitorDbName)) { + if (strcmp(pConn->pUser->user, "root") != 0 && strcmp(pConn->pUser->user, "_root") != 0 && strcmp(pConn->pUser->user, "monitor") != 0 ) { + return 0; + } + } strcpy(prefix, pDb->name); strcat(prefix, TS_PATH_DELIMITER); @@ -1387,6 +1391,16 @@ int mgmtRetrieveMetrics(SShowObj *pShow, char *data, int rows, SConnObj *pConn) char * pWrite; int cols = 0; + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + + if (pDb == NULL) return 0; + if (mgmtCheckIsMonitorDB(pDb->name, tsMonitorDbName)) { + if (strcmp(pConn->pUser->user, "root") != 0 && strcmp(pConn->pUser->user, "_root") != 0 && strcmp(pConn->pUser->user, "monitor") != 0 ) { + return 0; + } + } + SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; char metricName[TSDB_METER_NAME_LEN] = {0}; diff --git a/src/system/detail/src/mgmtProfile.c b/src/system/detail/src/mgmtProfile.c index e7dbeaaa254da098dcdac5a15b6b0feccb5f32f2..c1cd98952311f4cda6bf97cf2448de244299c8b8 100644 --- a/src/system/detail/src/mgmtProfile.c +++ b/src/system/detail/src/mgmtProfile.c @@ -499,10 +499,9 @@ int mgmtKillConnection(char *qidstr, SConnObj *pConn) { uint32_t ip = inet_addr(temp); temp = chr + 1; - short port = htons(atoi(temp)); - + uint16_t port = htons(atoi(temp)); SAcctObj *pAcct = pConn->pAcct; - + pthread_mutex_lock(&pAcct->mutex); pConn = pAcct->pConn; diff --git a/src/system/detail/src/mgmtShell.c b/src/system/detail/src/mgmtShell.c index 2872f5e7cac3f74a66d4853bc7c97def70563664..06556c817f0c6b1d99706893b0172ae6b732968f 100644 --- a/src/system/detail/src/mgmtShell.c +++ b/src/system/detail/src/mgmtShell.c @@ -257,7 +257,7 @@ int mgmtProcessMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { pRsp->code = TSDB_CODE_DB_NOT_SELECTED; pMsg++; } else { - mTrace("%s, uid:%lld meter meta is retrieved", pInfo->meterId, pMeterObj->uid); + mTrace("%s, uid:%" PRIu64 " meter meta is retrieved", pInfo->meterId, pMeterObj->uid); pRsp->code = 0; pMsg += sizeof(STaosRsp); *pMsg = TSDB_IE_TYPE_META; @@ -402,7 +402,7 @@ int mgmtProcessMultiMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (pMeterObj == NULL || (pDbObj == NULL)) { continue; } else { - mTrace("%s, uid:%lld sversion:%d meter meta is retrieved", tblName, pMeterObj->uid, pMeterObj->sversion); + mTrace("%s, uid:%" PRIu64 " sversion:%d meter meta is retrieved", tblName, pMeterObj->uid, pMeterObj->sversion); pMeta = (SMultiMeterMeta *)pCurMeter; memcpy(pMeta->meterId, tblName, strlen(tblName)); @@ -446,7 +446,7 @@ int mgmtProcessMultiMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (pVgroup == NULL) { pRsp->code = TSDB_CODE_INVALID_TABLE; pNewMsg++; - mError("%s, uid:%lld sversion:%d vgId:%d pVgroup is NULL", tblName, pMeterObj->uid, pMeterObj->sversion, + mError("%s, uid:%" PRIu64 " sversion:%d vgId:%d pVgroup is NULL", tblName, pMeterObj->uid, 
pMeterObj->sversion, pMeterObj->gid.vgId);
         goto _error_exit_code;
       }
@@ -734,8 +734,11 @@ int mgmtProcessAlterUserMsg(char *pMsg, int msgLen, SConnObj *pConn) {
   if ((pAlter->flag & TSDB_ALTER_USER_PRIVILEGES) != 0) {
     bool hasRight = false;
+    if (strcmp(pUser->user, "root") == 0) {
       hasRight = false;
+    } else if (strcmp(pUser->user, pUser->acct) == 0) {
+      hasRight = false;
     } else if (strcmp(pOperUser->user, "root") == 0) {
       hasRight = true;
     } else if (strcmp(pUser->user, pOperUser->user) == 0) {
@@ -750,21 +753,24 @@
       }
     }
 
+    if (pAlter->privilege == 1) {  // super
+      hasRight = false;
+    }
+
     if (hasRight) {
-      if ((pAlter->flag & TSDB_ALTER_USER_PRIVILEGES) != 0) {
-        if (pAlter->privilege == 1) {  // super
-          pUser->superAuth = 1;
-          pUser->writeAuth = 1;
-        }
-        if (pAlter->privilege == 2) {  // read
-          pUser->superAuth = 0;
-          pUser->writeAuth = 0;
-        }
-        if (pAlter->privilege == 3) {  // write
-          pUser->superAuth = 0;
-          pUser->writeAuth = 1;
-        }
+      //if (pAlter->privilege == 1) {  // super
+      //  pUser->superAuth = 1;
+      //  pUser->writeAuth = 1;
+      //}
+      if (pAlter->privilege == 2) {  // read
+        pUser->superAuth = 0;
+        pUser->writeAuth = 0;
+      }
+      if (pAlter->privilege == 3) {  // write
+        pUser->superAuth = 0;
+        pUser->writeAuth = 1;
       }
+
       code = mgmtUpdateUser(pUser);
       mLPrint("user:%s privilege is altered by %s, code:%d", pAlter->user, pConn->pUser->user, code);
     } else {
@@ -894,7 +900,7 @@ int mgmtProcessShowMsg(char *pMsg, int msgLen, SConnObj *pConn) {
   SShowRspMsg *pShowRsp;
   SShowObj *   pShow = NULL;
 
-  if (pShowMsg->type == TSDB_MGMT_TABLE_PNODE || TSDB_MGMT_TABLE_GRANTS || TSDB_MGMT_TABLE_SCORES) {
+  if (pShowMsg->type == TSDB_MGMT_TABLE_DNODE || pShowMsg->type == TSDB_MGMT_TABLE_GRANTS || pShowMsg->type == TSDB_MGMT_TABLE_SCORES) {
     if (mgmtCheckRedirectMsg(pConn, TSDB_MSG_TYPE_SHOW_RSP) != 0) {
       return 0;
     }
@@ -1072,7 +1078,7 @@ int mgmtProcessCreateTableMsg(char *pMsg, int msgLen, SConnObj *pConn) {
       STabObj* pMeter = mgmtGetMeter(pCreate->meterId);
       assert(pMeter != NULL);
 
-      mWarn("table:%s, table already created, failed to create table, ts:%lld, code:%d", pCreate->meterId,
+      mWarn("table:%s, table already created, failed to create table, ts:%" PRId64 ", code:%d", pCreate->meterId,
            pMeter->createdTime, code);
     } else {  // other errors
       mError("table:%s, failed to create table, code:%d", pCreate->meterId, code);
@@ -1196,21 +1202,28 @@ int mgmtProcessHeartBeatMsg(char *cont, int contLen, SConnObj *pConn) {
   pConn->streamId = 0;
   pHBRsp->killConnection = pConn->killConnection;
 
-#ifdef CLUSTER
   if (pConn->usePublicIp) {
-    int size = pSdbPublicIpList->numOfIps * 4;
-    pHBRsp->ipList.numOfIps = pSdbPublicIpList->numOfIps;
-    memcpy(pHBRsp->ipList.ip, pSdbPublicIpList->ip, size);
-    pMsg += sizeof(SHeartBeatRsp) + size;
+    if (pSdbPublicIpList != NULL) {
+      int size = pSdbPublicIpList->numOfIps * 4;
+      pHBRsp->ipList.numOfIps = pSdbPublicIpList->numOfIps;
+      memcpy(pHBRsp->ipList.ip, pSdbPublicIpList->ip, size);
+      pMsg += sizeof(SHeartBeatRsp) + size;
+    } else {
+      pHBRsp->ipList.numOfIps = 0;
+      pMsg += sizeof(SHeartBeatRsp);
+    }
+
   } else {
-    int size = pSdbIpList->numOfIps * 4;
-    pHBRsp->ipList.numOfIps = pSdbIpList->numOfIps;
-    memcpy(pHBRsp->ipList.ip, pSdbIpList->ip, size);
-    pMsg += sizeof(SHeartBeatRsp) + size;
+    if (pSdbIpList != NULL) {
+      int size = pSdbIpList->numOfIps * 4;
+      pHBRsp->ipList.numOfIps = pSdbIpList->numOfIps;
+      memcpy(pHBRsp->ipList.ip, pSdbIpList->ip, size);
+      pMsg += sizeof(SHeartBeatRsp) + size;
+    } else {
+      pHBRsp->ipList.numOfIps = 0;
+      pMsg += sizeof(SHeartBeatRsp);
+    }
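Review note on the mgmtProcessShowMsg hunk above: the removed line compared only its first operand against `pShowMsg->type`, while `TSDB_MGMT_TABLE_GRANTS` and `TSDB_MGMT_TABLE_SCORES` were evaluated as bare non-zero constants, so the redirect check fired for every show type. The revised line repeats the comparison per operand. A minimal, self-contained sketch of the pitfall; the enum values below are illustrative stand-ins, not the real TSDB_MGMT_TABLE_* definitions:

```c
#include <stdio.h>

/* Illustrative stand-ins; the real TSDB_MGMT_TABLE_* constants live elsewhere. */
enum MgmtTableType { TABLE_USER = 0, TABLE_DNODE = 4, TABLE_GRANTS = 5, TABLE_SCORES = 6 };

int main(void) {
  enum MgmtTableType type = TABLE_USER;

  /* The old form: only the first operand tests `type`; the other two are
   * bare non-zero constants, so this branch is taken for every type. */
  if (type == TABLE_DNODE || TABLE_GRANTS || TABLE_SCORES) {
    printf("always taken, even for TABLE_USER\n");
  }

  /* The per-operand form the check presumably intends. */
  if (type == TABLE_DNODE || type == TABLE_GRANTS || type == TABLE_SCORES) {
    printf("taken only for the three redirected show types\n");
  }
  return 0;
}
```

Clang, for instance, flags the first form with -Wconstant-logical-operand, which is a cheap way to catch this class of mistake across the mgmt message handlers.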
} -#else - pMsg += sizeof(SHeartBeatRsp); -#endif msgLen = pMsg - pStart; taosSendMsgToPeer(pConn->thandle, pStart, msgLen); @@ -1328,15 +1341,22 @@ _rsp: pConnectRsp->superAuth = pConn->superAuth; pMsg += sizeof(SConnectRsp); -#ifdef CLUSTER - int size = pSdbPublicIpList->numOfIps * 4 + sizeof(SIpList); - if (pConn->usePublicIp) { - memcpy(pMsg, pSdbPublicIpList, size); + int size; + if (pSdbPublicIpList != NULL && pSdbIpList != NULL) { + size = pSdbPublicIpList->numOfIps * 4 + sizeof(SIpList); + if (pConn->usePublicIp) { + memcpy(pMsg, pSdbPublicIpList, size); + } else { + memcpy(pMsg, pSdbIpList, size); + } } else { - memcpy(pMsg, pSdbIpList, size); + SIpList tmpIpList; + tmpIpList.numOfIps = 0; + size = tmpIpList.numOfIps * 4 + sizeof(SIpList); + memcpy(pMsg, &tmpIpList, size); } + pMsg += size; -#endif // set the time resolution: millisecond or microsecond *((uint32_t *)pMsg) = tsTimePrecision; @@ -1467,8 +1487,8 @@ void mgmtInitProcessShellMsg() { mgmtProcessShellMsg[TSDB_MSG_TYPE_SHOW] = mgmtProcessShowMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_CONNECT] = mgmtProcessConnectMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_HEARTBEAT] = mgmtProcessHeartBeatMsg; - mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_PNODE] = mgmtProcessCreateDnodeMsg; - mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_PNODE] = mgmtProcessDropDnodeMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_DNODE] = mgmtProcessCreateDnodeMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_DNODE] = mgmtProcessDropDnodeMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_MNODE] = mgmtProcessCreateMnodeMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_MNODE] = mgmtProcessDropMnodeMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_CFG_MNODE] = mgmtProcessCfgMnodeMsg; diff --git a/src/system/detail/src/mgmtSupertableQuery.c b/src/system/detail/src/mgmtSupertableQuery.c index f83ffd42477c17c8c20b67aed0956fd1e15737eb..1b7ae66e6d5b01dd15e0dc26d91a03e37a2e29ab 100644 --- a/src/system/detail/src/mgmtSupertableQuery.c +++ b/src/system/detail/src/mgmtSupertableQuery.c @@ -329,7 +329,7 @@ int32_t mgmtDoJoin(SMetricMetaMsg* pMetricMetaMsg, tQueryResultset* pRes) { bool allEmpty = false; for (int32_t i = 0; i < pMetricMetaMsg->numOfMeters; ++i) { - if (pRes->num == 0) { // all results are empty if one of them is empty + if (pRes[i].num == 0) { // all results are empty if one of them is empty allEmpty = true; break; } diff --git a/src/system/detail/src/vnodeCache.c b/src/system/detail/src/vnodeCache.c index 36bf87210927a7280a425ee5c4c20af65b0a78c0..9f078b09ffd4e15e2221dc818e22d03fc8396ddd 100644 --- a/src/system/detail/src/vnodeCache.c +++ b/src/system/detail/src/vnodeCache.c @@ -630,7 +630,14 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { setNullN(pData, type, bytes, pCacheBlock->numOfPoints); } else { pRead = pCacheBlock->offset[colIdx] + startPos * bytes; - memcpy(pData, pRead, numOfReads * bytes); + + if (QUERY_IS_ASC_QUERY(pQuery)) { + memcpy(pData, pRead, numOfReads * bytes); + } else { + for(int32_t j = 0; j < numOfReads; ++j) { + memcpy(pData + bytes * j, pRead + (numOfReads - 1 - j) * bytes, bytes); + } + } } } numOfQualifiedPoints = numOfReads; @@ -653,8 +660,8 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = startPos; j < pCacheBlock->numOfPoints; ++j) { TSKEY key = vnodeGetTSInCacheBlock(pCacheBlock, j); if (key < startkey || key > endkey) { - dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. 
slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); + dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -668,8 +675,7 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { } ids[numOfQualifiedPoints] = j; - if (++numOfQualifiedPoints == numOfReads) { - // qualified data are enough + if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough break; } } @@ -678,8 +684,8 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = startPos; j >= 0; --j) { TSKEY key = vnodeGetTSInCacheBlock(pCacheBlock, j); if (key < startkey || key > endkey) { - dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); + dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -691,23 +697,22 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { if (!vnodeFilterData(pQuery, &numOfActualRead, j)) { continue; } - - ids[numOfReads - numOfQualifiedPoints - 1] = j; - if (++numOfQualifiedPoints == numOfReads) { - // qualified data are enough + + ids[numOfQualifiedPoints] = j; + if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough break; } } } - int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 0 : numOfReads - numOfQualifiedPoints; +// int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : numOfReads - numOfQualifiedPoints; for (int32_t j = 0; j < numOfQualifiedPoints; ++j) { for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { int16_t colIndex = pQuery->pSelectExpr[col].pBase.colInfo.colIdx; int32_t bytes = pObj->schema[colIndex].bytes; pData = pQuery->sdata[col]->data + (pQuery->pointsOffset + j) * bytes; - pRead = pCacheBlock->offset[colIndex] + ids[j + start] * bytes; + pRead = pCacheBlock->offset[colIndex] + ids[j/* + start*/] * bytes; memcpy(pData, pRead, bytes); } @@ -962,10 +967,11 @@ void vnodeSetCommitQuery(SMeterObj *pObj, SQuery *pQuery) { if (firstKey < pQuery->skey) { pQuery->over = 1; - dTrace("vid:%d sid:%d id:%s, first key is small, keyFirst:%ld commitFirstKey:%ld", + dTrace("vid:%d sid:%d id:%s, first key is small, keyFirst:%" PRId64 " commitFirstKey:%" PRId64 "", pObj->vnode, pObj->sid, pObj->meterId, firstKey, pQuery->skey); pthread_mutex_lock(&(pVnode->vmutex)); if (firstKey < pVnode->firstKey) pVnode->firstKey = firstKey; + assert(pVnode->firstKey > 0); pthread_mutex_unlock(&(pVnode->vmutex)); } } @@ -1013,7 +1019,7 @@ int vnodeSyncRetrieveCache(int vnode, int fd) { if (taosWriteMsg(fd, &(pObj->lastKeyOnFile), sizeof(pObj->lastKeyOnFile)) <= 0) return -1; if (taosWriteMsg(fd, &(pInfo->commitPoint), sizeof(pInfo->commitPoint)) <= 0) return -1; - dTrace("vid:%d sid:%d id:%s, send lastKey:%lld lastKeyOnFile:%lld", vnode, sid, pObj->meterId, pObj->lastKey, + dTrace("vid:%d sid:%d id:%s, send lastKey:%" PRId64 " lastKeyOnFile:%" PRId64, vnode, sid, pObj->meterId, pObj->lastKey, pObj->lastKeyOnFile); slot = pInfo->commitSlot; @@ -1033,7 +1039,7 @@ int vnodeSyncRetrieveCache(int vnode, int fd) { if (taosWriteMsg(fd, pBlock->offset[col], pObj->schema[col].bytes * points) <= 0) return -1; TSKEY lastKey = *((TSKEY *)(pBlock->offset[0] + pObj->schema[0].bytes * (points - 1))); - dTrace("vid:%d sid:%d id:%s, cache block is sent, points:%d lastKey:%ld", vnode, sid, pObj->meterId, points, + dTrace("vid:%d sid:%d id:%s, cache block is sent, points:%d lastKey:%" PRId64, vnode, sid, pObj->meterId, points, lastKey); blocksSent++; @@ -1097,7 +1103,7 @@ int vnodeSyncRestoreCache(int vnode, int fd) { if (taosReadMsg(fd, &(pObj->lastKeyOnFile), sizeof(pObj->lastKeyOnFile)) <= 0) return -1; if (taosReadMsg(fd, &(pInfo->commitPoint), sizeof(pInfo->commitPoint)) <= 0) return -1; - dTrace("vid:%d sid:%d id:%s, commitPoint:%d lastKeyOnFile:%ld", vnode, sid, pObj->meterId, pInfo->commitPoint, + dTrace("vid:%d sid:%d id:%s, commitPoint:%d lastKeyOnFile:%" PRId64, vnode, sid, pObj->meterId, pInfo->commitPoint, pObj->lastKeyOnFile); if (vnodeList[pObj->vnode].lastKey < pObj->lastKey) vnodeList[pObj->vnode].lastKey = pObj->lastKey; @@ -1135,7 +1141,7 @@ int vnodeSyncRestoreCache(int vnode, int fd) { if (vnodeList[pObj->vnode].firstKey > *(TSKEY *)(pBlock->offset[0])) vnodeList[pObj->vnode].firstKey = *(TSKEY *)(pBlock->offset[0]); - dTrace("vid:%d sid:%d id:%s, cache block is received, points:%d lastKey:%ld", vnode, sid, pObj->meterId, points, + dTrace("vid:%d sid:%d id:%s, cache block is received, points:%d lastKey:%" PRId64, vnode, sid, pObj->meterId, points, pObj->lastKey); } } diff --git a/src/system/detail/src/vnodeCommit.c b/src/system/detail/src/vnodeCommit.c index b5c9f8074536d1b18061b124f70f93e064e2b316..a650376afac0615c1966e6c0181adfdf41910329 100644 --- a/src/system/detail/src/vnodeCommit.c +++ b/src/system/detail/src/vnodeCommit.c @@ -51,7 +51,7 @@ int vnodeOpenCommitLog(int vnode, uint64_t firstV) { int64_t length = statbuf.st_size; if (length != 
pVnode->mappingSize) { - dError("vid:%d, logfd:%d, alloc file size:%ld not equal to mapping size:%ld", vnode, pVnode->logFd, length, + dError("vid:%d, logfd:%d, alloc file size:%" PRId64 " not equal to mapping size:%" PRId64, vnode, pVnode->logFd, length, pVnode->mappingSize); goto _err_log_open; } diff --git a/src/system/detail/src/vnodeFile.c b/src/system/detail/src/vnodeFile.c index b69320a03c9d586dd17e668ad50f0a763eff0dd4..9c53d47507b18582333aa64abc9f39fb1cab5407 100644 --- a/src/system/detail/src/vnodeFile.c +++ b/src/system/detail/src/vnodeFile.c @@ -197,7 +197,7 @@ int vnodeCreateNeccessaryFiles(SVnodeObj *pVnode) { numOfFiles = (pVnode->lastKeyOnFile - pVnode->commitFirstKey) / tsMsPerDay[(uint8_t)pVnode->cfg.precision] / pCfg->daysPerFile; if (pVnode->commitFirstKey > pVnode->lastKeyOnFile) numOfFiles = -1; - dTrace("vid:%d, commitFirstKey:%ld lastKeyOnFile:%ld numOfFiles:%d fileId:%d vnodeNumOfFiles:%d", pVnode->vnode, + dTrace("vid:%d, commitFirstKey:%" PRId64 " lastKeyOnFile:%" PRId64 " numOfFiles:%d fileId:%d vnodeNumOfFiles:%d", pVnode->vnode, pVnode->commitFirstKey, pVnode->lastKeyOnFile, numOfFiles, pVnode->fileId, pVnode->numOfFiles); if (numOfFiles >= pVnode->numOfFiles) { @@ -251,7 +251,7 @@ int vnodeOpenCommitFiles(SVnodeObj *pVnode, int noTempLast) { fileId = pVnode->commitFileId; - dTrace("vid:%d, commit fileId:%d, commitLastKey:%ld, vnodeLastKey:%ld, lastKeyOnFile:%ld numOfFiles:%d", + dTrace("vid:%d, commit fileId:%d, commitLastKey:%" PRId64 ", vnodeLastKey:%" PRId64 ", lastKeyOnFile:%" PRId64 " numOfFiles:%d", vnode, fileId, pVnode->commitLastKey, pVnode->lastKey, pVnode->lastKeyOnFile, pVnode->numOfFiles); int minSize = sizeof(SCompHeader) * pVnode->cfg.maxSessions + sizeof(TSCKSUM) + TSDB_FILE_HEADER_LEN; @@ -506,7 +506,7 @@ void *vnodeCommitMultiToFile(SVnodeObj *pVnode, int ssid, int esid) { SVnodeHeadInfo headInfo; uint8_t * pOldCompBlocks; - dPrint("vid:%d, committing to file, firstKey:%ld lastKey:%ld ssid:%d esid:%d", vnode, pVnode->firstKey, + dPrint("vid:%d, committing to file, firstKey:%" PRId64 " lastKey:%" PRId64 " ssid:%d esid:%d", vnode, pVnode->firstKey, pVnode->lastKey, ssid, esid); if (pVnode->lastKey == 0) goto _over; @@ -573,7 +573,7 @@ _again: memset(&query, 0, sizeof(query)); if (vnodeOpenCommitFiles(pVnode, ssid) < 0) goto _over; - dTrace("vid:%d, start to commit, commitFirstKey:%ld commitLastKey:%ld", vnode, pVnode->commitFirstKey, + dTrace("vid:%d, start to commit, commitFirstKey:%" PRId64 " commitLastKey:%" PRId64, vnode, pVnode->commitFirstKey, pVnode->commitLastKey); headLen = 0; @@ -642,7 +642,7 @@ _again: read(pVnode->hfd, &pMeter->lastBlock, sizeof(SCompBlock)); } } else { - dTrace("vid:%d sid:%d id:%s, uid:%ld is not matched w/ old:%ld, old data will be thrown away", + dTrace("vid:%d sid:%d id:%s, uid:%" PRIu64 " is not matched with old:%" PRIu64 ", old data will be thrown away", vnode, sid, pObj->meterId, pObj->uid, compInfo.uid); pMeter->oldNumOfBlocks = 0; } @@ -683,7 +683,7 @@ _again: query.sdata = data; vnodeSetCommitQuery(pObj, &query); - dTrace("vid:%d sid:%d id:%s, start to commit, startKey:%lld slot:%d pos:%d", pObj->vnode, pObj->sid, pObj->meterId, + dTrace("vid:%d sid:%d id:%s, start to commit, startKey:%" PRId64 " slot:%d pos:%d", pObj->vnode, pObj->sid, pObj->meterId, pObj->lastKeyOnFile, query.slot, query.pos); pointsRead = 0; @@ -760,7 +760,7 @@ _again: pMeter->newNumOfBlocks++; pMeter->committedPoints += (pointsRead - pointsReadLast); - dTrace("vid:%d sid:%d id:%s, pointsRead:%d, pointsReadLast:%d lastKey:%lld, " + 
dTrace("vid:%d sid:%d id:%s, pointsRead:%d, pointsReadLast:%d lastKey:%" PRId64 ", " "slot:%d pos:%d newNumOfBlocks:%d headLen:%d", pObj->vnode, pObj->sid, pObj->meterId, pointsRead, pointsReadLast, pObj->lastKeyOnFile, query.slot, query.pos, pMeter->newNumOfBlocks, headLen); @@ -771,7 +771,7 @@ _again: pointsReadLast = 0; } - dTrace("vid:%d sid:%d id:%s, %d points are committed, lastKey:%lld slot:%d pos:%d newNumOfBlocks:%d", + dTrace("vid:%d sid:%d id:%s, %d points are committed, lastKey:%" PRId64 " slot:%d pos:%d newNumOfBlocks:%d", pObj->vnode, pObj->sid, pObj->meterId, pMeter->committedPoints, pObj->lastKeyOnFile, query.slot, query.pos, pMeter->newNumOfBlocks); @@ -1093,7 +1093,7 @@ int vnodeReadColumnToMem(int fd, SCompBlock *pBlock, SField **fields, int col, c } if (len <= 0) { - dError("failed to read col:%d, offset:%ld, reason:%s", col, tfields[col].offset, strerror(errno)); + dError("failed to read col:%d, offset:%d, reason:%s", col, (int32_t)(tfields[col].offset), strerror(errno)); return -1; } @@ -1218,7 +1218,7 @@ int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[] int dfd = pVnode->dfd; if (pCompBlock->last && (points < pObj->pointsPerFileBlock * tsFileBlockMinPercent)) { - dTrace("vid:%d sid:%d id:%s, points:%d are written to last block, block stime: %ld, block etime: %ld", + dTrace("vid:%d sid:%d id:%s, points:%d are written to last block, block stime: %" PRId64 ", block etime: %" PRId64, pObj->vnode, pObj->sid, pObj->meterId, points, *((TSKEY *)(data[0]->data)), *((TSKEY * )(data[0]->data + (points - 1) * pObj->schema[0].bytes))); pCompBlock->last = 1; @@ -1303,7 +1303,7 @@ int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[] pCompBlock->len += wlen; } - dTrace("vid:%d, vnode compStorage size is: %ld", pObj->vnode, pVnode->vnodeStatistic.compStorage); + dTrace("vid:%d, vnode compStorage size is: %" PRId64, pObj->vnode, pVnode->vnodeStatistic.compStorage); pCompBlock->algorithm = pCfg->compression; pCompBlock->numOfPoints = points; @@ -1355,7 +1355,7 @@ int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery) { if (pQuery->skey < oldest) pQuery->skey = oldest; } - dTrace("vid:%d sid:%d id:%s, skey:%ld ekey:%ld oldest:%ld latest:%ld fileId:%d numOfFiles:%d", + dTrace("vid:%d sid:%d id:%s, skey:%" PRId64 " ekey:%" PRId64 " oldest:%" PRId64 " latest:%" PRId64 " fileId:%d numOfFiles:%d", pObj->vnode, pObj->sid, pObj->meterId, pQuery->skey, pQuery->ekey, oldest, latest, pVnode->fileId, pVnode->numOfFiles); @@ -1383,7 +1383,7 @@ int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery) { firstSlot = 0; lastSlot = pQuery->numOfBlocks - 1; - numOfBlocks = pQuery->numOfBlocks; + //numOfBlocks = pQuery->numOfBlocks; if (QUERY_IS_ASC_QUERY(pQuery) && pBlock[lastSlot].keyLast < pQuery->skey) continue; if (!QUERY_IS_ASC_QUERY(pQuery) && pBlock[firstSlot].keyFirst > pQuery->skey) continue; @@ -1640,11 +1640,15 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { pData = pQuery->sdata[i]->data + pQuery->pointsOffset * bytes; pRead = sdata[colBufferIndex]->data + startPos * bytes; - memcpy(pData, pRead, numOfReads * bytes); + if (QUERY_IS_ASC_QUERY(pQuery)) { + memcpy(pData, pRead, numOfReads * bytes); + } else { //reversed copy to output buffer + for(int32_t j = 0; j < numOfReads; ++j) { + memcpy(pData + bytes * j, pRead + (numOfReads - 1 - j) * bytes, bytes); + } + } } - numOfQualifiedPoints = numOfReads; - } else { // check each data one by one set the input column data for (int32_t k = 0; k < pQuery->numOfFilterCols; 
++k) { @@ -1659,8 +1663,8 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = startPos; j < pBlock->numOfPoints; j -= step) { TSKEY key = vnodeGetTSInDataBlock(pQuery, j, startPositionFactor); if (key < startKey || key > endKey) { - dError("vid:%d sid:%d id:%s, timestamp in file block disordered. slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); + dError("vid:%d sid:%d id:%s, timestamp in file block disordered. slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -1675,8 +1679,7 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { } ids[numOfQualifiedPoints] = j; - if (++numOfQualifiedPoints == numOfReads) { - // qualified data are enough + if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough break; } } @@ -1684,8 +1687,8 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = pQuery->pos; j >= 0; --j) { TSKEY key = vnodeGetTSInDataBlock(pQuery, j, startPositionFactor); if (key < startKey || key > endKey) { - dError("vid:%d sid:%d id:%s, timestamp in file block disordered. slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); + dError("vid:%d sid:%d id:%s, timestamp in file block disordered. slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -1698,22 +1701,21 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { if (!vnodeFilterData(pQuery, &numOfActualRead, j)) { continue; } - - ids[numOfReads - numOfQualifiedPoints - 1] = j; - if (++numOfQualifiedPoints == numOfReads) { - // qualified data are enough + + ids[numOfQualifiedPoints] = j; + if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough break; } } } - int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 0 : numOfReads - numOfQualifiedPoints; +// int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : numOfReads - numOfQualifiedPoints; for (int32_t j = 0; j < numOfQualifiedPoints; ++j) { for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { int16_t colIndexInBuffer = pQuery->pSelectExpr[col].pBase.colInfo.colIdxInBuf; int32_t bytes = GET_COLUMN_BYTES(pQuery, col); pData = pQuery->sdata[col]->data + (pQuery->pointsOffset + j) * bytes; - pRead = sdata[colIndexInBuffer]->data + ids[j + start] * bytes; + pRead = sdata[colIndexInBuffer]->data + ids[j/* + start*/] * bytes; memcpy(pData, pRead, bytes); } @@ -1823,7 +1825,7 @@ int vnodeUpdateFileMagic(int vnode, int fileId) { } int vnodeInitFile(int vnode) { - int code = 0; + int code = TSDB_CODE_SUCCESS; SVnodeObj *pVnode = vnodeList + vnode; pVnode->maxFiles = pVnode->cfg.daysToKeep / pVnode->cfg.daysPerFile + 1; diff --git a/src/system/detail/src/vnodeImport.c b/src/system/detail/src/vnodeImport.c index f7f01a3c69946238130746106f387d536eb0bd6a..7ebab90f0baed0b79936fced88c3a129cff6f170 100644 --- a/src/system/detail/src/vnodeImport.c +++ b/src/system/detail/src/vnodeImport.c @@ -119,7 +119,7 @@ int vnodeFindKeyInCache(SImportInfo *pImport, int order) { if (pInfo->commitPoint >= pObj->pointsPerBlock) pImport->slot = (pImport->slot + 1) % pInfo->maxBlocks; pImport->pos = 0; pImport->key = 0; - dTrace("vid:%d sid:%d id:%s, key:%ld, import to head of cache", pObj->vnode, pObj->sid, pObj->meterId, key); + dTrace("vid:%d sid:%d id:%s, key:%" PRId64 ", import to head of cache", pObj->vnode, pObj->sid, pObj->meterId, key); code = 0; } else { pImport->slot = query.slot; @@ -184,8 +184,8 @@ int vnodeImportPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi vnodeGetValidDataRange(pObj->vnode, now, &minKey, &maxKey); if (firstKey < minKey || firstKey > maxKey || lastKey < minKey || lastKey > maxKey) { dError( - "vid:%d sid:%d id:%s, invalid timestamp to import, rows:%d firstKey: %ld lastKey: %ld minAllowedKey:%ld " - "maxAllowedKey:%ld", + "vid:%d sid:%d id:%s, invalid timestamp to import, rows:%d firstKey: %" PRId64 " lastKey: %" PRId64 " minAllowedKey:%" PRId64 " " + "maxAllowedKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey, minKey, maxKey); return TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; } @@ -221,7 +221,7 @@ int vnodeImportPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi SImportInfo import = {0}; - dTrace("vid:%d sid:%d id:%s, try to import %d rows data, firstKey:%ld, lastKey:%ld, object lastKey:%ld", + dTrace("vid:%d sid:%d id:%s, try to import %d rows data, firstKey:%" PRId64 ", lastKey:%" PRId64 ", object lastKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey, pObj->lastKey); import.firstKey = firstKey; @@ -491,7 +491,7 @@ static int vnodeLoadNeededBlockData(SMeterObj *pObj, SImportHandle *pHandle, int lseek(dfd, pBlock->offset, SEEK_SET); if (read(dfd, (void *)(pHandle->pField), pHandle->pFieldSize) < 0) { - dError("vid:%d sid:%d meterId:%s, failed to read data file, size:%ld reason:%s", pVnode->vnode, pObj->sid, + dError("vid:%d sid:%d meterId:%s, failed to read data file, size:%zu reason:%s", pVnode->vnode, pObj->sid, pObj->meterId, pHandle->pFieldSize, strerror(errno)); *code = TSDB_CODE_FILE_CORRUPTED; return -1; @@ -610,7 +610,7 @@ static int vnodeCloseImportFiles(SMeterObj *pObj, SImportHandle *pHandle) { lseek(pVnode->nfd, 0, SEEK_END); lseek(pVnode->hfd, pHandle->nextNo0Offset, SEEK_SET); if (tsendfile(pVnode->nfd, pVnode->hfd, NULL, pHandle->hfSize - pHandle->nextNo0Offset) < 0) { - dError("vid:%d sid:%d meterId:%s, failed to sendfile, size:%ld, 
reason:%s", pObj->vnode, pObj->sid, + dError("vid:%d sid:%d meterId:%s, failed to sendfile, size:%" PRId64 ", reason:%s", pObj->vnode, pObj->sid, pObj->meterId, pHandle->hfSize - pHandle->nextNo0Offset, strerror(errno)); return -1; } @@ -627,7 +627,7 @@ static int vnodeCloseImportFiles(SMeterObj *pObj, SImportHandle *pHandle) { taosCalcChecksumAppend(0, (uint8_t *)(pHandle->pHeader), pHandle->pHeaderSize); lseek(pVnode->nfd, TSDB_FILE_HEADER_LEN, SEEK_SET); if (twrite(pVnode->nfd, (void *)(pHandle->pHeader), pHandle->pHeaderSize) < 0) { - dError("vid:%d sid:%d meterId:%s, failed to wirte SCompHeader part, size:%ld, reason:%s", pObj->vnode, pObj->sid, + dError("vid:%d sid:%d meterId:%s, failed to wirte SCompHeader part, size:%zu, reason:%s", pObj->vnode, pObj->sid, pObj->meterId, pHandle->pHeaderSize, strerror(errno)); return -1; } @@ -911,6 +911,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int blockIter.nextKey = maxFileKey + 1; } else { // Case 3. need to search the block for slot and pos if (key == minKey || key == maxKey) { + if (tsAffectedRowsMod) pointsImported++; payloadIter++; continue; } @@ -939,6 +940,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int } while (left < right); if (key == blockMinKey || key == blockMaxKey) { // duplicate key + if (tsAffectedRowsMod) pointsImported++; payloadIter++; continue; } @@ -955,6 +957,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int if (key == importHandle.pBlocks[blockIter.slot].keyFirst || key == importHandle.pBlocks[blockIter.slot].keyLast) { + if (tsAffectedRowsMod) pointsImported++; payloadIter++; continue; } @@ -976,6 +979,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int importHandle.data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, pBlock->numOfPoints, key, TSQL_SO_ASC); assert(pos != 0); if (KEY_AT_INDEX(importHandle.data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, sizeof(TSKEY), pos) == key) { + if (tsAffectedRowsMod) pointsImported++; payloadIter++; continue; } @@ -1106,6 +1110,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int if (KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) == KEY_AT_INDEX(importHandle.data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, sizeof(TSKEY), blockIter.pos)) { // duplicate key + if (tsAffectedRowsMod) pointsImported++; payloadIter++; continue; } else if (KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) < @@ -1320,7 +1325,10 @@ int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int pImport->lastKey = lastKey; for (payloadIter = 0; payloadIter < rows; payloadIter++) { TSKEY key = KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter); - if (key == pObj->lastKey) continue; + if (key == pObj->lastKey) { + if (tsAffectedRowsMod) rowsImported++; + continue; + } if (key > pObj->lastKey) { // Just as insert pImport->slot = pInfo->currentSlot; pImport->pos = pInfo->cacheBlocks[pImport->slot]->numOfPoints; @@ -1333,11 +1341,12 @@ int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int } if (pImport->firstKey != pImport->key) break; + if (tsAffectedRowsMod) rowsImported++; } } if (payloadIter == rows) { - pImport->importedRows = 0; + pImport->importedRows += rowsImported; code = 0; goto _exit; } @@ -1470,6 +1479,7 @@ int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int payloadIter++; } else { + if (tsAffectedRowsMod) rowsImported++; payloadIter++; continue; } @@ 
-1518,7 +1528,7 @@ int vnodeImportDataToFiles(SImportInfo *pImport, char *payload, const int rows) assert(nrows > 0); - dTrace("vid:%d sid:%d meterId:%s, %d rows of data will be imported to file %d, srow:%d firstKey:%ld lastKey:%ld", + dTrace("vid:%d sid:%d meterId:%s, %d rows of data will be imported to file %d, srow:%d firstKey:%" PRId64 " lastKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, nrows, fid, srow, KEY_AT_INDEX(payload, pObj->bytesPerPoint, srow), KEY_AT_INDEX(payload, pObj->bytesPerPoint, (srow + nrows - 1))); diff --git a/src/system/detail/src/vnodeMeter.c b/src/system/detail/src/vnodeMeter.c index 7cb4870eb27289e16c6ea3b040751a5af2a986a9..79610a73ad1b368def881f3f80979cb790f76bfe 100644 --- a/src/system/detail/src/vnodeMeter.c +++ b/src/system/detail/src/vnodeMeter.c @@ -77,8 +77,14 @@ int vnodeCreateMeterObjFile(int vnode) { sprintf(fileName, "%s/vnode%d/meterObj.v%d", tsDirectory, vnode, vnode); fp = fopen(fileName, "w+"); if (fp == NULL) { - dError("failed to create vnode:%d file:%s", vnode, fileName); - return -1; + dError("failed to create vnode:%d file:%s, errno:%d, reason:%s", vnode, fileName, errno, strerror(errno)); + if (errno == EACCES) { + return TSDB_CODE_NO_DISK_PERMISSIONS; + } else if (errno == ENOSPC) { + return TSDB_CODE_SERV_NO_DISKSPACE; + } else { + return TSDB_CODE_VG_INIT_FAILED; + } } else { vnodeCreateFileHeader(fp); vnodeUpdateVnodeFileHeader(fp, vnodeList + vnode); @@ -93,7 +99,7 @@ int vnodeCreateMeterObjFile(int vnode) { fclose(fp); } - return 0; + return TSDB_CODE_SUCCESS; } FILE *vnodeOpenMeterObjFile(int vnode) { @@ -271,7 +277,7 @@ int vnodeSaveVnodeCfg(int vnode, SVnodeCfg *pCfg, SVPeerDesc *pDesc) { /* vnodeUpdateFileCheckSum(fp); */ fclose(fp); - return 0; + return TSDB_CODE_SUCCESS; } int vnodeSaveVnodeInfo(int vnode) { @@ -347,7 +353,7 @@ int vnodeRestoreMeterObj(char *buffer, int64_t length) { // taosSetSecurityInfo(pObj->vnode, pObj->sid, pObj->meterId, pObj->spi, pObj->encrypt, pObj->secret, pObj->cipheringKey); - dTrace("vid:%d sid:%d id:%s, meter is restored, uid:%ld", pObj->vnode, pObj->sid, pObj->meterId, pObj->uid); + dTrace("vid:%d sid:%d id:%s, meter is restored, uid:%" PRIu64 "", pObj->vnode, pObj->sid, pObj->meterId, pObj->uid); return TSDB_CODE_SUCCESS; } @@ -487,7 +493,7 @@ int vnodeCreateMeterObj(SMeterObj *pNew, SConnSec *pSec) { vnodeSaveMeterObjToFile(pNew); // vnodeCreateMeterMgmt(pNew, pSec); vnodeCreateStream(pNew); - dTrace("vid:%d, sid:%d id:%s, meterObj is created, uid:%ld", pNew->vnode, pNew->sid, pNew->meterId, pNew->uid); + dTrace("vid:%d, sid:%d id:%s, meterObj is created, uid:%" PRIu64 "", pNew->vnode, pNew->sid, pNew->meterId, pNew->uid); } return code; @@ -563,11 +569,19 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi if (numOfPoints >= (pVnode->cfg.blocksPerMeter - 2) * pObj->pointsPerBlock) { code = TSDB_CODE_BATCH_SIZE_TOO_BIG; - dError("vid:%d sid:%d id:%s, batch size too big, it shall be smaller than:%d", pObj->vnode, pObj->sid, - pObj->meterId, (pVnode->cfg.blocksPerMeter - 2) * pObj->pointsPerBlock); + dError("vid:%d sid:%d id:%s, batch size too big, insert points:%d, it shall be smaller than:%d", pObj->vnode, pObj->sid, + pObj->meterId, numOfPoints, (pVnode->cfg.blocksPerMeter - 2) * pObj->pointsPerBlock); return code; } + /* + * please refer to TBASE-926, data may be lost when the cache is full + */ + if (source == TSDB_DATA_SOURCE_SHELL && pVnode->cfg.replications > 1) { + code = vnodeForwardToPeer(pObj, cont, contLen, TSDB_ACTION_INSERT, sversion); + if 
(code != TSDB_CODE_SUCCESS) return code; + } + SCachePool *pPool = (SCachePool *)pVnode->pCachePool; if (pObj->freePoints < numOfPoints || pObj->freePoints < (pObj->pointsPerBlock << 1) || pPool->notFreeSlots > pVnode->cfg.cacheNumOfBlocks.totalBlocks - 2) { @@ -585,17 +599,17 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi if (code != TSDB_CODE_SUCCESS) return code; } - if (source == TSDB_DATA_SOURCE_SHELL && pVnode->cfg.replications > 1) { - code = vnodeForwardToPeer(pObj, cont, contLen, TSDB_ACTION_INSERT, sversion); - if (code != TSDB_CODE_SUCCESS) return code; - } - if (pObj->sversion < sversion) { dTrace("vid:%d sid:%d id:%s, schema is changed, new:%d old:%d", pObj->vnode, pObj->sid, pObj->meterId, sversion, pObj->sversion); vnodeSendMeterCfgMsg(pObj->vnode, pObj->sid); code = TSDB_CODE_ACTION_IN_PROGRESS; return code; + } else if (pObj->sversion > sversion) { + dTrace("vid:%d sid:%d id:%s, client schema out of date, sql is invalid. client sversion:%d vnode sversion:%d", + pObj->vnode, pObj->sid, pObj->meterId, pObj->sversion, sversion); + code = TSDB_CODE_INVALID_SQL; + return code; } pData = pSubmit->payLoad; @@ -607,7 +621,7 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi TSKEY minAllowedKey = (cfid - pVnode->maxFiles + 1)*pVnode->cfg.daysPerFile*tsMsPerDay[(uint8_t)pVnode->cfg.precision]; TSKEY maxAllowedKey = (cfid + 2)*pVnode->cfg.daysPerFile*tsMsPerDay[(uint8_t)pVnode->cfg.precision] - 2; if (firstKey < minAllowedKey || firstKey > maxAllowedKey || lastKey < minAllowedKey || lastKey > maxAllowedKey) { - dError("vid:%d sid:%d id:%s, vnode lastKeyOnFile:%lld, data is out of range, numOfPoints:%d firstKey:%lld lastKey:%lld minAllowedKey:%lld maxAllowedKey:%lld", + dError("vid:%d sid:%d id:%s, vnode lastKeyOnFile:%" PRId64 ", data is out of range, numOfPoints:%d firstKey:%" PRId64 " lastKey:%" PRId64 " minAllowedKey:%" PRId64 " maxAllowedKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pVnode->lastKeyOnFile, numOfPoints,firstKey, lastKey, minAllowedKey, maxAllowedKey); return TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; } @@ -626,7 +640,7 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi } if (*((TSKEY *)pData) <= pObj->lastKey) { - dWarn("vid:%d sid:%d id:%s, received key:%ld not larger than lastKey:%ld", pObj->vnode, pObj->sid, pObj->meterId, + dWarn("vid:%d sid:%d id:%s, received key:%" PRId64 " not larger than lastKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, *((TSKEY *)pData), pObj->lastKey); pData += pObj->bytesPerPoint; continue; @@ -655,6 +669,7 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi if (pObj->lastKey > pVnode->lastKey) pVnode->lastKey = pObj->lastKey; if (firstKey < pVnode->firstKey) pVnode->firstKey = firstKey; + assert(pVnode->firstKey > 0); pVnode->version++; @@ -663,7 +678,7 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi vnodeClearMeterState(pObj, TSDB_METER_STATE_INSERTING); _over: - dTrace("vid:%d sid:%d id:%s, %d out of %d points are inserted, lastKey:%ld source:%d, vnode total storage: %ld", + dTrace("vid:%d sid:%d id:%s, %d out of %d points are inserted, lastKey:%" PRId64 " source:%d, vnode total storage: %" PRId64 "", pObj->vnode, pObj->sid, pObj->meterId, points, numOfPoints, pObj->lastKey, source, pVnode->vnodeStatistic.totalStorage); diff --git a/src/system/detail/src/vnodeQueryImpl.c b/src/system/detail/src/vnodeQueryImpl.c index 
e3b1f60d495af859b7ce293f30be42e333c1d4d4..81fce50bd6f950c2f50fc60efb4ce251481855bb 100644 --- a/src/system/detail/src/vnodeQueryImpl.c +++ b/src/system/detail/src/vnodeQueryImpl.c @@ -14,6 +14,8 @@ */ #include "os.h" +#include "hash.h" +#include "hashutil.h" #include "taosmsg.h" #include "textbuffer.h" #include "ttime.h" @@ -47,21 +49,17 @@ enum { #define IS_DISK_DATA_BLOCK(q) ((q)->fileId >= 0) -// static int32_t copyDataFromMMapBuffer(int fd, SQInfo *pQInfo, SQueryFilesInfo *pQueryFile, char *buf, uint64_t -// offset, int32_t size); static int32_t readDataFromDiskFile(int fd, SQInfo *pQInfo, SQueryFilesInfo *pQueryFile, char *buf, uint64_t offset, int32_t size); -//__read_data_fn_t readDataFunctor[2] = {copyDataFromMMapBuffer, readDataFromDiskFile}; - -static void vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadInfo); +static void vnodeInitLoadCompBlockInfo(SLoadCompBlockInfo *pCompBlockLoadInfo); static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __block_search_fn_t searchFn, bool loadData); static int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, SQueryRuntimeEnv *pRuntimeEnv, SMeterDataInfo *pMeterHeadDataInfo, int32_t start, int32_t end); -static TSKEY getTimestampInCacheBlock(SCacheBlock *pBlock, int32_t index); +static TSKEY getTimestampInCacheBlock(SQueryRuntimeEnv *pRuntimeEnv, SCacheBlock *pBlock, int32_t index); static TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index); static void savePointPosition(SPositionInfo *position, int32_t fileId, int32_t slot, int32_t pos); @@ -69,23 +67,26 @@ static int32_t getNextDataFileCompInfo(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj static void setGroupOutputBuffer(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes *pResult); -static void getAlignedIntervalQueryRange(SQuery *pQuery, TSKEY keyInData, TSKEY skey, TSKEY ekey); -static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pInfo, - SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, char *sdata, SField *pFields, +static void getAlignedIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY keyInData, TSKEY skey, TSKEY ekey); +static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, + SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, SField *pFields, __block_search_fn_t searchFn); static int32_t saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult); -static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, char *data, - int64_t *pPrimaryData, SBlockInfo *pBlockInfo, int32_t blockStatus, - SField *pFields, __block_search_fn_t searchFn); +static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pMeterDataInfo, + SBlockInfo *pBlockInfo, int32_t blockStatus, SField *pFields, + __block_search_fn_t searchFn); static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx); static int32_t flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, const SQueryRuntimeEnv *pRuntimeEnv); static void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t numOfIncrementRes); static void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t vid); -static void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn); +static TSKEY getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t 
searchFn); static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); +static void doGetAlignedIntervalQueryRangeImpl(SQuery *pQuery, int64_t pKey, int64_t keyFirst, int64_t keyLast, + int64_t *actualSkey, int64_t *actualEkey, int64_t *skey, int64_t *ekey); +static void getNextLogicalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow* pTimeWindow); // check the offset value integrity static FORCE_INLINE int32_t validateHeaderOffsetSegment(SQInfo *pQInfo, char *filePath, int32_t vid, char *data, @@ -108,8 +109,9 @@ static FORCE_INLINE int32_t getCompHeaderStartPosition(SVnodeCfg *pCfg) { static FORCE_INLINE int32_t validateCompBlockOffset(SQInfo *pQInfo, SMeterObj *pMeterObj, SCompHeader *pCompHeader, SQueryFilesInfo *pQueryFileInfo, int32_t headerSize) { if (pCompHeader->compInfoOffset < headerSize || pCompHeader->compInfoOffset > pQueryFileInfo->headerFileSize) { - dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%d is not valid, size:%ld", pQInfo, pMeterObj->vnode, - pMeterObj->sid, pMeterObj->meterId, pCompHeader->compInfoOffset, pQueryFileInfo->headerFileSize); + dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%" PRId64 " is not valid, size:%" PRId64, pQInfo, + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pCompHeader->compInfoOffset, + pQueryFileInfo->headerFileSize); return -1; } @@ -121,8 +123,8 @@ static FORCE_INLINE int32_t validateCompBlockOffset(SQInfo *pQInfo, SMeterObj *p static FORCE_INLINE int32_t validateCompBlockInfoSegment(SQInfo *pQInfo, const char *filePath, int32_t vid, SCompInfo *compInfo, int64_t offset) { if (!taosCheckChecksumWhole((uint8_t *)compInfo, sizeof(SCompInfo))) { - dLError("QInfo:%p vid:%d, failed to read header file:%s, file compInfo broken, offset:%lld", pQInfo, vid, filePath, - offset); + dLError("QInfo:%p vid:%d, failed to read header file:%s, file compInfo broken, offset:%" PRId64, pQInfo, vid, + filePath, offset); return -1; } return 0; @@ -133,7 +135,7 @@ static FORCE_INLINE int32_t validateCompBlockSegment(SQInfo *pQInfo, const char uint32_t size = compInfo->numOfBlocks * sizeof(SCompBlock); if (checksum != taosCalcChecksum(0, (uint8_t *)pBlock, size)) { - dLError("QInfo:%p vid:%d, failed to read header file:%s, file compblock is broken:%ld", pQInfo, vid, filePath, + dLError("QInfo:%p vid:%d, failed to read header file:%s, file compblock is broken:%zu", pQInfo, vid, filePath, (char *)compInfo + sizeof(SCompInfo)); return -1; } @@ -163,6 +165,30 @@ bool isGroupbyNormalCol(SSqlGroupbyExpr *pGroupbyExpr) { return false; } +int16_t getGroupbyColumnType(SQuery *pQuery, SSqlGroupbyExpr *pGroupbyExpr) { + assert(pGroupbyExpr != NULL); + + int32_t colId = -2; + int16_t type = TSDB_DATA_TYPE_NULL; + + for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) { + SColIndexEx *pColIndex = &pGroupbyExpr->columnInfo[i]; + if (pColIndex->flag == TSDB_COL_NORMAL) { + colId = pColIndex->colId; + break; + } + } + + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + if (colId == pQuery->colList[i].data.colId) { + type = pQuery->colList[i].data.type; + break; + } + } + + return type; +} + bool isSelectivityWithTagsQuery(SQuery *pQuery) { bool hasTags = false; int32_t numOfSelectivity = 0; @@ -197,7 +223,7 @@ static bool vnodeIsCompBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj SQuery *pQuery = pRuntimeEnv->pQuery; // check if data file header of this table has been loaded into memory, avoid to reloaded comp Block info - SQueryLoadCompBlockInfo *pLoadCompBlockInfo = 
&pRuntimeEnv->loadCompBlockInfo; + SLoadCompBlockInfo *pLoadCompBlockInfo = &pRuntimeEnv->loadCompBlockInfo; // if vnodeFreeFields is called, the pQuery->pFields is NULL if (pLoadCompBlockInfo->fileListIndex == fileIndex && pLoadCompBlockInfo->sid == pMeterObj->sid && @@ -211,14 +237,14 @@ static bool vnodeIsCompBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj } static void vnodeSetCompBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIndex, int32_t sid) { - SQueryLoadCompBlockInfo *pLoadCompBlockInfo = &pRuntimeEnv->loadCompBlockInfo; + SLoadCompBlockInfo *pCompBlockLoadInfo = &pRuntimeEnv->loadCompBlockInfo; - pLoadCompBlockInfo->sid = sid; - pLoadCompBlockInfo->fileListIndex = fileIndex; - pLoadCompBlockInfo->fileId = pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex].fileID; + pCompBlockLoadInfo->sid = sid; + pCompBlockLoadInfo->fileListIndex = fileIndex; + pCompBlockLoadInfo->fileId = pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex].fileID; } -static void vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadInfo) { +static void vnodeInitLoadCompBlockInfo(SLoadCompBlockInfo *pCompBlockLoadInfo) { pCompBlockLoadInfo->sid = -1; pCompBlockLoadInfo->fileId = -1; pCompBlockLoadInfo->fileListIndex = -1; @@ -226,14 +252,12 @@ static void vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadIn static int32_t vnodeIsDatablockLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex, bool loadPrimaryTS) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SQueryLoadBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; + SQuery * pQuery = pRuntimeEnv->pQuery; + SLoadDataBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; /* this block has been loaded into memory, return directly */ if (pLoadInfo->fileId == pQuery->fileId && pLoadInfo->slotIdx == pQuery->slot && pQuery->slot != -1 && - pLoadInfo->sid == pMeterObj->sid) { - assert(fileIndex == pLoadInfo->fileListIndex); - + pLoadInfo->sid == pMeterObj->sid && pLoadInfo->fileListIndex == fileIndex) { // previous load operation does not load the primary timestamp column, we only need to load the timestamp column if (pLoadInfo->tsLoaded == false && pLoadInfo->tsLoaded != loadPrimaryTS) { return DISK_BLOCK_LOAD_TS; @@ -247,8 +271,8 @@ static int32_t vnodeIsDatablockLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj * static void vnodeSetDataBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex, bool tsLoaded) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SQueryLoadBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; + SQuery * pQuery = pRuntimeEnv->pQuery; + SLoadDataBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; pLoadInfo->fileId = pQuery->fileId; pLoadInfo->slotIdx = pQuery->slot; @@ -257,7 +281,7 @@ static void vnodeSetDataBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj pLoadInfo->tsLoaded = tsLoaded; } -static void vnodeInitDataBlockInfo(SQueryLoadBlockInfo *pBlockLoadInfo) { +static void vnodeInitDataBlockInfo(SLoadDataBlockInfo *pBlockLoadInfo) { pBlockLoadInfo->slotIdx = -1; pBlockLoadInfo->fileId = -1; pBlockLoadInfo->sid = -1; @@ -322,7 +346,7 @@ static void doCloseQueryFileInfoFD(SQueryFilesInfo *pVnodeFilesInfo) { tclose(pVnodeFilesInfo->headerFd); tclose(pVnodeFilesInfo->dataFd); tclose(pVnodeFilesInfo->lastFd); - + pVnodeFilesInfo->current = -1; pVnodeFilesInfo->headerFileSize = -1; } @@ -341,7 +365,7 @@ static void doInitQueryFileInfoFD(SQueryFilesInfo *pVnodeFilesInfo) { */ static int32_t doOpenQueryFile(SQInfo *pQInfo, SQueryFilesInfo 
*pVnodeFileInfo) { SHeaderFileInfo *pHeaderFileInfo = &pVnodeFileInfo->pFileInfo[pVnodeFileInfo->current]; - + /* * current header file is empty or broken, return directly. * @@ -351,10 +375,10 @@ static int32_t doOpenQueryFile(SQInfo *pQInfo, SQueryFilesInfo *pVnodeFileInfo) if (checkIsHeaderFileEmpty(pVnodeFileInfo)) { qTrace("QInfo:%p vid:%d, fileId:%d, index:%d, size:%d, ignore file, empty or broken", pQInfo, pVnodeFileInfo->vnodeId, pHeaderFileInfo->fileID, pVnodeFileInfo->current, pVnodeFileInfo->headerFileSize); - + return -1; } - + pVnodeFileInfo->headerFd = open(pVnodeFileInfo->headerFilePath, O_RDONLY); if (!FD_VALID(pVnodeFileInfo->headerFd)) { dError("QInfo:%p failed open head file:%s reason:%s", pQInfo, pVnodeFileInfo->headerFilePath, strerror(errno)); @@ -381,7 +405,7 @@ static void doCloseQueryFiles(SQueryFilesInfo *pVnodeFileInfo) { assert(pVnodeFileInfo->current < pVnodeFileInfo->numOfFiles && pVnodeFileInfo->current >= 0); pVnodeFileInfo->headerFileSize = -1; - + doCloseQueryFileInfoFD(pVnodeFileInfo); } @@ -456,38 +480,38 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim if (ret != TSDB_CODE_SUCCESS) { return -1; // failed to load the header file data into memory } - - char* buf = calloc(1, getCompHeaderSegSize(pCfg)); + + char * buf = calloc(1, getCompHeaderSegSize(pCfg)); SQueryFilesInfo *pVnodeFileInfo = &pRuntimeEnv->vnodeFileInfo; - + lseek(pVnodeFileInfo->headerFd, TSDB_FILE_HEADER_LEN, SEEK_SET); read(pVnodeFileInfo->headerFd, buf, getCompHeaderSegSize(pCfg)); - + // check the offset value integrity - if (validateHeaderOffsetSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, pMeterObj->vnode, buf - TSDB_FILE_HEADER_LEN, - getCompHeaderSegSize(pCfg)) < 0) { + if (validateHeaderOffsetSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, pMeterObj->vnode, + buf - TSDB_FILE_HEADER_LEN, getCompHeaderSegSize(pCfg)) < 0) { free(buf); return -1; } SCompHeader *compHeader = (SCompHeader *)(buf + sizeof(SCompHeader) * pMeterObj->sid); - + // no data in this file for specified meter, abort if (compHeader->compInfoOffset == 0) { free(buf); return 0; } - + // corrupted file may cause the invalid compInfoOffset, check needs if (validateCompBlockOffset(pQInfo, pMeterObj, compHeader, &pRuntimeEnv->vnodeFileInfo, getCompHeaderStartPosition(pCfg)) < 0) { free(buf); return -1; } - + lseek(pVnodeFileInfo->headerFd, compHeader->compInfoOffset, SEEK_SET); - - SCompInfo compInfo = {0}; + + SCompInfo compInfo = {0}; read(pVnodeFileInfo->headerFd, &compInfo, sizeof(SCompInfo)); // check compblock info integrity @@ -517,14 +541,14 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim // prepare buffer to hold compblock data if (pQuery->blockBufferSize != bufferSize) { pQuery->pBlock = realloc(pQuery->pBlock, bufferSize); - pQuery->blockBufferSize = (int32_t) bufferSize; + pQuery->blockBufferSize = (int32_t)bufferSize; } memset(pQuery->pBlock, 0, bufferSize); - + // read data: comp block + checksum read(pVnodeFileInfo->headerFd, pQuery->pBlock, compBlockSize + sizeof(TSCKSUM)); - TSCKSUM checksum = *(TSCKSUM*)((char*)pQuery->pBlock + compBlockSize); + TSCKSUM checksum = *(TSCKSUM *)((char *)pQuery->pBlock + compBlockSize); // check comp block integrity if (validateCompBlockSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, &compInfo, (char *)pQuery->pBlock, @@ -543,7 +567,7 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim pSummary->totalCompInfoSize += compBlockSize; 
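The vnodeGetCompBlockInfo changes above keep a consistent discipline when walking the query header file: read a fixed-size segment at a known offset, verify its checksum before trusting any offset embedded in it, and bounds-check that offset against the header file size before seeking. A compact sketch of that discipline, under stated assumptions: verify_checksum() stands in for TDengine's taosCheckChecksumWhole(), and HeaderSlot is an illustrative stand-in for the on-disk SCompHeader layout:

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>

typedef struct {
  int64_t compInfoOffset;  /* where this table's comp-block list begins */
} HeaderSlot;              /* illustrative stand-in for SCompHeader */

/* Stand-in for taosCheckChecksumWhole(): report whether the checksum
 * appended to buf matches its payload. Always passes here so the sketch
 * compiles; the real helper validates a CRC over the whole buffer. */
static int verify_checksum(const uint8_t *buf, size_t len) {
  (void)buf;
  (void)len;
  return 1;
}

/* Resolve one table's comp-info offset from an in-memory header segment.
 * Returns the offset, 0 when the table has no data in this file, and -1
 * when the segment or the embedded offset is corrupt. */
static int64_t lookup_comp_info_offset(const uint8_t *segment, size_t segSize,
                                       int sid, int64_t headerFileSize) {
  if (!verify_checksum(segment, segSize)) {
    return -1;  /* offset segment is broken; trust nothing inside it */
  }

  HeaderSlot slot;
  memcpy(&slot, segment + sizeof(HeaderSlot) * (size_t)sid, sizeof(slot));

  if (slot.compInfoOffset == 0) {
    return 0;  /* no data for this table in the current file */
  }

  /* A corrupted header can carry an out-of-range offset; reject it before
   * any lseek()/read() consumes it, as validateCompBlockOffset does above. */
  if (slot.compInfoOffset < 0 || slot.compInfoOffset > headerFileSize) {
    return -1;
  }
  return slot.compInfoOffset;
}
```

Validating before seeking is what lets the query layer fail a single table's scan with a clean error code instead of reading garbage comp blocks out of a damaged header file.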
pSummary->loadCompInfoUs += (et - st); - + free(buf); return pQuery->numOfBlocks; } @@ -752,7 +776,7 @@ static int32_t loadColumnIntoMem(SQuery *pQuery, SQueryFilesInfo *pQueryFileInfo // check column data integrity if (checksum != taosCalcChecksum(0, (const uint8_t *)dst, pFields[col].len)) { - dLError("QInfo:%p, column data checksum error, file:%s, col: %d, offset:%ld", GET_QINFO_ADDR(pQuery), + dLError("QInfo:%p, column data checksum error, file:%s, col: %d, offset:%" PRId64, GET_QINFO_ADDR(pQuery), pQueryFileInfo->dataFilePath, col, offset); return -1; @@ -794,8 +818,8 @@ static int32_t loadDataBlockFieldsInfo(SQueryRuntimeEnv *pRuntimeEnv, SCompBlock // check fields integrity if (!taosCheckChecksumWhole((uint8_t *)(*pField), size)) { - dLError("QInfo:%p vid:%d sid:%d id:%s, slot:%d, failed to read sfields, file:%s, sfields area broken:%lld", pQInfo, - pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pVnodeFilesInfo->dataFilePath, + dLError("QInfo:%p vid:%d sid:%d id:%s, slot:%d, failed to read sfields, file:%s, sfields area broken:%" PRId64, + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pVnodeFilesInfo->dataFilePath, pBlock->offset); return -1; } @@ -849,20 +873,20 @@ static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryR int32_t status = vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, fileIdx, loadPrimaryCol); if (status == DISK_BLOCK_NO_NEED_TO_LOAD) { dTrace( - "QInfo:%p vid:%d sid:%d id:%s, fileId:%d, data block has been loaded, no need to load again, ts:%d, slot:%d, " - "brange:%lld-%lld, rows:%d", + "QInfo:%p vid:%d sid:%d id:%s, fileId:%d, data block has been loaded, no need to load again, ts:%d, slot:%d," + " brange:%lld-%lld, rows:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, loadPrimaryCol, pQuery->slot, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints); - + if (loadSField && (pQuery->pFields == NULL || pQuery->pFields[pQuery->slot] == NULL)) { loadDataBlockFieldsInfo(pRuntimeEnv, pBlock, &pQuery->pFields[pQuery->slot]); } - + return TSDB_CODE_SUCCESS; } else if (status == DISK_BLOCK_LOAD_TS) { dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, data block has been loaded, incrementally load ts", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId); - + assert(PRIMARY_TSCOL_LOADED(pQuery) == false && loadSField == true); if (pQuery->pFields == NULL || pQuery->pFields[pQuery->slot] == NULL) { loadDataBlockFieldsInfo(pRuntimeEnv, pBlock, &pQuery->pFields[pQuery->slot]); @@ -870,7 +894,7 @@ static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryR // load primary timestamp int32_t ret = loadPrimaryTSColumn(pRuntimeEnv, pBlock, pField, &columnBytes); - + vnodeSetDataBlockInfoLoaded(pRuntimeEnv, pMeterObj, fileIdx, loadPrimaryCol); return ret; } @@ -967,7 +991,7 @@ static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryR } // todo ignore the blockType, pass the pQuery into this function -SBlockInfo getBlockBasicInfo(void *pBlock, int32_t blockType) { +SBlockInfo getBlockBasicInfo(SQueryRuntimeEnv *pRuntimeEnv, void *pBlock, int32_t blockType) { SBlockInfo blockInfo = {0}; if (IS_FILE_BLOCK(blockType)) { SCompBlock *pDiskBlock = (SCompBlock *)pBlock; @@ -979,8 +1003,8 @@ SBlockInfo getBlockBasicInfo(void *pBlock, int32_t blockType) { } else { SCacheBlock *pCacheBlock = (SCacheBlock *)pBlock; - blockInfo.keyFirst = getTimestampInCacheBlock(pCacheBlock, 0); - 
blockInfo.keyLast = getTimestampInCacheBlock(pCacheBlock, pCacheBlock->numOfPoints - 1); + blockInfo.keyFirst = getTimestampInCacheBlock(pRuntimeEnv, pCacheBlock, 0); + blockInfo.keyLast = getTimestampInCacheBlock(pRuntimeEnv, pCacheBlock, pCacheBlock->numOfPoints - 1); blockInfo.size = pCacheBlock->numOfPoints; blockInfo.numOfCols = pCacheBlock->pMeterObj->numOfColumns; } @@ -1013,7 +1037,7 @@ static bool checkQueryRangeAgainstNextBlock(SBlockInfo *pBlockInfo, SQueryRuntim */ static bool queryCompleteInBlock(SQuery *pQuery, SBlockInfo *pBlockInfo, int32_t forwardStep) { if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)) { - assert(pQuery->checkBufferInLoop == 1 && pQuery->over == QUERY_RESBUF_FULL && pQuery->pointsOffset == 0); + // assert(pQuery->checkBufferInLoop == 1 && pQuery->over == QUERY_RESBUF_FULL && pQuery->pointsOffset == 0); assert((QUERY_IS_ASC_QUERY(pQuery) && forwardStep + pQuery->pos <= pBlockInfo->size) || (!QUERY_IS_ASC_QUERY(pQuery) && pQuery->pos - forwardStep + 1 >= 0)); @@ -1050,36 +1074,169 @@ void savePointPosition(SPositionInfo *position, int32_t fileId, int32_t slot, in position->pos = pos; } -static FORCE_INLINE void saveNextAccessPositionInCache(SPositionInfo *position, int32_t slotIdx, int32_t pos) { - savePointPosition(position, -1, slotIdx, pos); +bool isCacheBlockValid(SQuery *pQuery, SCacheBlock *pBlock, SMeterObj *pMeterObj) { + if (pMeterObj != pBlock->pMeterObj || pBlock->blockId > pQuery->blockId) { + SMeterObj *pNewMeterObj = pBlock->pMeterObj; + char * id = (pNewMeterObj != NULL) ? pNewMeterObj->meterId : NULL; + + dWarn( + "QInfo:%p vid:%d sid:%d id:%s, cache block is overwritten, slot:%d blockId:%d qBlockId:%d, meterObj:%p, " + "blockMeterObj:%p, blockMeter id:%s, first:%d, last:%d, numOfBlocks:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pBlock->blockId, + pQuery->blockId, pMeterObj, pNewMeterObj, id, pQuery->firstSlot, pQuery->currentSlot, pQuery->numOfBlocks); + + return false; + } + + /* + * The check for an empty block: + * pBlock->numOfPoints == 0. There is an empty block, which is caused by the allocate-and-write-into-cache + * procedure. The block has been allocated, but no data has been put into it yet. If the block is the last + * (newly allocated) block, abort the query. Otherwise, skip it and go on. + */ + if (pBlock->numOfPoints == 0) { + dWarn( + "QInfo:%p vid:%d sid:%d id:%s, cache block is empty. 
slot:%d first:%d, last:%d, numOfBlocks:%d, " + "allocated but no data written yet.", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pQuery->firstSlot, + pQuery->currentSlot, pQuery->numOfBlocks); + + return false; + } + + return true; } // todo all functions that call this function should check the returned data blocks status -SCacheBlock *getCacheDataBlock(SMeterObj *pMeterObj, SQuery *pQuery, int32_t slot) { +SCacheBlock *getCacheDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntimeEnv, int32_t slot) { + SQuery *pQuery = pRuntimeEnv->pQuery; + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; - if (pCacheInfo == NULL || pCacheInfo->cacheBlocks == NULL || slot < 0) { + if (pCacheInfo == NULL || pCacheInfo->cacheBlocks == NULL || slot < 0 || slot >= pCacheInfo->maxBlocks) { return NULL; } - assert(slot < pCacheInfo->maxBlocks); + getBasicCacheInfoSnapshot(pQuery, pCacheInfo, pMeterObj->vnode); SCacheBlock *pBlock = pCacheInfo->cacheBlocks[slot]; - if (pBlock == NULL) { - dError("QInfo:%p NULL Block In Cache, available block:%d, last block:%d, accessed null block:%d, pBlockId:%d", - GET_QINFO_ADDR(pQuery), pCacheInfo->numOfBlocks, pCacheInfo->currentSlot, slot, pQuery->blockId); + if (pBlock == NULL) { // the cache info snapshot must exist. + int32_t curNumOfBlocks = pCacheInfo->numOfBlocks; + int32_t curSlot = pCacheInfo->currentSlot; + + dError( + "QInfo:%p NULL Block In Cache, snapshot (available blocks:%d, last block:%d), current (available blocks:%d, " + "last block:%d), accessed null block:%d, pBlockId:%d", + GET_QINFO_ADDR(pQuery), pQuery->numOfBlocks, pQuery->currentSlot, curNumOfBlocks, curSlot, slot, + pQuery->blockId); + return NULL; } - if (pMeterObj != pBlock->pMeterObj || pBlock->blockId > pQuery->blockId) { - dWarn( - "QInfo:%p vid:%d sid:%d id:%s, cache block is overwritten, slot:%d blockId:%d qBlockId:%d, meterObj:%p, " - "blockMeterObj:%p", - GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pBlock->blockId, - pQuery->blockId, pMeterObj, pBlock->pMeterObj); + // block is empty or does not belong to the current table, return NULL value + if (!isCacheBlockValid(pQuery, pBlock, pMeterObj)) { + return NULL; + } + + // the accessed cache block has been loaded already, return directly + if (vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, -1, true) == DISK_BLOCK_NO_NEED_TO_LOAD) { + TSKEY skey = getTimestampInCacheBlock(pRuntimeEnv, pBlock, 0); + TSKEY ekey = getTimestampInCacheBlock(pRuntimeEnv, pBlock, pBlock->numOfPoints - 1); + + dTrace( + "QInfo:%p vid:%d sid:%d id:%s, fileId:%d, cache block has been loaded, no need to load again, ts:%d, " + "slot:%d, brange:%lld-%lld, rows:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, 1, pQuery->slot, + skey, ekey, pBlock->numOfPoints); + + return &pRuntimeEnv->cacheBlock; + } + + // copy the block structure as well as the block data into the local buffer + memcpy(&pRuntimeEnv->cacheBlock, pBlock, sizeof(SCacheBlock)); + + SCacheBlock *pNewBlock = &pRuntimeEnv->cacheBlock; + + // the already-committed data points will be ignored + int32_t offset = 0; + int32_t numOfPoints = pNewBlock->numOfPoints; + if (pQuery->firstSlot == pQuery->commitSlot) { + assert(pQuery->commitPoint >= 0 && pQuery->commitPoint <= pNewBlock->numOfPoints); + + offset = pQuery->commitPoint; + numOfPoints = pNewBlock->numOfPoints - offset; + + if (offset != 0) { + dTrace( + "%p ignore the data in cache block that are committed 
already, numOfBlocks:%d slot:%d ignore points:%d. " + "first:%d last:%d", + GET_QINFO_ADDR(pQuery), pQuery->numOfBlocks, pQuery->slot, pQuery->commitPoint, pQuery->firstSlot, + pQuery->currentSlot); + } + + pNewBlock->numOfPoints = numOfPoints; + + // all points in the current block are committed already, ignore it + if (pNewBlock->numOfPoints == 0) { + dTrace( + "%p ignore current cache block, all points are committed already, numOfBlocks:%d slot:%d, " + "first:%d last:%d", + GET_QINFO_ADDR(pQuery), pQuery->numOfBlocks, pQuery->slot, pQuery->firstSlot, pQuery->currentSlot); + return NULL; + } + } + + // copy the data from the cache into the temporarily allocated buffer + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + SColumnInfoEx *pColumnInfoEx = &pQuery->colList[i]; + + int16_t columnIndex = pColumnInfoEx->colIdx; + int16_t columnIndexInBuf = pColumnInfoEx->colIdxInBuf; + + SColumn *pCol = &pMeterObj->schema[columnIndex]; + + int16_t bytes = pCol->bytes; + int16_t type = pCol->type; + + char *dst = pRuntimeEnv->colDataBuffer[columnIndexInBuf]->data; + + if (pQuery->colList[i].colIdx != -1) { + assert(pCol->colId == pQuery->colList[i].data.colId && bytes == pColumnInfoEx->data.bytes && + type == pColumnInfoEx->data.type); + + memcpy(dst, pBlock->offset[columnIndex] + offset * bytes, numOfPoints * bytes); + } else { + setNullN(dst, type, bytes, numOfPoints); + } + } + + assert(numOfPoints == pNewBlock->numOfPoints); + + // if the primary timestamp column is not loaded by default, always load it into the buffer here + if (!PRIMARY_TSCOL_LOADED(pQuery)) { + memcpy(pRuntimeEnv->primaryColBuffer->data, pBlock->offset[0] + offset * TSDB_KEYSIZE, TSDB_KEYSIZE * numOfPoints); + } + + pQuery->fileId = -1; + pQuery->slot = slot; + + if (!isCacheBlockValid(pQuery, pNewBlock, pMeterObj)) { return NULL; } - return pBlock; + /* + * the accessed cache block still belongs to the current meterObj, so go on and + * update the loaded data block info + */ + vnodeSetDataBlockInfoLoaded(pRuntimeEnv, pMeterObj, -1, true); + + TSKEY skey = getTimestampInCacheBlock(pRuntimeEnv, pNewBlock, 0); + TSKEY ekey = getTimestampInCacheBlock(pRuntimeEnv, pNewBlock, numOfPoints - 1); + + dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, load cache block, ts:%d, slot:%d, brange:%lld-%lld, rows:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, 1, pQuery->slot, + skey, ekey, numOfPoints); + + return pNewBlock; } static SCompBlock *getDiskDataBlock(SQuery *pQuery, int32_t slot) { @@ -1087,11 +1244,13 @@ static SCompBlock *getDiskDataBlock(SQuery *pQuery, int32_t slot) { return &pQuery->pBlock[slot]; } -static void *getGenericDataBlock(SMeterObj *pMeterObj, SQuery *pQuery, int32_t slot) { +static void *getGenericDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntimeEnv, int32_t slot) { + SQuery *pQuery = pRuntimeEnv->pQuery; + if (IS_DISK_DATA_BLOCK(pQuery)) { return getDiskDataBlock(pQuery, slot); } else { - return getCacheDataBlock(pMeterObj, pQuery, slot); + return getCacheDataBlock(pMeterObj, pRuntimeEnv, slot); } } @@ -1180,14 +1339,6 @@ static bool getQualifiedDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRunti return true; } -static char *doGetDataBlockImpl(const char *sdata, int32_t colIdx, bool isDiskFileBlock) { - if (isDiskFileBlock) { - return ((SData **)sdata)[colIdx]->data; - } else { - return ((SCacheBlock *)sdata)->offset[colIdx]; - } -} - static SField *getFieldInfo(SQuery *pQuery, SBlockInfo *pBlockInfo, SField *pFields, int32_t column) { // no SField info exist, or column index larger than the
output column, no result. if (pFields == NULL || column >= pQuery->numOfOutputCols) { @@ -1238,30 +1389,13 @@ static bool hasNullVal(SQuery *pQuery, int32_t col, SBlockInfo *pBlockInfo, SFie return ret; } -static char *doGetDataBlocks(bool isDiskFileBlock, SQueryRuntimeEnv *pRuntimeEnv, char *data, int32_t colIdx, - int32_t colId, int16_t type, int16_t bytes, int32_t tmpBufIndex) { - char *pData = NULL; - - if (isDiskFileBlock) { - pData = doGetDataBlockImpl(data, colIdx, isDiskFileBlock); - } else { - SCacheBlock *pCacheBlock = (SCacheBlock *)data; - SMeterObj * pMeter = pRuntimeEnv->pMeterObj; - - if (colIdx < 0 || pMeter->numOfColumns <= colIdx || pMeter->schema[colIdx].colId != colId) { - // data in cache is not current available, we need fill the data block in null value - pData = pRuntimeEnv->colDataBuffer[tmpBufIndex]->data; - setNullN(pData, type, bytes, pCacheBlock->numOfPoints); - } else { - pData = doGetDataBlockImpl(data, colIdx, isDiskFileBlock); - } - } - +static char *doGetDataBlocks(SQuery *pQuery, SData **data, int32_t colIdx) { + assert(colIdx >= 0 && colIdx < pQuery->numOfCols); + char *pData = data[colIdx]->data; return pData; } -static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, char *data, SArithmeticSupport *sas, int32_t col, - int32_t size, bool isDiskFileBlock) { +static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas, int32_t col, int32_t size) { SQuery * pQuery = pRuntimeEnv->pQuery; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; @@ -1280,21 +1414,17 @@ static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, char *data, SArithmeti } for (int32_t i = 0; i < pQuery->numOfCols; ++i) { - int32_t colIdx = isDiskFileBlock ? pQuery->colList[i].colIdxInBuf : pQuery->colList[i].colIdx; - SColumnInfo *pColMsg = &pQuery->colList[i].data; - char * pData = doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, colIdx, pColMsg->colId, pColMsg->type, - pColMsg->bytes, pQuery->colList[i].colIdxInBuf); + char * pData = doGetDataBlocks(pQuery, pRuntimeEnv->colDataBuffer, pQuery->colList[i].colIdxInBuf); sas->elemSize[i] = pColMsg->bytes; sas->data[i] = pData + pCtx->startOffset * sas->elemSize[i]; // start from the offset } + sas->numOfCols = pQuery->numOfCols; sas->offset = 0; } else { // other type of query function SColIndexEx *pCol = &pQuery->pSelectExpr[col].pBase.colInfo; - int32_t colIdx = isDiskFileBlock ? pCol->colIdxInBuf : pCol->colIdx; - if (TSDB_COL_IS_TAG(pCol->flag)) { dataBlock = NULL; } else { @@ -1303,8 +1433,7 @@ static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, char *data, SArithmeti * the remain meter may not have the required column in cache actually. * So, the validation of required column in cache with the corresponding meter schema is reinforced. */ - dataBlock = doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, colIdx, pCol->colId, pCtx[col].inputType, - pCtx[col].inputBytes, pCol->colIdxInBuf); + dataBlock = doGetDataBlocks(pQuery, pRuntimeEnv->colDataBuffer, pCol->colIdxInBuf); } } @@ -1316,17 +1445,17 @@ static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, char *data, SArithmeti * @param pRuntimeEnv * @param forwardStep * @param primaryKeyCol - * @param data * @param pFields * @param isDiskFileBlock * @return the incremental number of output value, so it maybe 0 for fixed number of query, * such as count/min/max etc. 
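 * note: the isDiskFileBlock parameter is gone from the new signature; the function now derives it via IS_FILE_BLOCK(pRuntimeEnv->blockStatus), so the @param entry above describes a value that is computed internally.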
*/ static int32_t blockwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t forwardStep, TSKEY *primaryKeyCol, - char *data, SField *pFields, SBlockInfo *pBlockInfo, bool isDiskFileBlock) { + SField *pFields, SBlockInfo *pBlockInfo) { SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; SQuery * pQuery = pRuntimeEnv->pQuery; + bool isDiskFileBlock = IS_FILE_BLOCK(pRuntimeEnv->blockStatus); int64_t prevNumOfRes = getNumOfResult(pRuntimeEnv); SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutputCols, sizeof(SArithmeticSupport)); @@ -1337,7 +1466,7 @@ static int32_t blockwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t SField dummyField = {0}; bool hasNull = hasNullVal(pQuery, k, pBlockInfo, pFields, isDiskFileBlock); - char *dataBlock = getDataBlocks(pRuntimeEnv, data, &sasArray[k], k, forwardStep, isDiskFileBlock); + char *dataBlock = getDataBlocks(pRuntimeEnv, &sasArray[k], k, forwardStep); SField *tpField = NULL; @@ -1353,12 +1482,9 @@ static int32_t blockwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t } } - TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->skey : pQuery->ekey; - - int64_t alignedTimestamp = - taosGetIntervalStartTimestamp(ts, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); - setExecParams(pQuery, &pCtx[k], alignedTimestamp, dataBlock, (char *)primaryKeyCol, forwardStep, functionId, - tpField, hasNull, pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); + TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? pRuntimeEnv->intervalWindow.skey : pRuntimeEnv->intervalWindow.ekey; + setExecParams(pQuery, &pCtx[k], ts, dataBlock, (char *)primaryKeyCol, forwardStep, functionId, tpField, hasNull, + pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); } /* @@ -1411,7 +1537,7 @@ static bool needToLoadDataBlock(SQuery *pQuery, SField *pField, SQLFunctionCtx * if (!vnodeSupportPrefilter(pFilterInfo->info.data.type)) { continue; } - + // all points in current column are NULL, no need to check its boundary value if (pField[colIndex].numOfNullPoints == numOfTotalPoints) { continue; @@ -1446,153 +1572,339 @@ static bool needToLoadDataBlock(SQuery *pQuery, SField *pField, SQLFunctionCtx * return true; } -static int32_t setGroupResultForKey(SQueryRuntimeEnv *pRuntimeEnv, char *pData, int16_t type, char *columnData) { - SOutputRes *pOutputRes = NULL; +static SOutputRes *doSetSlidingWindowFromKey(SSlidingWindowInfo *pSlidingWindowInfo, char *pData, int16_t bytes, + SWindowStatus **pStatus) { + int32_t p = -1; - // ignore the null value - if (isNull(pData, type)) { - return -1; - } + int32_t *p1 = (int32_t *)taosGetDataFromHash(pSlidingWindowInfo->hashList, pData, bytes); + if (p1 != NULL) { + p = *p1; - int64_t t = 0; - switch (type) { - case TSDB_DATA_TYPE_TINYINT: - t = GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - t = GET_INT64_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - t = GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_INT: - default: - t = GET_INT32_VAL(pData); - break; - } + pSlidingWindowInfo->curIndex = p; + if (pStatus != NULL) { + *pStatus = &pSlidingWindowInfo->pStatus[p]; + } + } else { // more than the capacity, reallocate the resources + if (pSlidingWindowInfo->size >= pSlidingWindowInfo->capacity) { + int64_t newCap = pSlidingWindowInfo->capacity * 2; - SOutputRes **p1 = (SOutputRes **)taosGetIntHashData(pRuntimeEnv->hashList, t); - if (p1 != NULL) { - pOutputRes = *p1; - } else { - // more than the threshold number, discard data that are not belong to current groups - if 
(pRuntimeEnv->usedIndex >= 10000) { - return -1; + char *t = realloc(pSlidingWindowInfo->pStatus, newCap * sizeof(SWindowStatus)); + if (t != NULL) { + pSlidingWindowInfo->pStatus = (SWindowStatus *)t; + memset(&pSlidingWindowInfo->pStatus[pSlidingWindowInfo->capacity], 0, sizeof(SWindowStatus) * pSlidingWindowInfo->capacity); + } else { + // todo + } + + pSlidingWindowInfo->capacity = newCap; } // add a new result set for a new group - char *b = (char *)&pRuntimeEnv->pResult[pRuntimeEnv->usedIndex++]; - pOutputRes = *(SOutputRes **)taosAddIntHash(pRuntimeEnv->hashList, t, (char *)&b); - } + if (pStatus != NULL) { + *pStatus = &pSlidingWindowInfo->pStatus[pSlidingWindowInfo->size]; + } - setGroupOutputBuffer(pRuntimeEnv, pOutputRes); - initCtxOutputBuf(pRuntimeEnv); + p = pSlidingWindowInfo->size; + pSlidingWindowInfo->curIndex = pSlidingWindowInfo->size; - return TSDB_CODE_SUCCESS; -} + pSlidingWindowInfo->size += 1; + taosAddToHashTable(pSlidingWindowInfo->hashList, pData, bytes, (char *)&pSlidingWindowInfo->curIndex, sizeof(int32_t)); + } -static char *getGroupbyColumnData(SQueryRuntimeEnv *pRuntimeEnv, SField *pFields, SBlockInfo *pBlockInfo, char *data, - bool isDiskFileBlock, int16_t *type, int16_t *bytes) { - SQuery *pQuery = pRuntimeEnv->pQuery; - char * groupbyColumnData = NULL; + return &pSlidingWindowInfo->pResult[p]; +} - int32_t col = 0; - int16_t colIndexInBuf = 0; +static int32_t initSlidingWindowInfo(SSlidingWindowInfo *pSlidingWindowInfo, int32_t threshold, int16_t type, + SOutputRes *pRes) { + pSlidingWindowInfo->capacity = threshold; + pSlidingWindowInfo->threshold = threshold; - SSqlGroupbyExpr *pGroupbyExpr = pQuery->pGroupbyExpr; + pSlidingWindowInfo->type = type; - for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) { - if (pGroupbyExpr->columnInfo[k].flag == TSDB_COL_TAG) { - continue; - } + _hash_fn_t fn = taosGetDefaultHashFunction(type); + pSlidingWindowInfo->hashList = taosInitHashTable(threshold, fn, false); - int32_t colId = pGroupbyExpr->columnInfo[k].colId; + pSlidingWindowInfo->curIndex = -1; + pSlidingWindowInfo->size = 0; + pSlidingWindowInfo->pResult = pRes; + pSlidingWindowInfo->pStatus = calloc(threshold, sizeof(SWindowStatus)); - if (isDiskFileBlock) { // get the required column data in file block according the column ID - for (int32_t i = 0; i < pBlockInfo->numOfCols; ++i) { - if (colId == pFields[i].colId) { - *type = pFields[i].type; - *bytes = pFields[i].bytes; - col = i; - break; - } - } + if (pSlidingWindowInfo->pStatus == NULL || pSlidingWindowInfo->hashList == NULL) { + return -1; + } - // this column may not in current data block and also not in the required columns list - for (int32_t i = 0; i < pQuery->numOfCols; ++i) { - if (colId == pQuery->colList[i].data.colId) { - colIndexInBuf = i; - break; - } - } - } else { // get the required data column in cache - SColumn *pSchema = pRuntimeEnv->pMeterObj->schema; + return TSDB_CODE_SUCCESS; +} - for (int32_t i = 0; i < pRuntimeEnv->pMeterObj->numOfColumns; ++i) { - if (colId == pSchema[i].colId) { - *type = pSchema[i].type; - *bytes = pSchema[i].bytes; +static void destroySlidingWindowInfo(SSlidingWindowInfo *pSlidingWindowInfo) { + if (pSlidingWindowInfo == NULL || pSlidingWindowInfo->capacity == 0) { + assert(pSlidingWindowInfo->hashList == NULL && pSlidingWindowInfo->pResult == NULL); + return; + } - col = i; - colIndexInBuf = i; - break; - } - } - } + taosCleanUpHashTable(pSlidingWindowInfo->hashList); + tfree(pSlidingWindowInfo->pStatus); +} - int32_t columnIndex = isDiskFileBlock ? 
colIndexInBuf : col; - groupbyColumnData = - doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, columnIndex, colId, *type, *bytes, colIndexInBuf); +void resetSlidingWindowInfo(SSlidingWindowInfo *pSlidingWindowInfo, int32_t numOfCols) { + if (pSlidingWindowInfo == NULL || pSlidingWindowInfo->capacity == 0) { + return; + } - break; + for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SOutputRes *pOneRes = &pSlidingWindowInfo->pResult[i]; + clearGroupResultBuf(pOneRes, numOfCols); } - return groupbyColumnData; + memset(pSlidingWindowInfo->pStatus, 0, sizeof(SWindowStatus) * pSlidingWindowInfo->capacity); + + pSlidingWindowInfo->curIndex = -1; + taosCleanUpHashTable(pSlidingWindowInfo->hashList); + pSlidingWindowInfo->size = 0; + + _hash_fn_t fn = taosGetDefaultHashFunction(pSlidingWindowInfo->type); + pSlidingWindowInfo->hashList = taosInitHashTable(pSlidingWindowInfo->capacity, fn, false); + + pSlidingWindowInfo->startTime = 0; + pSlidingWindowInfo->prevSKey = 0; } -static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { - SQuery *pQuery = pRuntimeEnv->pQuery; +void clearCompletedSlidingWindows(SSlidingWindowInfo *pSlidingWindowInfo, int32_t numOfCols) { + if (pSlidingWindowInfo == NULL || pSlidingWindowInfo->capacity == 0 || pSlidingWindowInfo->size == 0) { + return; + } - STSElem elem = tsBufGetElem(pRuntimeEnv->pTSBuf); - SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; + int32_t i = 0; + for (i = 0; i < pSlidingWindowInfo->size; ++i) { + SWindowStatus *pStatus = &pSlidingWindowInfo->pStatus[i]; + if (pStatus->closed) { // remove the window slot from hash table + taosDeleteFromHashTable(pSlidingWindowInfo->hashList, (const char *)&pStatus->window.skey, TSDB_KEYSIZE); + } else { + break; + } + } - // compare tag first - if (pCtx[0].tag.i64Key != elem.tag) { - return TS_JOIN_TAG_NOT_EQUALS; + if (i == 0) { + return; } - TSKEY key = *(TSKEY *)(pCtx[0].aInputElemBuf + TSDB_KEYSIZE * offset); + int32_t remain = pSlidingWindowInfo->size - i; + //clear remain list + memmove(pSlidingWindowInfo->pStatus, &pSlidingWindowInfo->pStatus[i], remain * sizeof(SWindowStatus)); + memset(&pSlidingWindowInfo->pStatus[remain], 0, (pSlidingWindowInfo->capacity - remain) * sizeof(SWindowStatus)); + + for(int32_t k = 0; k < remain; ++k) { + copyGroupResultBuf(&pSlidingWindowInfo->pResult[k], &pSlidingWindowInfo->pResult[i + k], numOfCols); + } + + for(int32_t k = remain; k < pSlidingWindowInfo->size; ++k) { + SOutputRes *pOneRes = &pSlidingWindowInfo->pResult[k]; + clearGroupResultBuf(pOneRes, numOfCols); + } -#if defined(_DEBUG_VIEW) - printf("elem in comp ts file:%lld, key:%lld, tag:%d, id:%s, query order:%d, ts order:%d, traverse:%d, index:%d\n", - elem.ts, key, elem.tag, pRuntimeEnv->pMeterObj->meterId, pQuery->order.order, pRuntimeEnv->pTSBuf->tsOrder, - pRuntimeEnv->pTSBuf->cur.order, pRuntimeEnv->pTSBuf->cur.tsIndex); -#endif + pSlidingWindowInfo->size = remain; - if (QUERY_IS_ASC_QUERY(pQuery)) { - if (key < elem.ts) { - return TS_JOIN_TS_NOT_EQUALS; - } else if (key > elem.ts) { - assert(false); - } - } else { - if (key > elem.ts) { - return TS_JOIN_TS_NOT_EQUALS; - } else if (key < elem.ts) { - assert(false); - } + for(int32_t k = 0; k < pSlidingWindowInfo->size; ++k) { + SWindowStatus* pStatus = &pSlidingWindowInfo->pStatus[k]; + int32_t *p = (int32_t*) taosGetDataFromHash(pSlidingWindowInfo->hashList, (const char*)&pStatus->window.skey, TSDB_KEYSIZE); + int32_t v = *p; + v = (v - i); + + taosDeleteFromHashTable(pSlidingWindowInfo->hashList, (const char 
*)&pStatus->window.skey, TSDB_KEYSIZE); + + taosAddToHashTable(pSlidingWindowInfo->hashList, (const char*)&pStatus->window.skey, TSDB_KEYSIZE, + (char *)&v, sizeof(int32_t)); } + + pSlidingWindowInfo->curIndex = -1; +} - return TS_JOIN_TS_EQUAL; +int32_t numOfClosedSlidingWindow(SSlidingWindowInfo *pSlidingWindowInfo) { + for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SWindowStatus *pStatus = &pSlidingWindowInfo->pStatus[i]; + if (pStatus->closed == false) { + return i; + } + } + + return 0; } -static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); +void closeSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo, int32_t slot) { + assert(slot >= 0 && slot < pSlidingWindowInfo->size); + SWindowStatus* pStatus = &pSlidingWindowInfo->pStatus[slot]; + pStatus->closed = true; +} - if (pResInfo->complete || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { - return false; +void closeAllSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo) { + assert(pSlidingWindowInfo->size >= 0 && pSlidingWindowInfo->capacity >= pSlidingWindowInfo->size); + + for(int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SWindowStatus* pStatus = &pSlidingWindowInfo->pStatus[i]; + pStatus->closed = true; + } +} +static SWindowStatus* getSlidingWindowStatus(SSlidingWindowInfo *pSlidingWindowInfo, int32_t slot) { + return &pSlidingWindowInfo->pStatus[slot]; +} + +static bool slidingWindowClosed(SSlidingWindowInfo* pSlidingWindowInfo, int32_t slot) { + return (pSlidingWindowInfo->pStatus[slot].closed == true); +} + +static int32_t curSlidingWindow(SSlidingWindowInfo *pSlidingWindowInfo) { + assert(pSlidingWindowInfo->curIndex >= 0 && pSlidingWindowInfo->curIndex < pSlidingWindowInfo->size); + + return pSlidingWindowInfo->curIndex; +} + +// get the correct sliding window according to the handled timestamp +static STimeWindow getActiveSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo, int64_t ts, SQuery* pQuery) { + STimeWindow w = {0}; + + if (pSlidingWindowInfo->curIndex == -1) { // the first window, from the previously stored value + w.skey = pSlidingWindowInfo->prevSKey; + w.ekey = w.skey + pQuery->nAggTimeInterval - 1; + + } else { + SWindowStatus* pStatus = getSlidingWindowStatus(pSlidingWindowInfo, curSlidingWindow(pSlidingWindowInfo)); + + if (pStatus->window.skey <= ts && pStatus->window.ekey >= ts) { + w = pStatus->window; + } else { + int64_t st = pStatus->window.skey; + + while (st > ts) { + st -= pQuery->slidingTime; + } + + while ((st + pQuery->nAggTimeInterval - 1) < ts) { + st += pQuery->slidingTime; + } + + w.skey = st; + w.ekey = w.skey + pQuery->nAggTimeInterval - 1; + } + } + + assert(ts >= w.skey && ts <= w.ekey); + return w; +} + +static int32_t setGroupResultFromKey(SQueryRuntimeEnv *pRuntimeEnv, char *pData, int16_t type, int16_t bytes) { + if (isNull(pData, type)) { // ignore the null value + return -1; + } + + SOutputRes *pOutputRes = doSetSlidingWindowFromKey(&pRuntimeEnv->swindowResInfo, pData, bytes, NULL); + if (pOutputRes == NULL) { + return -1; + } + + setGroupOutputBuffer(pRuntimeEnv, pOutputRes); + initCtxOutputBuf(pRuntimeEnv); + + return TSDB_CODE_SUCCESS; +} + +static int32_t setSlidingWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow *pTimeWindow) { + assert(pTimeWindow->skey < pTimeWindow->ekey); + + int64_t st = pTimeWindow->skey; + + SWindowStatus *pStatus = NULL; + SOutputRes* pOutputRes = doSetSlidingWindowFromKey(&pRuntimeEnv->swindowResInfo,
(char *)&st, TSDB_KEYSIZE, + &pStatus); + + if (pOutputRes == NULL) { + return -1; + } + + pStatus->window = *pTimeWindow; + setGroupOutputBuffer(pRuntimeEnv, pOutputRes); + initCtxOutputBuf(pRuntimeEnv); + + return TSDB_CODE_SUCCESS; +} + +static char *getGroupbyColumnData(SQuery *pQuery, SData **data, int16_t *type, int16_t *bytes) { + char *groupbyColumnData = NULL; + + SSqlGroupbyExpr *pGroupbyExpr = pQuery->pGroupbyExpr; + + for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) { + if (pGroupbyExpr->columnInfo[k].flag == TSDB_COL_TAG) { + continue; + } + + int16_t colIndex = -1; + int32_t colId = pGroupbyExpr->columnInfo[k].colId; + + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + if (pQuery->colList[i].data.colId == colId) { + colIndex = i; + break; + } + } + + assert(colIndex >= 0 && colIndex < pQuery->numOfCols); + + *type = pQuery->colList[colIndex].data.type; + *bytes = pQuery->colList[colIndex].data.bytes; + + groupbyColumnData = doGetDataBlocks(pQuery, data, pQuery->colList[colIndex].colIdxInBuf); + break; + } + + return groupbyColumnData; +} + +static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + STSElem elem = tsBufGetElem(pRuntimeEnv->pTSBuf); + SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; + + // compare tag first + if (pCtx[0].tag.i64Key != elem.tag) { + return TS_JOIN_TAG_NOT_EQUALS; + } + + TSKEY key = *(TSKEY *)(pCtx[0].aInputElemBuf + TSDB_KEYSIZE * offset); + +#if defined(_DEBUG_VIEW) + printf("elem in comp ts file:%" PRId64 ", key:%" PRId64 + ", tag:%d, id:%s, query order:%d, ts order:%d, traverse:%d, index:%d\n", + elem.ts, key, elem.tag, pRuntimeEnv->pMeterObj->meterId, pQuery->order.order, pRuntimeEnv->pTSBuf->tsOrder, + pRuntimeEnv->pTSBuf->cur.order, pRuntimeEnv->pTSBuf->cur.tsIndex); +#endif + + if (QUERY_IS_ASC_QUERY(pQuery)) { + if (key < elem.ts) { + return TS_JOIN_TS_NOT_EQUALS; + } else if (key > elem.ts) { + assert(false); + } + } else { + if (key > elem.ts) { + return TS_JOIN_TS_NOT_EQUALS; + } else if (key < elem.ts) { + assert(false); + } + } + + return TS_JOIN_TS_EQUAL; +} + +static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) { + SResultInfo *pResInfo = GET_RES_INFO(pCtx); + + if (pResInfo->complete || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { + return false; + } + + // in the supplementary scan, only the following functions need to be executed if (!IS_MASTER_SCAN(pRuntimeEnv) && !(functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) { @@ -1603,10 +1915,13 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx } static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t *forwardStep, TSKEY *primaryKeyCol, - char *data, SField *pFields, SBlockInfo *pBlockInfo, bool isDiskFileBlock) { + SField *pFields, SBlockInfo *pBlockInfo) { SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; SQuery * pQuery = pRuntimeEnv->pQuery; + bool isDiskFileBlock = IS_FILE_BLOCK(pRuntimeEnv->blockStatus); + SData **data = pRuntimeEnv->colDataBuffer; + int64_t prevNumOfRes = 0; bool groupbyStateValue = isGroupbyNormalCol(pQuery->pGroupbyExpr); @@ -1621,35 +1936,28 @@ static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t * char *groupbyColumnData = NULL; if (groupbyStateValue) { - groupbyColumnData = 
getGroupbyColumnData(pRuntimeEnv, pFields, pBlockInfo, data, isDiskFileBlock, &type, &bytes); + groupbyColumnData = getGroupbyColumnData(pQuery, data, &type, &bytes); } for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; bool hasNull = hasNullVal(pQuery, k, pBlockInfo, pFields, isDiskFileBlock); - char *dataBlock = getDataBlocks(pRuntimeEnv, data, &sasArray[k], k, *forwardStep, isDiskFileBlock); - - TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->skey : pQuery->ekey; - int64_t alignedTimestamp = - taosGetIntervalStartTimestamp(ts, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); + char *dataBlock = getDataBlocks(pRuntimeEnv, &sasArray[k], k, *forwardStep); - setExecParams(pQuery, &pCtx[k], alignedTimestamp, dataBlock, (char *)primaryKeyCol, (*forwardStep), functionId, - pFields, hasNull, pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); + TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? pRuntimeEnv->intervalWindow.skey : pRuntimeEnv->intervalWindow.ekey; + setExecParams(pQuery, &pCtx[k], ts, dataBlock, (char *)primaryKeyCol, (*forwardStep), functionId, pFields, hasNull, + pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); } // set the input column data for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k]; - int32_t colIdx = isDiskFileBlock ? pFilterInfo->info.colIdxInBuf : pFilterInfo->info.colIdx; - SColumnInfo * pColumnInfo = &pFilterInfo->info.data; - /* * NOTE: here the tbname/tags column cannot reach here, since it will never be a filter column, * so we do NOT check if is a tag or not */ - pFilterInfo->pData = doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, colIdx, pColumnInfo->colId, - pColumnInfo->type, pColumnInfo->bytes, pFilterInfo->info.colIdxInBuf); + pFilterInfo->pData = doGetDataBlocks(pQuery, data, pFilterInfo->info.colIdxInBuf); } int32_t numOfRes = 0; @@ -1663,7 +1971,10 @@ static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t * pQuery->order.order, pRuntimeEnv->pTSBuf->cur.order); } - for (int32_t j = 0; j < (*forwardStep); ++j) { + int32_t j = 0; + int64_t lastKey = 0; + + for (j = 0; j < (*forwardStep); ++j) { int32_t offset = GET_COL_DATA_POS(pQuery, j, step); if (pRuntimeEnv->pTSBuf != NULL) { @@ -1682,23 +1993,97 @@ static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t * continue; } - // decide which group this rows belongs to according to current state value - if (groupbyStateValue) { - char *stateVal = groupbyColumnData + bytes * offset; + // sliding window query + if (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0) { + // decide the time window according to the primary timestamp + int64_t ts = primaryKeyCol[offset]; + + SSlidingWindowInfo* pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + STimeWindow win = getActiveSlidingWindow(pSlidingWindowInfo, ts, pQuery); - int32_t ret = setGroupResultForKey(pRuntimeEnv, stateVal, type, groupbyColumnData); + int32_t ret = setSlidingWindowFromKey(pRuntimeEnv, &win); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code continue; } - } - // all startOffset are identical - offset -= pCtx[0].startOffset; + // all startOffset are identical + offset -= pCtx[0].startOffset; + + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; + pCtx[k].nStartQueryTimestamp = win.skey; + + SWindowStatus* pStatus = 
getSlidingWindowStatus(pSlidingWindowInfo, curSlidingWindow(pSlidingWindowInfo)); + + if (!IS_MASTER_SCAN(pRuntimeEnv) && !pStatus->closed) { +// qTrace("QInfo:%p not completed in supplementary scan, ignore funcId:%d, window:%lld-%lld", +// GET_QINFO_ADDR(pQuery), functionId, pStatus->window.skey, pStatus->window.ekey); + continue; + } + + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { + aAggs[functionId].xFunctionF(&pCtx[k], offset); + } + } + + lastKey = ts; + int32_t index = pRuntimeEnv->swindowResInfo.curIndex; + + STimeWindow nextWin = win; + while (1) { + getNextLogicalQueryRange(pRuntimeEnv, &nextWin); + if (pSlidingWindowInfo->startTime > nextWin.skey || (nextWin.skey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (nextWin.skey > pQuery->skey && !QUERY_IS_ASC_QUERY(pQuery))) { + pRuntimeEnv->swindowResInfo.curIndex = index; + break; + } + + if (ts >= nextWin.skey && ts <= nextWin.ekey) { + // null data, failed to allocate more memory buffer + if (setSlidingWindowFromKey(pRuntimeEnv, &nextWin) != TSDB_CODE_SUCCESS) { + pRuntimeEnv->swindowResInfo.curIndex = index; + break; + } + + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; + pCtx[k].nStartQueryTimestamp = nextWin.skey; + + SWindowStatus* pStatus = getSlidingWindowStatus(pSlidingWindowInfo, curSlidingWindow(pSlidingWindowInfo)); + if (!IS_MASTER_SCAN(pRuntimeEnv) && !pStatus->closed) { +// qTrace("QInfo:%p not completed in supplementary scan, ignore funcId:%d, window:%lld-%lld", +// GET_QINFO_ADDR(pQuery), functionId, pStatus->window.skey, pStatus->window.ekey); + continue; + } + + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { + aAggs[functionId].xFunctionF(&pCtx[k], offset); + } + } + } else { + pRuntimeEnv->swindowResInfo.curIndex = index; + break; + } + } + } else { // other queries + // decide which group this rows belongs to according to current state value + if (groupbyStateValue) { + char *stateVal = groupbyColumnData + bytes * offset; + + int32_t ret = setGroupResultFromKey(pRuntimeEnv, stateVal, type, bytes); + if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code + continue; + } + } + + // all startOffset are identical + offset -= pCtx[0].startOffset; - for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { - int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; - if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { - aAggs[functionId].xFunctionF(&pCtx[k], offset); + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { + aAggs[functionId].xFunctionF(&pCtx[k], offset); + } } } @@ -1723,12 +2108,49 @@ static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t * free(sasArray); + if (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0 && IS_MASTER_SCAN(pRuntimeEnv)) { + SSlidingWindowInfo *pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + + // query completed + if ((lastKey >= pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (lastKey <= pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { + closeAllSlidingWindow(pSlidingWindowInfo); + + pSlidingWindowInfo->curIndex = pSlidingWindowInfo->size - 1; + setQueryStatus(pQuery, QUERY_COMPLETED | QUERY_RESBUF_FULL); + } else { + int32_t i = 0; + int64_t skey = 0; + + for (i = 0; i < pSlidingWindowInfo->size; ++i) { + SWindowStatus *pStatus = &pSlidingWindowInfo->pStatus[i]; + if 
((pStatus->window.ekey <= lastKey && QUERY_IS_ASC_QUERY(pQuery)) || + (pStatus->window.skey >= lastKey && !QUERY_IS_ASC_QUERY(pQuery))) { + closeSlidingWindow(pSlidingWindowInfo, i); + } else { + skey = pStatus->window.skey; + break; + } + } + + pSlidingWindowInfo->prevSKey = skey; + + // if the number of completed slots is larger than the threshold, dump to the client immediately. + int32_t v = numOfClosedSlidingWindow(pSlidingWindowInfo); + if (v > pSlidingWindowInfo->threshold) { + setQueryStatus(pQuery, QUERY_RESBUF_FULL); + } + + dTrace("QInfo:%p total window:%d, closed:%d", GET_QINFO_ADDR(pQuery), pSlidingWindowInfo->size, v); + } + } + /* * No need to calculate the number of output results for groupby normal columns * because the results of group by normal column is put into intermediate buffer. */ int32_t num = 0; - if (!groupbyStateValue) { + if (!groupbyStateValue && !(pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { num = getNumOfResult(pRuntimeEnv) - prevNumOfRes; } @@ -1798,7 +2220,7 @@ static void validateQueryRangeAndData(SQueryRuntimeEnv *pRuntimeEnv, const TSKEY } static int32_t applyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pBlockInfo, int64_t *pPrimaryColumn, - char *sdata, SField *pFields, __block_search_fn_t searchFn, int32_t *numOfRes) { + SField *pFields, __block_search_fn_t searchFn, int32_t *numOfRes) { int32_t forwardStep = 0; SQuery *pQuery = pRuntimeEnv->pQuery; @@ -1815,7 +2237,7 @@ // no qualified data in current block, do not update the lastKey value assert(pQuery->ekey < pPrimaryColumn[pQuery->pos]); } else { - pQuery->lastKey = pPrimaryColumn[pQuery->pos + (forwardStep - 1)] + step; + pQuery->lastKey = pQuery->ekey + step; // pPrimaryColumn[pQuery->pos + (forwardStep - 1)] + step; } } else { @@ -1833,7 +2255,7 @@ static int32_t applyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo * // no qualified data in current block, do not update the lastKey value assert(pQuery->ekey > pPrimaryColumn[pQuery->pos]); } else { - pQuery->lastKey = pPrimaryColumn[pQuery->pos - (forwardStep - 1)] + step; + pQuery->lastKey = pQuery->ekey + step; // pPrimaryColumn[pQuery->pos - (forwardStep - 1)] + step; } } else { forwardStep = pQuery->pos + 1; @@ -1851,14 +2273,11 @@ static int32_t applyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo * pQuery->lastKey = pPrimaryColumn[pQuery->pos + (newForwardStep - 1) * step] + step; } - bool isFileBlock = IS_FILE_BLOCK(pRuntimeEnv->blockStatus); - - if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - *numOfRes = - rowwiseApplyAllFunctions(pRuntimeEnv, &newForwardStep, pPrimaryColumn, sdata, pFields, pBlockInfo, isFileBlock); + if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr) || + (pQuery->slidingTime != -1 && pQuery->nAggTimeInterval > 0)) { + *numOfRes = rowwiseApplyAllFunctions(pRuntimeEnv, &newForwardStep, pPrimaryColumn, pFields, pBlockInfo); } else { - *numOfRes = blockwiseApplyAllFunctions(pRuntimeEnv, newForwardStep, pPrimaryColumn, sdata, pFields, pBlockInfo, - isFileBlock); + *numOfRes = blockwiseApplyAllFunctions(pRuntimeEnv, newForwardStep, pPrimaryColumn, pFields, pBlockInfo); } assert(*numOfRes >= 0); @@ -1900,7 +2319,7 @@ int32_t vnodeGetVnodeHeaderFileIdx(int32_t *fid, SQueryRuntimeEnv *pRuntimeEnv, if (order == TSQL_SO_ASC) { int32_t i = 0; - int32_t step = 1; + int32_t step = 
QUERY_ASC_FORWARD_STEP; while (i < numOfFiles && *fid > pVnodeFiles->pFileInfo[i].fileID) { i += step; @@ -1914,7 +2333,7 @@ } } else { int32_t i = numOfFiles - 1; - int32_t step = -1; + int32_t step = QUERY_DESC_FORWARD_STEP; while (i >= 0 && *fid < pVnodeFiles->pFileInfo[i].fileID) { i += step; @@ -1933,17 +2352,17 @@ int32_t getNextDataFileCompInfo(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeter SQuery *pQuery = pRuntimeEnv->pQuery; pQuery->fileId += step; - int32_t fid = 0; + int32_t fileIndex = 0; int32_t order = (step == QUERY_ASC_FORWARD_STEP) ? TSQL_SO_ASC : TSQL_SO_DESC; while (1) { - fid = vnodeGetVnodeHeaderFileIdx(&pQuery->fileId, pRuntimeEnv, order); + fileIndex = vnodeGetVnodeHeaderFileIdx(&pQuery->fileId, pRuntimeEnv, order); // no files left, abort - if (fid < 0) { + if (fileIndex < 0) { if (step == QUERY_ASC_FORWARD_STEP) { - dTrace("QInfo:%p no file to access, try data in cache", GET_QINFO_ADDR(pQuery)); + dTrace("QInfo:%p no more file to access, try data in cache", GET_QINFO_ADDR(pQuery)); } else { - dTrace("QInfo:%p no file to access in desc order, query completed", GET_QINFO_ADDR(pQuery)); + dTrace("QInfo:%p no more file to access in desc order, query completed", GET_QINFO_ADDR(pQuery)); } vnodeFreeFieldsEx(pRuntimeEnv); @@ -1951,9 +2370,8 @@ break; } - // failed to mmap header file into memory will cause the retrieval of compblock info failed - if (vnodeGetCompBlockInfo(pMeterObj, pRuntimeEnv, fid) > 0) { + if (vnodeGetCompBlockInfo(pMeterObj, pRuntimeEnv, fileIndex) > 0) { break; } @@ -1966,15 +2384,15 @@ pQuery->fileId += step; /* for backwards search, if the first file is not valid, abort */ - if (step < 0 && fid == 0) { + if (step < 0 && fileIndex == 0) { vnodeFreeFieldsEx(pRuntimeEnv); pQuery->fileId = -1; - fid = -1; + fileIndex = -1; break; } } - return fid; + return fileIndex; } void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t startQueryTimestamp, void *inputData, @@ -2028,7 +2446,7 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t startQueryTimes pCtx->ptsList = (int64_t *)(primaryColumnData + startOffset * TSDB_KEYSIZE); } else if (functionId == TSDB_FUNC_ARITHM) { - pCtx->param[0].pz = param; + pCtx->param[1].pz = param; } pCtx->startOffset = startOffset; @@ -2117,14 +2535,15 @@ static int32_t setupQueryRuntimeEnv(SMeterObj *pMeterObj, SQuery *pQuery, SQuery pCtx->order = pQuery->order.order; pCtx->functionId = pSqlFuncMsg->functionId; - /* - * tricky: in case of char array parameters, we employ the shallow copy - * method and get the ownership of the char array, it later release the allocated memory if exists - */ pCtx->numOfParams = pSqlFuncMsg->numOfParams; for (int32_t j = 0; j < pCtx->numOfParams; ++j) { - pCtx->param[j].nType = pSqlFuncMsg->arg[j].argType; - pCtx->param[j].i64Key = pSqlFuncMsg->arg[j].argValue.i64; + int16_t type = pSqlFuncMsg->arg[j].argType; + int16_t bytes = pSqlFuncMsg->arg[j].argBytes; + if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { + tVariantCreateFromBinary(&pCtx->param[j], pSqlFuncMsg->arg[j].argValue.pz, bytes, type); + } else { + tVariantCreateFromBinary(&pCtx->param[j], (char *)&pSqlFuncMsg->arg[j].argValue.i64, bytes, type); + } } // set the order information for top/bottom query @@ -2160,60 +2579,12 @@ static int32_t setupQueryRuntimeEnv(SMeterObj
*pMeterObj, SQuery *pQuery, SQuery // for loading block data in memory assert(vnodeList[pMeterObj->vnode].cfg.rowsInFileBlock == pMeterObj->pointsPerFileBlock); - - // To make sure the start position of each buffer is aligned to 4bytes in 32-bit ARM system. - for (int32_t i = 0; i < pQuery->numOfCols; ++i) { - int32_t bytes = pQuery->colList[i].data.bytes; - pRuntimeEnv->colDataBuffer[i] = calloc(1, sizeof(SData) + EXTRA_BYTES + pMeterObj->pointsPerFileBlock * bytes); - if (pRuntimeEnv->colDataBuffer[i] == NULL) { - goto _error_clean; - } - } - - // record the maximum column width among columns of this meter/metric - int32_t maxColWidth = pQuery->colList[0].data.bytes; - for (int32_t i = 1; i < pQuery->numOfCols; ++i) { - int32_t bytes = pQuery->colList[i].data.bytes; - if (bytes > maxColWidth) { - maxColWidth = bytes; - } - } - - pRuntimeEnv->primaryColBuffer = NULL; - if (PRIMARY_TSCOL_LOADED(pQuery)) { - pRuntimeEnv->primaryColBuffer = pRuntimeEnv->colDataBuffer[0]; - } else { - pRuntimeEnv->primaryColBuffer = - (SData *)malloc(pMeterObj->pointsPerFileBlock * TSDB_KEYSIZE + sizeof(SData) + EXTRA_BYTES); - } - - pRuntimeEnv->unzipBufSize = (size_t)(maxColWidth * pMeterObj->pointsPerFileBlock + EXTRA_BYTES); // plus extra_bytes - - pRuntimeEnv->unzipBuffer = (char *)malloc(pRuntimeEnv->unzipBufSize); - pRuntimeEnv->secondaryUnzipBuffer = (char *)calloc(1, pRuntimeEnv->unzipBufSize); - - if (pRuntimeEnv->unzipBuffer == NULL || pRuntimeEnv->secondaryUnzipBuffer == NULL || - pRuntimeEnv->primaryColBuffer == NULL) { - goto _error_clean; - } - return TSDB_CODE_SUCCESS; _error_clean: tfree(pRuntimeEnv->resultInfo); tfree(pRuntimeEnv->pCtx); - for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfCols; ++i) { - tfree(pRuntimeEnv->colDataBuffer[i]); - } - - tfree(pRuntimeEnv->unzipBuffer); - tfree(pRuntimeEnv->secondaryUnzipBuffer); - - if (!PRIMARY_TSCOL_LOADED(pQuery)) { - tfree(pRuntimeEnv->primaryColBuffer); - } - return TSDB_CODE_SERV_OUT_OF_MEMORY; } @@ -2228,8 +2599,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { } tfree(pRuntimeEnv->secondaryUnzipBuffer); - - taosCleanUpIntHash(pRuntimeEnv->hashList); + destroySlidingWindowInfo(&pRuntimeEnv->swindowResInfo); if (pRuntimeEnv->pCtx != NULL) { for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) { @@ -2261,6 +2631,8 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { free(pRuntimeEnv->vnodeFileInfo.pFileInfo); } + taosDestoryInterpoInfo(&pRuntimeEnv->interpoInfo); + if (pRuntimeEnv->pInterpoBuf != NULL) { for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) { tfree(pRuntimeEnv->pInterpoBuf[i]); @@ -2269,10 +2641,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { tfree(pRuntimeEnv->pInterpoBuf); } - if (pRuntimeEnv->pTSBuf != NULL) { - tsBufDestory(pRuntimeEnv->pTSBuf); - pRuntimeEnv->pTSBuf = NULL; - } + pRuntimeEnv->pTSBuf = tsBufDestory(pRuntimeEnv->pTSBuf); } // get maximum time interval in each file @@ -2320,11 +2689,6 @@ bool isFixedOutputQuery(SQuery *pQuery) { continue; } - // // ignore the group by + projection combination - // if (pExprMsg->functionId == TSDB_FUNC_PRJ && isGroupbyNormalCol(pQuery)) { - // continue; - // } - if (!IS_MULTIOUTPUT(aAggs[pExprMsg->functionId].nStatus)) { return true; } @@ -2371,6 +2735,11 @@ bool isFirstLastRowQuery(SQuery *pQuery) { return false; } +bool notHasQueryTimeRange(SQuery *pQuery) { + return (pQuery->skey == 0 && pQuery->ekey == INT64_MAX && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->skey == INT64_MAX 
&& pQuery->ekey == 0 && (!QUERY_IS_ASC_QUERY(pQuery))); +} + bool isTSCompQuery(SQuery *pQuery) { return pQuery->pSelectExpr[0].pBase.functionId == TSDB_FUNC_TS_COMP; } bool needSupplementaryScan(SQuery *pQuery) { @@ -2445,11 +2814,13 @@ static int32_t getFirstCacheSlot(int32_t numOfBlocks, int32_t lastSlot, SCacheIn return (lastSlot - numOfBlocks + 1 + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; } -static bool cacheBoundaryCheck(SQuery *pQuery, SMeterObj *pMeterObj) { +static bool cacheBoundaryCheck(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj) { /* * here we get the first slot from the meter cache, not from the cache snapshot from pQuery, since the * snapshot value in pQuery may have been expired now. */ + SQuery *pQuery = pRuntimeEnv->pQuery; + SCacheInfo * pCacheInfo = (SCacheInfo *)pMeterObj->pCache; SCacheBlock *pBlock = NULL; @@ -2473,8 +2844,8 @@ static bool cacheBoundaryCheck(SQuery *pQuery, SMeterObj *pMeterObj) { * pBlock may be null value since this block is flushed to disk, and re-distributes to * other meter, so go on until we get the first not flushed cache block. */ - if ((pBlock = getCacheDataBlock(pMeterObj, pQuery, first)) != NULL) { - keyFirst = getTimestampInCacheBlock(pBlock, 0); + if ((pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, first)) != NULL) { + keyFirst = getTimestampInCacheBlock(pRuntimeEnv, pBlock, 0); break; } else { /* @@ -2506,18 +2877,23 @@ void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t v // commitSlot here denotes the first uncommitted block in cache int32_t numOfBlocks = 0; int32_t lastSlot = 0; + int32_t commitSlot = 0; + int32_t commitPoint = 0; SCachePool *pPool = (SCachePool *)vnodeList[vid].pCachePool; pthread_mutex_lock(&pPool->vmutex); numOfBlocks = pCacheInfo->numOfBlocks; lastSlot = pCacheInfo->currentSlot; + commitSlot = pCacheInfo->commitSlot; + commitPoint = pCacheInfo->commitPoint; pthread_mutex_unlock(&pPool->vmutex); // make sure it is there, otherwise, return right away pQuery->currentSlot = lastSlot; pQuery->numOfBlocks = numOfBlocks; pQuery->firstSlot = getFirstCacheSlot(numOfBlocks, lastSlot, pCacheInfo); - ; + pQuery->commitSlot = commitSlot; + pQuery->commitPoint = commitPoint; /* * Note: the block id is continuous increasing, never becomes smaller. @@ -2551,7 +2927,7 @@ int64_t getQueryStartPositionInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t *slo assert((pQuery->lastKey >= pQuery->skey && QUERY_IS_ASC_QUERY(pQuery)) || (pQuery->lastKey <= pQuery->skey && !QUERY_IS_ASC_QUERY(pQuery))); - if (!ignoreQueryRange && !cacheBoundaryCheck(pQuery, pMeterObj)) { + if (!ignoreQueryRange && !cacheBoundaryCheck(pRuntimeEnv, pMeterObj)) { return -1; } @@ -2566,8 +2942,16 @@ int64_t getQueryStartPositionInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t *slo /* locate the first point of which time stamp is no less than pQuery->skey */ __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; - SCacheBlock *pBlock = pCacheInfo->cacheBlocks[*slot]; - (*pos) = searchFn(pBlock->offset[0], pBlock->numOfPoints, pQuery->skey, pQuery->order.order); + pQuery->slot = *slot; + + // cache block has been flushed to disk, no required data block in cache. 
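+  // note: getCacheDataBlock copies the block into pRuntimeEnv->cacheBlock before use; a NULL return below means the slot was reclaimed after the snapshot above, so skey is restored and -1 returned, letting the caller retry the search on the file side.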
+ SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot); + if (pBlock == NULL) { + pQuery->skey = rawskey; // restore the skey + return -1; + } + + (*pos) = searchFn(pRuntimeEnv->primaryColBuffer->data, pBlock->numOfPoints, pQuery->skey, pQuery->order.order); // restore skey before return pQuery->skey = rawskey; @@ -2577,7 +2961,7 @@ int64_t getQueryStartPositionInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t *slo return -1; } - int64_t nextKey = getTimestampInCacheBlock(pBlock, *pos); + int64_t nextKey = getTimestampInCacheBlock(pRuntimeEnv, pBlock, *pos); if ((nextKey < pQuery->lastKey && QUERY_IS_ASC_QUERY(pQuery)) || (nextKey > pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery))) { // all data are less than the pQuery->lastKey(pQuery->sKey) for asc query @@ -2629,7 +3013,7 @@ bool hasDataInCache(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj) { return false; } - return cacheBoundaryCheck(pQuery, pMeterObj); + return cacheBoundaryCheck(pRuntimeEnv, pMeterObj); } /** @@ -2645,59 +3029,77 @@ void vnodeCheckIfDataExists(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, setQueryStatus(pQuery, QUERY_NOT_COMPLETED); } -static void doGetAlignedIntervalQueryRangeImpl(SQuery *pQuery, int64_t qualifiedKey, int64_t keyFirst, int64_t keyLast, - int64_t *skey, int64_t *ekey) { - assert(qualifiedKey >= keyFirst && qualifiedKey <= keyLast); +static void doGetAlignedIntervalQueryRangeImpl(SQuery *pQuery, int64_t pKey, int64_t keyFirst, int64_t keyLast, + int64_t *actualSkey, int64_t *actualEkey, int64_t *skey, int64_t *ekey) { + assert(pKey >= keyFirst && pKey <= keyLast); + *skey = taosGetIntervalStartTimestamp(pKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); if (keyFirst > (INT64_MAX - pQuery->nAggTimeInterval)) { /* - * if the skey > INT64_MAX - pQuery->nAggTimeInterval, the query duration between - * skey and ekey must be less than one interval.Therefore, no need to adjust the query ranges. + * if the actualSkey > INT64_MAX - pQuery->nAggTimeInterval, the query duration between + * actualSkey and actualEkey must be less than one interval.Therefore, no need to adjust the query ranges. */ assert(keyLast - keyFirst < pQuery->nAggTimeInterval); - *skey = keyFirst; - *ekey = keyLast; + *actualSkey = keyFirst; + *actualEkey = keyLast; + + *ekey = INT64_MAX; return; } - *skey = taosGetIntervalStartTimestamp(qualifiedKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, - pQuery->precision); - int64_t endKey = *skey + pQuery->nAggTimeInterval - 1; + *ekey = *skey + pQuery->nAggTimeInterval - 1; if (*skey < keyFirst) { - *skey = keyFirst; + *actualSkey = keyFirst; + } else { + *actualSkey = *skey; } - if (endKey < keyLast) { - *ekey = endKey; + if (*ekey < keyLast) { + *actualEkey = *ekey; } else { - *ekey = keyLast; + *actualEkey = keyLast; } } -static void doGetAlignedIntervalQueryRange(SQuery *pQuery, TSKEY key, TSKEY skey, TSKEY ekey) { - TSKEY skey1, ekey1; +static void getAlignedIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key, TSKEY skey, TSKEY ekey) { + SQuery *pQuery = pRuntimeEnv->pQuery; + if (pQuery->nAggTimeInterval == 0 || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { + return; + } - TSKEY skey2 = (skey < ekey) ? skey : ekey; - TSKEY ekey2 = (skey < ekey) ? 
ekey : skey; + TSKEY skey2 = MIN(skey, ekey); + TSKEY ekey2 = MAX(skey, ekey); - doGetAlignedIntervalQueryRangeImpl(pQuery, key, skey2, ekey2, &skey1, &ekey1); + // the actual first query range in skey1 and ekey1 + TSKEY skey1, ekey1; + + TSKEY windowSKey = 0, windowEKey = 0; + doGetAlignedIntervalQueryRangeImpl(pQuery, key, skey2, ekey2, &skey1, &ekey1, &windowSKey, &windowEKey); if (QUERY_IS_ASC_QUERY(pQuery)) { pQuery->skey = skey1; pQuery->ekey = ekey1; - assert(pQuery->skey <= pQuery->ekey); + + pRuntimeEnv->intervalWindow = (STimeWindow) {.skey = windowSKey, .ekey = windowEKey}; + + assert(pQuery->skey <= pQuery->ekey && + pRuntimeEnv->intervalWindow.skey + (pQuery->nAggTimeInterval - 1) == pRuntimeEnv->intervalWindow.ekey); } else { pQuery->skey = ekey1; pQuery->ekey = skey1; - assert(pQuery->skey >= pQuery->ekey); + + pRuntimeEnv->intervalWindow = (STimeWindow) {.skey = windowEKey, .ekey = windowSKey}; + + assert(pQuery->skey >= pQuery->ekey && + pRuntimeEnv->intervalWindow.skey - (pQuery->nAggTimeInterval - 1) == pRuntimeEnv->intervalWindow.ekey); } pQuery->lastKey = pQuery->skey; } -static void getOneRowFromDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, char **dst, int32_t pos) { +static void getOneRowFromDataBlock(SQueryRuntimeEnv *pRuntimeEnv, char **dst, int32_t pos) { SQuery *pQuery = pRuntimeEnv->pQuery; for (int32_t i = 0; i < pQuery->numOfCols; ++i) { @@ -2706,32 +3108,6 @@ static void getOneRowFromDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, char **dst, in } } -static void getOneRowFromCacheBlock(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, SCacheBlock *pBlock, - char **dst, int32_t pos) { - SQuery *pQuery = pRuntimeEnv->pQuery; - - /* - * in case of cache block expired, the pos may exceed the number of points in block, so check - * the range in the first place. 
- */ - if (pos > pBlock->numOfPoints) { - pos = pBlock->numOfPoints; - } - - for (int32_t i = 0; i < pQuery->numOfCols; ++i) { - int32_t colIdx = pQuery->colList[i].colIdx; - int32_t colId = pQuery->colList[i].data.colId; - - SColumn *pCols = &pMeterObj->schema[colIdx]; - - if (colIdx < 0 || colIdx >= pMeterObj->numOfColumns || pCols->colId != colId) { // set null - setNull(dst[i], pCols->type, pCols->bytes); - } else { - memcpy(dst[i], pBlock->offset[colIdx] + pos * pCols->bytes, pCols->bytes); - } - } -} - static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMeterObj, SPointInterpoSupporter *pPointInterpSupporter) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; @@ -2757,29 +3133,7 @@ static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMet pQuery->slot, pQuery->pos); // save the point that is directly after or equals to the specified point - if (IS_DISK_DATA_BLOCK(pQuery)) { - getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pNextPoint, pQuery->pos); - } else { - pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); - __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; - - while (pBlock == NULL) { - // cache block is flushed to disk, try to find new query position again - getQueryPositionForCacheInvalid(pRuntimeEnv, searchFn); - - // new position is located in file, load data and abort - if (IS_DISK_DATA_BLOCK(pQuery)) { - getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pNextPoint, pQuery->pos); - break; - } else { - pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); - } - } - - if (!IS_DISK_DATA_BLOCK(pQuery)) { - getOneRowFromCacheBlock(pRuntimeEnv, pMeterObj, pBlock, pPointInterpSupporter->pNextPoint, pQuery->pos); - } - } + getOneRowFromDataBlock(pRuntimeEnv, pPointInterpSupporter->pNextPoint, pQuery->pos); /* * 1. for last_row query, return immediately. @@ -2808,12 +3162,8 @@ static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMet if (pQuery->pos > 0) { int32_t prevPos = pQuery->pos - 1; - if (IS_DISK_DATA_BLOCK(pQuery)) { - /* save the point that is directly after the specified point */ - getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, prevPos); - } else { - getOneRowFromCacheBlock(pRuntimeEnv, pMeterObj, pBlock, pPointInterpSupporter->pPrevPoint, prevPos); - } + /* save the point that is directly after the specified point */ + getOneRowFromDataBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, prevPos); } else { __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; @@ -2823,10 +3173,12 @@ static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMet moveToNextBlock(pRuntimeEnv, QUERY_DESC_FORWARD_STEP, searchFn, true); /* - * no previous data exists reset the status and load the data block that contains the qualified point + * no previous data exists. 
+   * reset the status and load the data block that contains the qualified point
    */
   if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK)) {
-    dTrace("QInfo:%p no previous data block, start fileId:%d, slot:%d, pos:%d, qrange:%lld-%lld, out of range",
+    dTrace("QInfo:%p no previous data block, start fileId:%d, slot:%d, pos:%d, qrange:%" PRId64 "-%" PRId64
+           ", out of range",
           GET_QINFO_ADDR(pQuery), pRuntimeEnv->startPos.fileId, pRuntimeEnv->startPos.slot,
           pRuntimeEnv->startPos.pos, pQuery->skey, pQuery->ekey);
@@ -2836,21 +3188,20 @@ static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMet
     } else {  // prev has been located
       if (pQuery->fileId >= 0) {
         pQuery->pos = pQuery->pBlock[pQuery->slot].numOfPoints - 1;
-        getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, pQuery->pos);
+        getOneRowFromDataBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, pQuery->pos);

         qTrace("QInfo:%p get prev data point, fileId:%d, slot:%d, pos:%d, pQuery->pos:%d", GET_QINFO_ADDR(pQuery),
                pQuery->fileId, pQuery->slot, pQuery->pos, pQuery->pos);
       } else {
-        pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot);
-        if (pBlock == NULL) {
-          // todo nothing, the previous block is flushed to disk
-        } else {
-          pQuery->pos = pBlock->numOfPoints - 1;
-          getOneRowFromCacheBlock(pRuntimeEnv, pMeterObj, pBlock, pPointInterpSupporter->pPrevPoint, pQuery->pos);
+        // moveToNextBlock makes sure there is an available cache block, if one exists
+        assert(vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, -1, true) == DISK_BLOCK_NO_NEED_TO_LOAD);
+        pBlock = &pRuntimeEnv->cacheBlock;

-          qTrace("QInfo:%p get prev data point, fileId:%d, slot:%d, pos:%d, pQuery->pos:%d", GET_QINFO_ADDR(pQuery),
-                 pQuery->fileId, pQuery->slot, pBlock->numOfPoints - 1, pQuery->pos);
-        }
+        pQuery->pos = pBlock->numOfPoints - 1;
+        getOneRowFromDataBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, pQuery->pos);
+
+        qTrace("QInfo:%p get prev data point, fileId:%d, slot:%d, pos:%d, pQuery->pos:%d", GET_QINFO_ADDR(pQuery),
+               pQuery->fileId, pQuery->slot, pBlock->numOfPoints - 1, pQuery->pos);
       }
     }
   }
@@ -2872,7 +3223,7 @@ static bool doGetQueryPos(TSKEY key, SMeterQuerySupportObj *pSupporter, SPointIn
     if (isPointInterpoQuery(pQuery)) { /* no qualified data in this query range */
       return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter);
     } else {
-      getAlignedIntervalQueryRange(pQuery, key, pQuery->skey, pQuery->ekey);
+      getAlignedIntervalQueryRange(pRuntimeEnv, key, pQuery->skey, pQuery->ekey);
       return true;
     }
   } else {  // key > pQuery->ekey, abort for normal query, continue for interp query
@@ -2884,14 +3235,101 @@
   }
 }

+static bool doSetDataInfo(SMeterQuerySupportObj *pSupporter, SPointInterpoSupporter *pPointInterpSupporter,
+                          SMeterObj *pMeterObj, TSKEY nextKey) {
+  SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
+  SQuery *          pQuery = pRuntimeEnv->pQuery;
+
+  if (isFirstLastRowQuery(pQuery)) {
+    /*
+     * if pQuery->skey != pQuery->ekey for a last_row query,
+     * a query time range exists, so set both of them to the value of nextKey
+     */
+    if (pQuery->skey != pQuery->ekey) {
+      assert(pQuery->skey >= pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery) && nextKey >= pQuery->ekey &&
+             nextKey <= pQuery->skey);
+
+      pQuery->skey = nextKey;
+      pQuery->ekey = nextKey;
+    }
+
+    return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter);
+  } else {
+    getAlignedIntervalQueryRange(pRuntimeEnv, nextKey, pQuery->skey, pQuery->ekey);
+    return
true;
+  }
+}
+
+// TODO refactor code; the best way to implement last_row is to use an iterator
+bool normalizeUnBoundLastRowQuery(SMeterQuerySupportObj *pSupporter, SPointInterpoSupporter *pPointInterpSupporter) {
+  SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
+
+  SQuery *   pQuery = pRuntimeEnv->pQuery;
+  SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj;
+
+  assert(!QUERY_IS_ASC_QUERY(pQuery) && notHasQueryTimeRange(pQuery));
+  __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm];
+
+  TSKEY lastKey = -1;
+
+  pQuery->fileId = -1;
+  vnodeFreeFieldsEx(pRuntimeEnv);
+
+  // keep the in-memory cache status in local variables, since it may be changed by write operations
+  getBasicCacheInfoSnapshot(pQuery, pMeterObj->pCache, pMeterObj->vnode);
+
+  SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache;
+  if (pCacheInfo != NULL && pCacheInfo->cacheBlocks != NULL && pQuery->numOfBlocks > 0) {
+    pQuery->fileId = -1;
+    TSKEY key = pMeterObj->lastKey;
+
+    pQuery->skey = key;
+    pQuery->ekey = key;
+    pQuery->lastKey = pQuery->skey;
+
+    /*
+     * the cache block may have been flushed to disk, leaving no data in cache,
+     * so copying the cache block to a local buffer is required.
+     */
+    lastKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, false);
+    if (lastKey < 0) {  // data has been flushed to disk, search in the file again
+      lastKey = getQueryPositionForCacheInvalid(pRuntimeEnv, searchFn);
+
+      if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) {
+        return false;
+      }
+    }
+  } else {  // no data in cache, try file
+    TSKEY key = pMeterObj->lastKeyOnFile;
+
+    pQuery->skey = key;
+    pQuery->ekey = key;
+    pQuery->lastKey = pQuery->skey;
+
+    bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn);
+    if (!ret) {  // no data in file, return false
+      return false;
+    }
+
+    lastKey = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos);
+  }
+
+  assert(lastKey <= pQuery->skey);
+
+  pQuery->skey = lastKey;
+  pQuery->ekey = lastKey;
+  pQuery->lastKey = pQuery->skey;
+
+  return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter);
+}
+
 /**
  * determine the first query range, according to raw query range [skey, ekey] and group-by interval.
 * the validity of the aggregation time interval is not checked here; the minimum interval is not less than
- * 10ms,
- * which is guaranteed by parser at client-side
+ * 10ms, which is guaranteed by the parser at the client side
 */
bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySupportObj *pSupporter,
-                               SPointInterpoSupporter *pPointInterpSupporter) {
+                               SPointInterpoSupporter *pPointInterpSupporter, int64_t *key) {
  SQueryRuntimeEnv * pRuntimeEnv = &pSupporter->runtimeEnv;
  SQuery *           pQuery = pRuntimeEnv->pQuery;
  SMeterObj *        pMeterObj = pRuntimeEnv->pMeterObj;
@@ -2900,10 +3338,14 @@
  if (QUERY_IS_ASC_QUERY(pQuery)) {
    // todo: make the return behavior consistent with getQueryStartPositionInCache
    if (dataInDisk && getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_GREATER_EQUAL, searchFn)) {
-      TSKEY key = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos);
-      assert(key >= pQuery->skey);
+      TSKEY nextKey = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos);
+      assert(nextKey >= pQuery->skey);

-      return doGetQueryPos(key, pSupporter, pPointInterpSupporter);
+      if (key != NULL) {
+        *key = nextKey;
+      }
+
+      return doGetQueryPos(nextKey, pSupporter, pPointInterpSupporter);
    }

    // set no data in file
@@ -2916,6 +3358,11 @@
    }

    TSKEY nextKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, false);
+
+    if (key != NULL) {
+      *key = nextKey;
+    }
+
    return doGetQueryPos(nextKey, pSupporter, pPointInterpSupporter);
  } else {  // descending order
@@ -2923,44 +3370,40 @@
    TSKEY nextKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, false);
    assert(nextKey == -1 || nextKey <= pQuery->skey);

-    // valid data in cache
-    if (nextKey != -1) {
+    if (key != NULL) {
+      *key = nextKey;
+    }
+
+    if (nextKey != -1) {  // found qualified data in cache
      if (nextKey >= pQuery->ekey) {
-        if (isFirstLastRowQuery(pQuery)) {
-          return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter);
-        } else {
-          getAlignedIntervalQueryRange(pQuery, nextKey, pQuery->skey, pQuery->ekey);
-          return true;
-        }
+        return doSetDataInfo(pSupporter, pPointInterpSupporter, pMeterObj, nextKey);
      } else {
        /*
         * nextKey < pQuery->ekey && nextKey < pQuery->lastKey, query range is
-         * larger than all data, abort NOTE: Interp query does not reach here, since for all interp query,
+         * larger than all data, abort
+         *
+         * NOTE: Interp query does not reach here, since for all interp query,
         * the query order is ascending order.
         */
        return false;
      }
-    } else {  // all data in cache are greater than pQuery->lastKey, try file
+    } else {  // all data in cache are greater than pQuery->skey, try file
    }
  }

  if (dataInDisk && getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn)) {
-    TSKEY key = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos);
-    assert(key <= pQuery->skey);
+    TSKEY nextKey = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos);
+    assert(nextKey <= pQuery->skey);

-    /* key in query range.
If not, no qualified in disk file */
-    if (key >= pQuery->ekey) {
-      if (isFirstLastRowQuery(pQuery)) { /* no qualified data in this query range */
-        return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter);
-      } else {
-        getAlignedIntervalQueryRange(pQuery, key, pQuery->skey, pQuery->ekey);
-        return true;
-      }
-    } else {  // Goes on in case of key in file less than pMeterObj->lastKey,
-              // which is also the pQuery->skey
-      if (isFirstLastRowQuery(pQuery)) {
-        return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter);
-      }
+    if (key != NULL) {
+      *key = nextKey;
+    }
+
+    // key is in the query range; if not, there is no qualified data in the disk file
+    if (nextKey >= pQuery->ekey) {
+      return doSetDataInfo(pSupporter, pPointInterpSupporter, pMeterObj, nextKey);
+    } else {  // for all query types, return false if key < pQuery->ekey
+      return false;
    }
  }
}
@@ -2968,7 +3411,6 @@
  return false;
}

-// todo handle the mmap relative offset value assert problem
int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv *pRuntimeEnv, SPositionInfo *position) {
  TSKEY nextTimestamp = -1;
@@ -2986,9 +3428,9 @@
      return -1;
    }

-    SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot);
+    SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot);
    if (pBlock != NULL) {
-      nextTimestamp = getTimestampInCacheBlock(pBlock, position->pos);
+      nextTimestamp = getTimestampInCacheBlock(pRuntimeEnv, pBlock, position->pos);
    } else {
      // todo fix it
    }
@@ -3171,12 +3613,7 @@ static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pBlockInf
  }

  // update the pQuery->limit.offset value, and pQuery->pos value
-  TSKEY *keys = NULL;
-  if (IS_DISK_DATA_BLOCK(pQuery)) {
-    keys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data;
-  } else {
-    keys = (TSKEY *)(((SCacheBlock *)pBlock)->offset[0]);
-  }
+  TSKEY *keys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data;

  int32_t i = 0;
  if (QUERY_IS_ASC_QUERY(pQuery)) {
@@ -3217,7 +3654,7 @@
  if (IS_DISK_DATA_BLOCK(pQuery)) {
    pQuery->skey = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos);
  } else {
-    pQuery->skey = getTimestampInCacheBlock(pBlock, pQuery->pos);
+    pQuery->skey = getTimestampInCacheBlock(pRuntimeEnv, pBlock, pQuery->pos);
  }

  // update the offset value
@@ -3252,15 +3689,23 @@ static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB
static void changeExecuteScanOrder(SQuery *pQuery, bool metricQuery) {
  // in case of point-interpolation query, use asc order scan
  char msg[] =
-      "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%lld-%lld, "
-      "new qrange:%lld-%lld";
+      "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%" PRId64 "-%" PRId64
+      ", "
+      "new qrange:%" PRId64 "-%" PRId64;

-  // descending order query
+  // descending order query for last_row query
  if (isFirstLastRowQuery(pQuery)) {
    dTrace("QInfo:%p scan order changed for last_row query, old:%d, new:%d", GET_QINFO_ADDR(pQuery),
           pQuery->order.order, TSQL_SO_DESC);

    pQuery->order.order = TSQL_SO_DESC;
+
+    int64_t skey = MIN(pQuery->skey, pQuery->ekey);
+    int64_t ekey = MAX(pQuery->skey, pQuery->ekey);
+
+    pQuery->skey = ekey;
+    pQuery->ekey = skey;
+
    return;
  }
@@ -3333,11 +3778,11 @@ static int32_t doSkipDataBlock(SQueryRuntimeEnv
*pRuntimeEnv) {
      break;
    }

-    void *pBlock = getGenericDataBlock(pMeterObj, pQuery, pQuery->slot);
+    void *pBlock = getGenericDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot);
    assert(pBlock != NULL);

    int32_t blockType = IS_DISK_DATA_BLOCK(pQuery) ? BLK_FILE_BLOCK : BLK_CACHE_BLOCK;
-    SBlockInfo blockInfo = getBlockBasicInfo(pBlock, blockType);
+    SBlockInfo blockInfo = getBlockBasicInfo(pRuntimeEnv, pBlock, blockType);

    int32_t maxReads = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.size - pQuery->pos : pQuery->pos + 1;
    assert(maxReads >= 0);
@@ -3351,7 +3796,7 @@ static int32_t doSkipDataBlock(SQueryRuntimeEnv *pRuntimeEnv) {
      pQuery->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.keyLast : blockInfo.keyFirst;
      pQuery->lastKey += step;

-      qTrace("QInfo:%p skip rows:%d, offset:%lld", GET_QINFO_ADDR(pQuery), maxReads, pQuery->limit.offset);
+      qTrace("QInfo:%p skip rows:%d, offset:%" PRId64 "", GET_QINFO_ADDR(pQuery), maxReads, pQuery->limit.offset);
    }
  }
@@ -3366,10 +3811,10 @@ void forwardQueryStartPosition(SQueryRuntimeEnv *pRuntimeEnv) {
    return;
  }

-  void *pBlock = getGenericDataBlock(pMeterObj, pQuery, pQuery->slot);
+  void *pBlock = getGenericDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot);

  int32_t blockType = (IS_DISK_DATA_BLOCK(pQuery)) ? BLK_FILE_BLOCK : BLK_CACHE_BLOCK;
-  SBlockInfo blockInfo = getBlockBasicInfo(pBlock, blockType);
+  SBlockInfo blockInfo = getBlockBasicInfo(pRuntimeEnv, pBlock, blockType);

  // get the qualified data that can be skipped
  int32_t maxReads = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.size - pQuery->pos : pQuery->pos + 1;
@@ -3380,7 +3825,7 @@
  } else {
    pQuery->limit.offset -= maxReads;
    // update lastKey first, since the following skip operation may traverse to another medium (cache or file)
-    pQuery->lastKey = (QUERY_IS_ASC_QUERY(pQuery))? blockInfo.keyLast+1:blockInfo.keyFirst-1;
+    pQuery->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ?
blockInfo.keyLast + 1 : blockInfo.keyFirst - 1;
    doSkipDataBlock(pRuntimeEnv);
  }
}
@@ -3433,7 +3878,7 @@ static bool forwardQueryStartPosIfNeeded(SQInfo *pQInfo, SMeterQuerySupportObj *
    pQuery->lastKey = pQuery->skey;

    // todo opt performance
-    if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, NULL) == false) {
+    if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, NULL, NULL) == false) {
      sem_post(&pQInfo->dataReady);  // hack for next read for empty return
      pQInfo->over = 1;
      return false;
    }
@@ -3559,7 +4004,6 @@ void pointInterpSupporterSetData(SQInfo *pQInfo, SPointInterpoSupporter *pPointI
      pCtx->numOfParams = 4;

      SInterpInfo *pInterpInfo = (SInterpInfo *)pRuntimeEnv->pCtx[i].aOutputBuf;
-
      pInterpInfo->pInterpDetail = calloc(1, sizeof(SInterpInfoDetail));

      SInterpInfoDetail *pInterpDetail = pInterpInfo->pInterpDetail;
@@ -3679,7 +4123,7 @@ static void allocMemForInterpo(SMeterQuerySupportObj *pSupporter, SQuery *pQuery
static int32_t allocateOutputBufForGroup(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, bool isMetricQuery) {
  int32_t slot = 0;

-  if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
+  if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) {
    slot = 10000;
  } else {
    slot = pSupporter->pSidSet->numOfSubSet;
@@ -3693,36 +4137,95 @@
  for (int32_t k = 0; k < slot; ++k) {
    SOutputRes *pOneRes = &pSupporter->pResult[k];
    pOneRes->nAlloc = 1;
-
+
    /*
     * for a single-table top/bottom query with group by on a normal column, the number of
     * output rows equals the maximum number of rows, instead of 1.
     */
    if (!isMetricQuery && isTopBottomQuery(pQuery)) {
      assert(pQuery->numOfOutputCols > 1);
-
+
      SSqlFunctionExpr *pExpr = &pQuery->pSelectExpr[1];
      pOneRes->nAlloc = pExpr->pBase.arg[0].argValue.i64;
    }
-
+
    createGroupResultBuf(pQuery, pOneRes, isMetricQuery);
  }

  return TSDB_CODE_SUCCESS;
}

+static int32_t allocateRuntimeEnvBuf(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj) {
+  SQuery *pQuery = pRuntimeEnv->pQuery;
+
+  // Make sure the start position of each buffer is aligned to 4 bytes on 32-bit ARM systems.
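
The alignment comment above is the rationale for how `allocateRuntimeEnvBuf` lays out its buffers: each column gets a single allocation of `sizeof(SData) + EXTRA_BYTES + rows * bytes`. As a rough, self-contained illustration of why a header-plus-payload allocation keeps the payload 4-byte aligned (`ColBuf`, `ALIGN4`, and `allocColBuf` are invented names for this sketch, not TDengine APIs):

```c
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Round n up to the next multiple of 4, so that the payload following a
 * variable-sized header starts on a 4-byte boundary; 32-bit ARM cores may
 * fault on, or slowly emulate, unaligned 32-bit loads. */
#define ALIGN4(n) (((n) + (size_t)3) & ~(size_t)3)

typedef struct {
  int32_t len;    /* stand-in for a simplified SData header */
  char    data[]; /* column payload starts here */
} ColBuf;

static ColBuf *allocColBuf(int32_t rows, int32_t bytesPerRow, int32_t extra) {
  size_t size = ALIGN4(sizeof(ColBuf)) + (size_t)extra + (size_t)rows * (size_t)bytesPerRow;
  ColBuf *p = calloc(1, size); /* zero-filled, mirroring the calloc in the patch */

  /* calloc returns maximally aligned memory and offsetof(ColBuf, data) is a
   * multiple of 4, so the payload itself is 4-byte aligned. */
  assert(p == NULL || ((uintptr_t)p->data & 3) == 0);
  return p;
}
```
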
+  for (int32_t i = 0; i < pQuery->numOfCols; ++i) {
+    int32_t bytes = pQuery->colList[i].data.bytes;
+    pRuntimeEnv->colDataBuffer[i] = calloc(1, sizeof(SData) + EXTRA_BYTES + pMeterObj->pointsPerFileBlock * bytes);
+    if (pRuntimeEnv->colDataBuffer[i] == NULL) {
+      goto _error_clean;
+    }
+  }
+
+  // record the maximum column width among the columns of this meter/metric
+  int32_t maxColWidth = pQuery->colList[0].data.bytes;
+  for (int32_t i = 1; i < pQuery->numOfCols; ++i) {
+    int32_t bytes = pQuery->colList[i].data.bytes;
+    if (bytes > maxColWidth) {
+      maxColWidth = bytes;
+    }
+  }
+
+  pRuntimeEnv->primaryColBuffer = NULL;
+  if (PRIMARY_TSCOL_LOADED(pQuery)) {
+    pRuntimeEnv->primaryColBuffer = pRuntimeEnv->colDataBuffer[0];
+  } else {
+    pRuntimeEnv->primaryColBuffer =
+        (SData *)malloc(pMeterObj->pointsPerFileBlock * TSDB_KEYSIZE + sizeof(SData) + EXTRA_BYTES);
+  }
+
+  pRuntimeEnv->unzipBufSize = (size_t)(maxColWidth * pMeterObj->pointsPerFileBlock + EXTRA_BYTES);  // plus extra_bytes
+
+  pRuntimeEnv->unzipBuffer = (char *)calloc(1, pRuntimeEnv->unzipBufSize);
+  pRuntimeEnv->secondaryUnzipBuffer = (char *)calloc(1, pRuntimeEnv->unzipBufSize);
+
+  if (pRuntimeEnv->unzipBuffer == NULL || pRuntimeEnv->secondaryUnzipBuffer == NULL ||
+      pRuntimeEnv->primaryColBuffer == NULL) {
+    goto _error_clean;
+  }
+
+  return TSDB_CODE_SUCCESS;
+
+_error_clean:
+  for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfCols; ++i) {
+    tfree(pRuntimeEnv->colDataBuffer[i]);
+  }
+
+  tfree(pRuntimeEnv->unzipBuffer);
+  tfree(pRuntimeEnv->secondaryUnzipBuffer);
+
+  if (!PRIMARY_TSCOL_LOADED(pQuery)) {
+    tfree(pRuntimeEnv->primaryColBuffer);
+  }
+
+  return TSDB_CODE_SERV_OUT_OF_MEMORY;
+}
+
int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMeterQuerySupportObj *pSupporter,
                                     void *param) {
  SQuery *pQuery = &pQInfo->query;
+  int32_t code = TSDB_CODE_SUCCESS;

+  /*
+   * only successful completion requires the sem_post / over = 1 operations.
+   */
  if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey > pQuery->ekey)) ||
      (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->ekey > pQuery->skey))) {
-    dTrace("QInfo:%p no result in time range %lld-%lld, order %d", pQInfo, pQuery->skey, pQuery->ekey,
+    dTrace("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->skey, pQuery->ekey,
           pQuery->order.order);
    sem_post(&pQInfo->dataReady);
    pQInfo->over = 1;
-
    return TSDB_CODE_SUCCESS;
  }
@@ -3744,49 +4247,54 @@
  // check data in file or cache
  bool dataInCache = true;
  bool dataInDisk = true;
-  pSupporter->runtimeEnv.pQuery = pQuery;

-  vnodeCheckIfDataExists(&pSupporter->runtimeEnv, pMeterObj, &dataInDisk, &dataInCache);
+  SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
+  pRuntimeEnv->pQuery = pQuery;
+  pRuntimeEnv->pMeterObj = pMeterObj;
+
+  if ((code = allocateRuntimeEnvBuf(pRuntimeEnv, pMeterObj)) != TSDB_CODE_SUCCESS) {
+    return code;
+  }
+
+  vnodeCheckIfDataExists(pRuntimeEnv, pMeterObj, &dataInDisk, &dataInCache);

  /* data in file or cache is not qualified for the query. abort */
  if (!(dataInCache || dataInDisk)) {
    dTrace("QInfo:%p no result in query", pQInfo);
    sem_post(&pQInfo->dataReady);
    pQInfo->over = 1;
-
-    return TSDB_CODE_SUCCESS;
+    return code;
  }

-  pSupporter->runtimeEnv.pTSBuf = param;
-  pSupporter->runtimeEnv.cur.vnodeIndex = -1;
+  pRuntimeEnv->pTSBuf = param;
+  pRuntimeEnv->cur.vnodeIndex = -1;
  if (param != NULL) {
-    int16_t order = (pQuery->order.order == pSupporter->runtimeEnv.pTSBuf->tsOrder) ?
TSQL_SO_ASC : TSQL_SO_DESC; - tsBufSetTraverseOrder(pSupporter->runtimeEnv.pTSBuf, order); + int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? TSQL_SO_ASC : TSQL_SO_DESC; + tsBufSetTraverseOrder(pRuntimeEnv->pTSBuf, order); } // create runtime environment - int32_t ret = setupQueryRuntimeEnv(pMeterObj, pQuery, &pSupporter->runtimeEnv, NULL, pQuery->order.order, false); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + code = setupQueryRuntimeEnv(pMeterObj, pQuery, &pSupporter->runtimeEnv, NULL, pQuery->order.order, false); + if (code != TSDB_CODE_SUCCESS) { + return code; } vnodeRecordAllFiles(pQInfo, pMeterObj->vnode); - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - if ((ret = allocateOutputBufForGroup(pSupporter, pQuery, false)) != TSDB_CODE_SUCCESS) { - return ret; + if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { + if ((code = allocateOutputBufForGroup(pSupporter, pQuery, false)) != TSDB_CODE_SUCCESS) { + return code; } - pSupporter->runtimeEnv.hashList = taosInitIntHash(10039, sizeof(void *), taosHashInt); - pSupporter->runtimeEnv.usedIndex = 0; - pSupporter->runtimeEnv.pResult = pSupporter->pResult; - } + int16_t type = TSDB_DATA_TYPE_NULL; + if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr); + } else { + type = TSDB_DATA_TYPE_TIMESTAMP; + } - // in case of last_row query, we set the query timestamp to pMeterObj->lastKey; - if (isFirstLastRowQuery(pQuery)) { - pQuery->skey = pMeterObj->lastKey; - pQuery->ekey = pMeterObj->lastKey; - pQuery->lastKey = pQuery->skey; + // todo bug! + initSlidingWindowInfo(&pRuntimeEnv->swindowResInfo, 3, type, pSupporter->pResult); } pSupporter->rawSKey = pQuery->skey; @@ -3799,14 +4307,79 @@ int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMete SPointInterpoSupporter interpInfo = {0}; pointInterpSupporterInit(pQuery, &interpInfo); - if ((normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &interpInfo) == false) || - (isFixedOutputQuery(pQuery) && !isTopBottomQuery(pQuery) && (pQuery->limit.offset > 0)) || - (isTopBottomQuery(pQuery) && pQuery->limit.offset >= pQuery->pSelectExpr[1].pBase.arg[0].argValue.i64)) { - sem_post(&pQInfo->dataReady); - pQInfo->over = 1; + /* + * in case of last_row query without query range, we set the query timestamp to + * pMeterObj->lastKey. Otherwise, keep the initial query time range unchanged. 
+ */ - pointInterpSupporterDestroy(&interpInfo); - return TSDB_CODE_SUCCESS; + if (isFirstLastRowQuery(pQuery) && notHasQueryTimeRange(pQuery)) { + if (!normalizeUnBoundLastRowQuery(pSupporter, &interpInfo)) { + sem_post(&pQInfo->dataReady); + pQInfo->over = 1; + + pointInterpSupporterDestroy(&interpInfo); + return TSDB_CODE_SUCCESS; + } + } else { + // find the skey and ekey in case of sliding query + // todo refactor + if (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0) { + int64_t skey = 0; + + SWAP(pQuery->skey, pQuery->ekey, int64_t); + pQuery->order.order ^= 1; + pQuery->lastKey = pQuery->skey; + + if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &interpInfo, &skey) == false) { + sem_post(&pQInfo->dataReady); + pQInfo->over = 1; + + pointInterpSupporterDestroy(&interpInfo); + return TSDB_CODE_SUCCESS; + } + + pQuery->skey = skey; + + pQuery->order.order ^= 1; + SWAP(pQuery->skey, pQuery->ekey, int64_t); + + int64_t ekey = 0; + pQuery->lastKey = pQuery->skey; + if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &interpInfo, &ekey) == false) { + // + } + + pQuery->skey = ekey; + + TSKEY skey1, ekey1; + TSKEY windowSKey = 0, windowEKey = 0; + + TSKEY minKey = MIN(pQuery->skey, pQuery->ekey); + TSKEY maxKey = MAX(pQuery->skey, pQuery->ekey); + + doGetAlignedIntervalQueryRangeImpl(pQuery, minKey, minKey, maxKey, &skey1, &ekey1, &windowSKey, &windowEKey); + pRuntimeEnv->swindowResInfo.startTime = windowSKey; + + pSupporter->rawSKey = pQuery->skey; + pSupporter->rawEKey = pQuery->ekey; + + if (QUERY_IS_ASC_QUERY(pQuery)) { + pRuntimeEnv->swindowResInfo.prevSKey = windowSKey; + } else { + pRuntimeEnv->swindowResInfo.prevSKey = windowSKey + ((pQuery->skey - windowSKey) / pQuery->slidingTime) * pQuery->slidingTime; + } + } else { + int64_t ekey = 0; + if ((normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &interpInfo, &ekey) == false) || + (isFixedOutputQuery(pQuery) && !isTopBottomQuery(pQuery) && (pQuery->limit.offset > 0)) || + (isTopBottomQuery(pQuery) && pQuery->limit.offset >= pQuery->pSelectExpr[1].pBase.arg[0].argValue.i64)) { + sem_post(&pQInfo->dataReady); + pQInfo->over = 1; + + pointInterpSupporterDestroy(&interpInfo); + return TSDB_CODE_SUCCESS; + } + } } /* @@ -3822,7 +4395,7 @@ int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMete int64_t rs = taosGetIntervalStartTimestamp(pSupporter->rawSKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); - taosInitInterpoInfo(&pSupporter->runtimeEnv.interpoInfo, pQuery->order.order, rs, 0, 0); + taosInitInterpoInfo(&pRuntimeEnv->interpoInfo, pQuery->order.order, rs, 0, 0); allocMemForInterpo(pSupporter, pQuery, pMeterObj); if (!isPointInterpoQuery(pQuery)) { @@ -3845,14 +4418,15 @@ void vnodeQueryFreeQInfoEx(SQInfo *pQInfo) { teardownQueryRuntimeEnv(&pSupporter->runtimeEnv); tfree(pSupporter->pMeterSidExtInfo); - if (pSupporter->pMeterObj != NULL) { - taosCleanUpIntHash(pSupporter->pMeterObj); - pSupporter->pMeterObj = NULL; + if (pSupporter->pMetersHashTable != NULL) { + taosCleanUpHashTable(pSupporter->pMetersHashTable); + pSupporter->pMetersHashTable = NULL; } - if (pSupporter->pSidSet != NULL || isGroupbyNormalCol(pQInfo->query.pGroupbyExpr)) { + if (pSupporter->pSidSet != NULL || isGroupbyNormalCol(pQInfo->query.pGroupbyExpr) || + (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { int32_t size = 0; - if (isGroupbyNormalCol(pQInfo->query.pGroupbyExpr)) { + if (isGroupbyNormalCol(pQInfo->query.pGroupbyExpr) || 
(pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { size = 10000; } else if (pSupporter->pSidSet != NULL) { size = pSupporter->pSidSet->numOfSubSet; @@ -3865,7 +4439,7 @@ void vnodeQueryFreeQInfoEx(SQInfo *pQInfo) { if (FD_VALID(pSupporter->meterOutputFd)) { assert(pSupporter->meterOutputMMapBuf != NULL); - dTrace("QInfo:%p disk-based output buffer during query:%lld bytes", pQInfo, pSupporter->bufSize); + dTrace("QInfo:%p disk-based output buffer during query:%" PRId64 " bytes", pQInfo, pSupporter->bufSize); munmap(pSupporter->meterOutputMMapBuf, pSupporter->bufSize); tclose(pSupporter->meterOutputFd); @@ -3892,7 +4466,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey > pQuery->ekey)) || (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->ekey > pQuery->skey))) { - dTrace("QInfo:%p no result in time range %lld-%lld, order %d", pQInfo, pQuery->skey, pQuery->ekey, + dTrace("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->skey, pQuery->ekey, pQuery->order.order); sem_post(&pQInfo->dataReady); @@ -3906,10 +4480,11 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) pQuery->pointsRead = 0; changeExecuteScanOrder(pQuery, true); + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; - doInitQueryFileInfoFD(&pSupporter->runtimeEnv.vnodeFileInfo); - vnodeInitDataBlockInfo(&pSupporter->runtimeEnv.loadBlockInfo); - vnodeInitLoadCompBlockInfo(&pSupporter->runtimeEnv.loadCompBlockInfo); + doInitQueryFileInfoFD(&pRuntimeEnv->vnodeFileInfo); + vnodeInitDataBlockInfo(&pRuntimeEnv->loadBlockInfo); + vnodeInitLoadCompBlockInfo(&pRuntimeEnv->loadCompBlockInfo); /* * since we employ the output control mechanism in main loop. @@ -3931,15 +4506,15 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) } // get one queried meter - SMeterObj *pMeter = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[0]->sid); + SMeterObj *pMeter = getMeterObj(pSupporter->pMetersHashTable, pSupporter->pSidSet->pSids[0]->sid); - pSupporter->runtimeEnv.pTSBuf = param; - pSupporter->runtimeEnv.cur.vnodeIndex = -1; + pRuntimeEnv->pTSBuf = param; + pRuntimeEnv->cur.vnodeIndex = -1; // set the ts-comp file traverse order if (param != NULL) { - int16_t order = (pQuery->order.order == pSupporter->runtimeEnv.pTSBuf->tsOrder) ? TSQL_SO_ASC : TSQL_SO_DESC; - tsBufSetTraverseOrder(pSupporter->runtimeEnv.pTSBuf, order); + int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? 
TSQL_SO_ASC : TSQL_SO_DESC; + tsBufSetTraverseOrder(pRuntimeEnv->pTSBuf, order); } int32_t ret = setupQueryRuntimeEnv(pMeter, pQuery, &pSupporter->runtimeEnv, pTagSchema, TSQL_SO_ASC, true); @@ -3947,6 +4522,11 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) return ret; } + ret = allocateRuntimeEnvBuf(pRuntimeEnv, pMeter); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + tSidSetSort(pSupporter->pSidSet); vnodeRecordAllFiles(pQInfo, pMeter->vnode); @@ -3955,9 +4535,8 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) } if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // group by columns not tags; - pSupporter->runtimeEnv.hashList = taosInitIntHash(10039, sizeof(void *), taosHashInt); - pSupporter->runtimeEnv.usedIndex = 0; - pSupporter->runtimeEnv.pResult = pSupporter->pResult; + int16_t type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr); + initSlidingWindowInfo(&pRuntimeEnv->swindowResInfo, 10039, type, pSupporter->pResult); } if (pQuery->nAggTimeInterval != 0) { @@ -3978,7 +4557,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) return TSDB_CODE_SERV_NO_DISKSPACE; } - pSupporter->runtimeEnv.numOfRowsPerPage = (DEFAULT_INTERN_BUF_SIZE - sizeof(tFilePage)) / pQuery->rowSize; + pRuntimeEnv->numOfRowsPerPage = (DEFAULT_INTERN_BUF_SIZE - sizeof(tFilePage)) / pQuery->rowSize; pSupporter->lastPageId = -1; pSupporter->bufSize = pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE; @@ -3997,7 +4576,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) TSKEY revisedStime = taosGetIntervalStartTimestamp(pSupporter->rawSKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); - taosInitInterpoInfo(&pSupporter->runtimeEnv.interpoInfo, pQuery->order.order, revisedStime, 0, 0); + taosInitInterpoInfo(&pRuntimeEnv->interpoInfo, pQuery->order.order, revisedStime, 0, 0); return TSDB_CODE_SUCCESS; } @@ -4016,7 +4595,7 @@ void vnodeDecMeterRefcnt(SQInfo *pQInfo) { } else { int32_t num = 0; for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) { - SMeterObj *pMeter = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[i]->sid); + SMeterObj *pMeter = getMeterObj(pSupporter->pMetersHashTable, pSupporter->pSidSet->pSids[i]->sid); atomic_fetch_sub_32(&(pMeter->numOfQueries), 1); if (pMeter->numOfQueries > 0) { @@ -4036,26 +4615,12 @@ void vnodeDecMeterRefcnt(SQInfo *pQInfo) { } } -// todo merge with doRevisedResultsByLimit -void UNUSED_FUNC truncateResultByLimit(SQInfo *pQInfo, int64_t *final, int32_t *interpo) { - SQuery *pQuery = &(pQInfo->query); - - if (pQuery->limit.limit > 0 && ((*final) + pQInfo->pointsRead > pQuery->limit.limit)) { - int64_t num = (*final) + pQInfo->pointsRead - pQuery->limit.limit; - (*interpo) -= num; - (*final) -= num; - - setQueryStatus(pQuery, QUERY_COMPLETED); // query completed - } -} - -TSKEY getTimestampInCacheBlock(SCacheBlock *pBlock, int32_t index) { +TSKEY getTimestampInCacheBlock(SQueryRuntimeEnv *pRuntimeEnv, SCacheBlock *pBlock, int32_t index) { if (pBlock == NULL || index >= pBlock->numOfPoints || index < 0) { return -1; } - TSKEY *ts = (TSKEY *)pBlock->offset[0]; - return ts[index]; + return ((TSKEY *)(pRuntimeEnv->primaryColBuffer->data))[index]; } /* @@ -4072,24 +4637,25 @@ TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index) { SCompBlock *pBlock = getDiskDataBlock(pQuery, pQuery->slot); // this block must be loaded into buffer - SQueryLoadBlockInfo *pLoadInfo = 
&pRuntimeEnv->loadBlockInfo; + SLoadDataBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; assert(pQuery->pos >= 0 && pQuery->pos < pBlock->numOfPoints); SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; - bool loadTimestamp = true; - int32_t fileId = pQuery->fileId; - int32_t fileIndex = vnodeGetVnodeHeaderFileIdx(&fileId, pRuntimeEnv, pQuery->order.order); - + int32_t fileIndex = vnodeGetVnodeHeaderFileIdx(&pQuery->fileId, pRuntimeEnv, pQuery->order.order); + dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, slot:%d load data block due to primary key required", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot); - - int32_t ret = - loadDataBlockIntoMem(pBlock, &pQuery->pFields[pQuery->slot], pRuntimeEnv, fileIndex, loadTimestamp, true); + + bool loadTS = true; + bool loadFields = true; + int32_t slot = pQuery->slot; + + int32_t ret = loadDataBlockIntoMem(pBlock, &pQuery->pFields[slot], pRuntimeEnv, fileIndex, loadTS, loadFields); if (ret != TSDB_CODE_SUCCESS) { return -1; } - + SET_DATA_BLOCK_LOADED(pRuntimeEnv->blockStatus); SET_FILE_BLOCK_FLAG(pRuntimeEnv->blockStatus); @@ -4098,7 +4664,7 @@ TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index) { } // todo remove this function -static void getFirstDataBlockInCache(SQueryRuntimeEnv *pRuntimeEnv) { +static TSKEY getFirstDataBlockInCache(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; assert(pQuery->fileId == -1 && QUERY_IS_ASC_QUERY(pQuery)); @@ -4117,10 +4683,11 @@ static void getFirstDataBlockInCache(SQueryRuntimeEnv *pRuntimeEnv) { } else if (nextTimestamp > pQuery->ekey) { setQueryStatus(pQuery, QUERY_COMPLETED); } + + return nextTimestamp; } -// TODO handle case that the cache is allocated but not assign to SMeterObj -void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn) { +TSKEY getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn) { SQuery * pQuery = pRuntimeEnv->pQuery; SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; @@ -4128,7 +4695,7 @@ void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_sear dTrace( "QInfo:%p vid:%d sid:%d id:%s cache block re-allocated to other meter, " - "try get query start position in file/cache, qrange:%lld-%lld, lastKey:%lld", + "try get query start position in file/cache, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); if (step == QUERY_DESC_FORWARD_STEP) { @@ -4138,7 +4705,7 @@ void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_sear */ bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn); - dTrace("QInfo:%p vid:%d sid:%d id:%s find the possible position, fileId:%d, slot:%d, pos:%d", pQInfo, + dTrace("QInfo:%p vid:%d sid:%d id:%s find the possible position in file, fileId:%d, slot:%d, pos:%d", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot, pQuery->pos); if (ret) { @@ -4148,10 +4715,13 @@ void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_sear if (key < pQuery->ekey) { setQueryStatus(pQuery, QUERY_COMPLETED); } + + return key; } else { setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return -1; // no data to check } - } else { + } else { // asc query bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, 
QUERY_RANGE_GREATER_EQUAL, searchFn); if (ret) { dTrace("QInfo:%p vid:%d sid:%d id:%s find the possible position, fileId:%d, slot:%d, pos:%d", pQInfo, @@ -4163,24 +4733,27 @@ void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_sear if (key > pQuery->ekey) { setQueryStatus(pQuery, QUERY_COMPLETED); } + + return key; } else { /* - * all data in file is less than the pQuery->lastKey, try cache. + * all data in file is less than the pQuery->lastKey, try cache again. * cache block status will be set in getFirstDataBlockInCache function */ - getFirstDataBlockInCache(pRuntimeEnv); + TSKEY key = getFirstDataBlockInCache(pRuntimeEnv); dTrace("QInfo:%p vid:%d sid:%d id:%s find the new position in cache, fileId:%d, slot:%d, pos:%d", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot, pQuery->pos); + return key; } } } static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __block_search_fn_t searchFn) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; - SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; assert(pQuery->fileId < 0); /* @@ -4200,6 +4773,7 @@ static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t ste int32_t currentSlot = pCacheInfo->currentSlot; int32_t firstSlot = getFirstCacheSlot(numOfBlocks, currentSlot, pCacheInfo); + if (step == QUERY_DESC_FORWARD_STEP && pQuery->slot == firstSlot) { bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn); if (ret) { @@ -4212,7 +4786,6 @@ static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t ste // the skip operation does NOT set the startPos yet // assert(pRuntimeEnv->startPos.fileId < 0); - } else { setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); } @@ -4221,7 +4794,7 @@ static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t ste /* now still iterate the cache data blocks */ pQuery->slot = (pQuery->slot + step + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; - SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot); /* * data in this cache block has been flushed to disk, then we should locate the start position in file. @@ -4234,7 +4807,7 @@ static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t ste } else { pQuery->pos = (QUERY_IS_ASC_QUERY(pQuery)) ? 0 : pBlock->numOfPoints - 1; - TSKEY startkey = getTimestampInCacheBlock(pBlock, pQuery->pos); + TSKEY startkey = getTimestampInCacheBlock(pRuntimeEnv, pBlock, pQuery->pos); if (startkey < 0) { setQueryStatus(pQuery, QUERY_COMPLETED); } @@ -4269,22 +4842,25 @@ static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __bl (step == QUERY_DESC_FORWARD_STEP && (pQuery->slot == 0))) { fileIndex = getNextDataFileCompInfo(pRuntimeEnv, pMeterObj, step); /* data maybe in cache */ - if (fileIndex < 0) { + + if (fileIndex >= 0) { // next file + pQuery->slot = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->numOfBlocks - 1; + pQuery->pos = (step == QUERY_ASC_FORWARD_STEP) ? 
0 : pQuery->pBlock[pQuery->slot].numOfPoints - 1; + } else { // try data in cache assert(pQuery->fileId == -1); + if (step == QUERY_ASC_FORWARD_STEP) { getFirstDataBlockInCache(pRuntimeEnv); - } else { /* no data any more */ + } else { // no data to check for desc order query setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); } return DISK_DATA_LOADED; - } else { - pQuery->slot = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->numOfBlocks - 1; - pQuery->pos = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->pBlock[pQuery->slot].numOfPoints - 1; } } else { // next block in the same file int32_t fid = pQuery->fileId; fileIndex = vnodeGetVnodeHeaderFileIdx(&fid, pRuntimeEnv, pQuery->order.order); + pQuery->slot += step; pQuery->pos = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->pBlock[pQuery->slot].numOfPoints - 1; } @@ -4296,14 +4872,11 @@ static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __bl return DISK_DATA_LOADED; } + // load data block function will change the value of pQuery->pos int32_t ret = LoadDatablockOnDemand(&pQuery->pBlock[pQuery->slot], &pQuery->pFields[pQuery->slot], &pRuntimeEnv->blockStatus, pRuntimeEnv, fileIndex, pQuery->slot, searchFn, true); if (ret != DISK_DATA_LOADED) { - /* - * if it is the last block of file, set current access position at the last point of the meter in this file, - * in order to get the correct next access point, - */ return ret; } } else { // data in cache @@ -4313,66 +4886,47 @@ static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __bl return DISK_DATA_LOADED; } -static void doHandleFileBlockImpl(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pblockInfo, __block_search_fn_t searchFn, - SData **sdata, int32_t *numOfRes, int32_t blockLoadStatus, int32_t *forwardStep) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SQueryCostSummary *pSummary = &pRuntimeEnv->summary; - - int64_t start = taosGetTimestampUs(); - - SCompBlock *pBlock = getDiskDataBlock(pQuery, pQuery->slot); - *pblockInfo = getBlockBasicInfo(pBlock, BLK_FILE_BLOCK); - - TSKEY *primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; - - if (blockLoadStatus == DISK_DATA_LOADED) { - *forwardStep = applyFunctionsOnBlock(pRuntimeEnv, pblockInfo, primaryKeys, (char *)sdata, - pQuery->pFields[pQuery->slot], searchFn, numOfRes); - } else { - *forwardStep = pblockInfo->size; - } - - pSummary->fileTimeUs += (taosGetTimestampUs() - start); -} - -static void doHandleCacheBlockImpl(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pblockInfo, __block_search_fn_t searchFn, - int32_t *numOfRes, int32_t *forwardStep) { +static void doHandleDataBlockImpl(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pblockInfo, __block_search_fn_t searchFn, + int32_t *numOfRes, int32_t blockLoadStatus, int32_t *forwardStep) { SQuery * pQuery = pRuntimeEnv->pQuery; - SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; SQueryCostSummary *pSummary = &pRuntimeEnv->summary; + TSKEY * primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; int64_t start = taosGetTimestampUs(); - // todo refactor getCacheDataBlock. 
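
The cache-side handler deleted below had to re-probe `getCacheDataBlock` in a loop because a cache block can be flushed to disk, or reassigned to another meter, between lookup and read; the unified `doHandleDataBlockImpl` that replaces it instead asserts the block has already been copied into the runtime environment and reads timestamps only from `primaryColBuffer`. A minimal sketch of that copy-then-read pattern, using hypothetical names rather than TDengine's API:

```c
#include <stdint.h>
#include <string.h>

enum { MAX_ROWS = 4096 }; /* arbitrary capacity for the sketch */

typedef struct {
  int64_t ts[MAX_ROWS]; /* private copy of the primary timestamp column */
  int     rows;
} BlockSnapshot;

/* Copy a possibly volatile source block into a caller-owned snapshot once;
 * afterwards the reader never touches the source again, so a writer that
 * recycles or flushes the original block cannot invalidate in-flight reads. */
static void snapshotBlock(BlockSnapshot *dst, const int64_t *srcTs, int rows) {
  dst->rows = rows > MAX_ROWS ? MAX_ROWS : rows;
  memcpy(dst->ts, srcTs, sizeof(int64_t) * (size_t)dst->rows);
}

/* All subsequent reads go through the stable snapshot. */
static int64_t tsAt(const BlockSnapshot *snap, int pos) {
  return (pos >= 0 && pos < snap->rows) ? snap->ts[pos] : -1; /* -1: out of range */
}
```
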
- //#ifdef _CACHE_INVALID_TEST - // taosMsleep(20000); - //#endif - SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); - while (pBlock == NULL) { - getQueryPositionForCacheInvalid(pRuntimeEnv, searchFn); + if (IS_DISK_DATA_BLOCK(pQuery)) { + SCompBlock *pBlock = getDiskDataBlock(pQuery, pQuery->slot); + *pblockInfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_FILE_BLOCK); - if (IS_DISK_DATA_BLOCK(pQuery)) { // do check data block in file - break; + if (blockLoadStatus == DISK_DATA_LOADED) { + *forwardStep = applyFunctionsOnBlock(pRuntimeEnv, pblockInfo, primaryKeys, pQuery->pFields[pQuery->slot], + searchFn, numOfRes); } else { - pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + *forwardStep = pblockInfo->size; } - } - if (IS_DISK_DATA_BLOCK(pQuery)) { - // start query position is located in file, try query on file block - doHandleFileBlockImpl(pRuntimeEnv, pblockInfo, searchFn, pRuntimeEnv->colDataBuffer, numOfRes, DISK_DATA_LOADED, - forwardStep); - } else { // also query in cache block - *pblockInfo = getBlockBasicInfo(pBlock, BLK_CACHE_BLOCK); + pSummary->fileTimeUs += (taosGetTimestampUs() - start); + } else { + assert(vnodeIsDatablockLoaded(pRuntimeEnv, pRuntimeEnv->pMeterObj, -1, true) == DISK_BLOCK_NO_NEED_TO_LOAD); - TSKEY *primaryKeys = (TSKEY *)pBlock->offset[0]; - *forwardStep = - applyFunctionsOnBlock(pRuntimeEnv, pblockInfo, primaryKeys, (char *)pBlock, NULL, searchFn, numOfRes); + SCacheBlock *pBlock = getCacheDataBlock(pRuntimeEnv->pMeterObj, pRuntimeEnv, pQuery->slot); + *pblockInfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_CACHE_BLOCK); + + *forwardStep = applyFunctionsOnBlock(pRuntimeEnv, pblockInfo, primaryKeys, NULL, searchFn, numOfRes); pSummary->cacheTimeUs += (taosGetTimestampUs() - start); } } +static void getNextLogicalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow* pTimeWindow) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + pTimeWindow->skey += (pQuery->slidingTime * factor); + pTimeWindow->ekey += (pQuery->slidingTime * factor); +} + static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; bool LOAD_DATA = true; @@ -4381,7 +4935,6 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { int64_t cnt = 0; SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; - SData ** sdata = pRuntimeEnv->colDataBuffer; __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; int32_t blockLoadStatus = DISK_DATA_LOADED; @@ -4393,7 +4946,8 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SPositionInfo *pStartPos = &pRuntimeEnv->startPos; assert(pQuery->slot == pStartPos->slot); - dTrace("QInfo:%p query start, qrange:%lld-%lld, lastkey:%lld, order:%d, start fileId:%d, slot:%d, pos:%d, bstatus:%d", + dTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", lastkey:%" PRId64 + ", order:%d, start fileId:%d, slot:%d, pos:%d, bstatus:%d", GET_QINFO_ADDR(pQuery), pQuery->skey, pQuery->ekey, pQuery->lastKey, pQuery->order.order, pStartPos->fileId, pStartPos->slot, pStartPos->pos, pRuntimeEnv->blockStatus); @@ -4406,14 +4960,10 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { int32_t numOfRes = 0; SBlockInfo blockInfo = {0}; + doHandleDataBlockImpl(pRuntimeEnv, &blockInfo, searchFn, &numOfRes, blockLoadStatus, &forwardStep); - if (IS_DISK_DATA_BLOCK(pQuery)) { - doHandleFileBlockImpl(pRuntimeEnv, &blockInfo, searchFn, sdata, &numOfRes, 
blockLoadStatus, &forwardStep);
-    } else {
-      doHandleCacheBlockImpl(pRuntimeEnv, &blockInfo, searchFn, &numOfRes, &forwardStep);
-    }
-
-    dTrace("QInfo:%p check data block, brange:%lld-%lld, fileId:%d, slot:%d, pos:%d, bstatus:%d, rows:%d, checked:%d",
+    dTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64
+           ", fileId:%d, slot:%d, pos:%d, bstatus:%d, rows:%d, checked:%d",
           GET_QINFO_ADDR(pQuery), blockInfo.keyFirst, blockInfo.keyLast, pQuery->fileId, pQuery->slot, pQuery->pos,
           pRuntimeEnv->blockStatus, blockInfo.size, forwardStep);
@@ -4462,10 +5012,10 @@
    }

    // check next block
-    void *pNextBlock = getGenericDataBlock(pMeterObj, pQuery, pQuery->slot);
+    void *pNextBlock = getGenericDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot);

    int32_t blockType = (IS_DISK_DATA_BLOCK(pQuery)) ? BLK_FILE_BLOCK : BLK_CACHE_BLOCK;
-    blockInfo = getBlockBasicInfo(pNextBlock, blockType);
+    blockInfo = getBlockBasicInfo(pRuntimeEnv, pNextBlock, blockType);
    if (!checkQueryRangeAgainstNextBlock(&blockInfo, pRuntimeEnv)) {
      break;
    }
@@ -4476,8 +5026,8 @@
static void updatelastkey(SQuery *pQuery, SMeterQueryInfo *pMeterQInfo) { pMeterQInfo->lastKey = pQuery->lastKey; }

-void queryOnBlock(SMeterQuerySupportObj *pSupporter, int64_t *primaryKeys, int32_t blockStatus, char *data,
-                  SBlockInfo *pBlockBasicInfo, SMeterDataInfo *pDataHeadInfoEx, SField *pFields,
+void queryOnBlock(SMeterQuerySupportObj *pSupporter, int64_t *primaryKeys, int32_t blockStatus,
+                  SBlockInfo *pBlockBasicInfo, SMeterDataInfo *pMeterDataInfo, SField *pFields,
                  __block_search_fn_t searchFn) {
  /* cache blocks may be assigned to another meter, abort */
  if (pBlockBasicInfo->size <= 0) {
    return;
  }
@@ -4489,22 +5039,21 @@
  if (pQuery->nAggTimeInterval == 0) {  // not interval query
    int32_t numOfRes = 0;
-    applyFunctionsOnBlock(pRuntimeEnv, pBlockBasicInfo, primaryKeys, data, pFields, searchFn, &numOfRes);
+    applyFunctionsOnBlock(pRuntimeEnv, pBlockBasicInfo, primaryKeys, pFields, searchFn, &numOfRes);

    // note: only a fixed number of outputs for each group-by operation
    if (numOfRes > 0) {
-      pSupporter->pResult[pDataHeadInfoEx->groupIdx].numOfRows = numOfRes;
+      pSupporter->pResult[pMeterDataInfo->groupIdx].numOfRows = numOfRes;
    }

    // used to decide the correct start position in cache after checking all data in files
-    updatelastkey(pQuery, pDataHeadInfoEx->pMeterQInfo);
+    updatelastkey(pQuery, pMeterDataInfo->pMeterQInfo);
    if (pRuntimeEnv->pTSBuf != NULL) {
-      pDataHeadInfoEx->pMeterQInfo->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf);
+      pMeterDataInfo->pMeterQInfo->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf);
    }

  } else {
-    applyIntervalQueryOnBlock(pSupporter, pDataHeadInfoEx, data, primaryKeys, pBlockBasicInfo, blockStatus, pFields,
-                              searchFn);
+    applyIntervalQueryOnBlock(pSupporter, pMeterDataInfo, pBlockBasicInfo, blockStatus, pFields, searchFn);
  }
}
@@ -4522,10 +5071,11 @@ static void doSetTagValueInParam(tTagSchema *pTagSchema, int32_t tagColIdx, SMet
  SSchema *pCol = &pTagSchema->pSchema[tagColIdx];

  tVariantDestroy(param);
-  tVariantCreateFromBinary(param, pStr, pCol->bytes, pCol->type);

  if (isNull(pStr, pCol->type)) {
    param->nType = TSDB_DATA_TYPE_NULL;
+  } else {
+    tVariantCreateFromBinary(param, pStr, pCol->bytes, pCol->type);
  }
}
@@ -4602,27 +5152,27 @@ static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType)
  if (functionId ==
TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { switch (srcDataType) { case TSDB_DATA_TYPE_BINARY: - printf("%ld,%s\t", *(TSKEY *)data, (data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%s\t", *(TSKEY *)data, (data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BOOL: - printf("%ld,%d\t", *(TSKEY *)data, *(int8_t *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%d\t", *(TSKEY *)data, *(int8_t *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_SMALLINT: - printf("%ld,%d\t", *(TSKEY *)data, *(int16_t *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%d\t", *(TSKEY *)data, *(int16_t *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - printf("%ld,%ld\t", *(TSKEY *)data, *(TSKEY *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%" PRId64 "\t", *(TSKEY *)data, *(TSKEY *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_INT: - printf("%ld,%d\t", *(TSKEY *)data, *(int32_t *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%d\t", *(TSKEY *)data, *(int32_t *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_FLOAT: - printf("%ld,%f\t", *(TSKEY *)data, *(float *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%f\t", *(TSKEY *)data, *(float *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_DOUBLE: - printf("%ld,%lf\t", *(TSKEY *)data, *(double *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%lf\t", *(TSKEY *)data, *(double *)(data + TSDB_KEYSIZE + 1)); break; } } else if (functionId == TSDB_FUNC_AVG) { @@ -4631,8 +5181,8 @@ static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) printf("%lf,%lf\t", *(double *)data, *(double *)(data + sizeof(double))); } else if (functionId == TSDB_FUNC_TWA) { data += 1; - printf("%lf,%ld,%ld,%ld\t", *(double *)data, *(int64_t *)(data + 8), *(int64_t *)(data + 16), - *(int64_t *)(data + 24)); + printf("%lf,%" PRId64 ",%" PRId64 ",%" PRId64 "\t", *(double *)data, *(int64_t *)(data + 8), + *(int64_t *)(data + 16), *(int64_t *)(data + 24)); } else if (functionId == TSDB_FUNC_MIN || functionId == TSDB_FUNC_MAX) { switch (srcDataType) { case TSDB_DATA_TYPE_TINYINT: @@ -4644,7 +5194,7 @@ static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - printf("%ld\t", *(int64_t *)data); + printf("%" PRId64 "\t", *(int64_t *)data); break; case TSDB_DATA_TYPE_INT: printf("%d\t", *(int *)data); @@ -4660,7 +5210,7 @@ static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) if (srcDataType == TSDB_DATA_TYPE_FLOAT || srcDataType == TSDB_DATA_TYPE_DOUBLE) { printf("%lf\t", *(float *)data); } else { - printf("%ld\t", *(int64_t *)data); + printf("%" PRId64 "\t", *(int64_t *)data); } } else { printf("%s\t", data); @@ -4692,7 +5242,7 @@ void UNUSED_FUNC displayInterResult(SData **pdata, SQuery *pQuery, int32_t numOf } case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%ld\t", *(int64_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); + printf("%" PRId64 "\t", *(int64_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); break; case TSDB_DATA_TYPE_INT: printf("%d\t", *(int32_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); @@ -4714,13 +5264,13 @@ static tFilePage *getFilePage(SMeterQuerySupportObj *pSupporter, int32_t pageId) return (tFilePage *)(pSupporter->meterOutputMMapBuf + DEFAULT_INTERN_BUF_SIZE * pageId); } -static tFilePage *getMeterDataPage(SMeterQuerySupportObj *pSupporter, 
SMeterDataInfo *pInfoEx, int32_t pageId) { - SMeterQueryInfo *pInfo = pInfoEx->pMeterQInfo; - if (pageId >= pInfo->numOfPages) { +static tFilePage *getMeterDataPage(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pMeterDataInfo, int32_t pageId) { + SMeterQueryInfo *pMeterQueryInfo = pMeterDataInfo->pMeterQInfo; + if (pageId >= pMeterQueryInfo->numOfPages) { return NULL; } - int32_t realId = pInfo->pageList[pageId]; + int32_t realId = pMeterQueryInfo->pageList[pageId]; return getFilePage(pSupporter, realId); } @@ -4730,14 +5280,14 @@ typedef struct Position { } Position; typedef struct SCompSupporter { - SMeterDataInfo ** pInfoEx; + SMeterDataInfo ** pMeterDataInfo; Position * pPosition; SMeterQuerySupportObj *pSupporter; } SCompSupporter; int64_t getCurrentTimestamp(SCompSupporter *pSupportor, int32_t meterIdx) { Position * pPos = &pSupportor->pPosition[meterIdx]; - tFilePage *pPage = getMeterDataPage(pSupportor->pSupporter, pSupportor->pInfoEx[meterIdx], pPos->pageIdx); + tFilePage *pPage = getMeterDataPage(pSupportor->pSupporter, pSupportor->pMeterDataInfo[meterIdx], pPos->pageIdx); return *(int64_t *)(pPage->data + TSDB_KEYSIZE * pPos->rowIdx); } @@ -4760,10 +5310,10 @@ int32_t meterResultComparator(const void *pLeft, const void *pRight, void *param return -1; } - tFilePage *pPageLeft = getMeterDataPage(supportor->pSupporter, supportor->pInfoEx[left], leftPos.pageIdx); + tFilePage *pPageLeft = getMeterDataPage(supportor->pSupporter, supportor->pMeterDataInfo[left], leftPos.pageIdx); int64_t leftTimestamp = *(int64_t *)(pPageLeft->data + TSDB_KEYSIZE * leftPos.rowIdx); - tFilePage *pPageRight = getMeterDataPage(supportor->pSupporter, supportor->pInfoEx[right], rightPos.pageIdx); + tFilePage *pPageRight = getMeterDataPage(supportor->pSupporter, supportor->pMeterDataInfo[right], rightPos.pageIdx); int64_t rightTimestamp = *(int64_t *)(pPageRight->data + TSDB_KEYSIZE * rightPos.rowIdx); if (leftTimestamp == rightTimestamp) { @@ -4914,17 +5464,17 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery cs.pPosition[pos].pageIdx += 1; // try next page // check if current page is empty or not. 
if it is empty, ignore it and try next - if (cs.pPosition[pos].pageIdx <= cs.pInfoEx[pos]->pMeterQInfo->numOfPages - 1) { + if (cs.pPosition[pos].pageIdx <= cs.pMeterDataInfo[pos]->pMeterQInfo->numOfPages - 1) { tFilePage *newPage = getMeterDataPage(cs.pSupporter, pValidMeter[pos], position->pageIdx); if (newPage->numOfElems <= 0) { // if current source data page is null, it must be the last page of source output page cs.pPosition[pos].pageIdx += 1; - assert(cs.pPosition[pos].pageIdx >= cs.pInfoEx[pos]->pMeterQInfo->numOfPages - 1); + assert(cs.pPosition[pos].pageIdx >= cs.pMeterDataInfo[pos]->pMeterQInfo->numOfPages - 1); } } // the following code must be executed if current source pages are exhausted - if (cs.pPosition[pos].pageIdx >= cs.pInfoEx[pos]->pMeterQInfo->numOfPages) { + if (cs.pPosition[pos].pageIdx >= cs.pMeterDataInfo[pos]->pMeterQInfo->numOfPages) { cs.pPosition[pos].pageIdx = -1; cs.pPosition[pos].rowIdx = -1; @@ -4958,7 +5508,7 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len); #endif - dTrace("QInfo:%p result merge completed, elapsed time:%lld ms", GET_QINFO_ADDR(pQuery), endt - startt); + dTrace("QInfo:%p result merge completed, elapsed time:%" PRId64 " ms", GET_QINFO_ADDR(pQuery), endt - startt); tfree(pTree); tfree(pValidMeter); tfree(posArray); @@ -5062,7 +5612,7 @@ int32_t doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) { if (pMeterInfo[i].pMeterQInfo != NULL && pMeterInfo[i].pMeterQInfo->lastResRows > 0) { int32_t index = pMeterInfo[i].meterOrderIdx; - pRuntimeEnv->pMeterObj = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[index]->sid); + pRuntimeEnv->pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pSupporter->pSidSet->pSids[index]->sid); assert(pRuntimeEnv->pMeterObj == pMeterInfo[i].pMeterObj); int32_t ret = setIntervalQueryExecutionContext(pSupporter, i, pMeterInfo[i].pMeterQInfo); @@ -5084,13 +5634,20 @@ int32_t doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) { void disableFunctForSuppleScan(SQueryRuntimeEnv *pRuntimeEnv, int32_t order) { SQuery *pQuery = pRuntimeEnv->pQuery; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0)) { for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { pRuntimeEnv->pCtx[i].order = (pRuntimeEnv->pCtx[i].order) ^ 1; } - for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) { - SOutputRes *buf = &pRuntimeEnv->pResult[i]; + SSlidingWindowInfo *pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + + for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SWindowStatus *pStatus = &pSlidingWindowInfo->pStatus[i]; + if (!pStatus->closed) { + continue; + } + + SOutputRes *buf = &pSlidingWindowInfo->pResult[i]; // open/close the specified query for each group result for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { @@ -5165,6 +5722,21 @@ void clearGroupResultBuf(SOutputRes *pOneOutputRes, int32_t nOutputCols) { } } +void copyGroupResultBuf(SOutputRes* dst, const SOutputRes* src, int32_t nOutputCols) { + for(int32_t i = 0; i < nOutputCols; ++i) { + SResultInfo *pDst = &dst->resultInfo[i]; + SResultInfo *pSrc = &src->resultInfo[i]; + + char* buf = pDst->interResultBuf; + memcpy(pDst, pSrc, sizeof(SResultInfo)); + pDst->interResultBuf = buf; + memcpy(pDst->interResultBuf, pSrc->interResultBuf, pDst->bufLen); + + int32_t size = sizeof(tFilePage) + pSrc->bufLen * src->nAlloc; + 
 
@@ -5062,7 +5612,7 @@ int32_t doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) {
       if (pMeterInfo[i].pMeterQInfo != NULL && pMeterInfo[i].pMeterQInfo->lastResRows > 0) {
         int32_t index = pMeterInfo[i].meterOrderIdx;
 
-        pRuntimeEnv->pMeterObj = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[index]->sid);
+        pRuntimeEnv->pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pSupporter->pSidSet->pSids[index]->sid);
         assert(pRuntimeEnv->pMeterObj == pMeterInfo[i].pMeterObj);
 
         int32_t ret = setIntervalQueryExecutionContext(pSupporter, i, pMeterInfo[i].pMeterQInfo);
@@ -5084,13 +5634,20 @@
 void disableFunctForSuppleScan(SQueryRuntimeEnv *pRuntimeEnv, int32_t order) {
   SQuery *pQuery = pRuntimeEnv->pQuery;
 
-  if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
+  if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0)) {
     for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
       pRuntimeEnv->pCtx[i].order = (pRuntimeEnv->pCtx[i].order) ^ 1;
     }
 
-    for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) {
-      SOutputRes *buf = &pRuntimeEnv->pResult[i];
+    SSlidingWindowInfo *pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo;
+
+    for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) {
+      SWindowStatus *pStatus = &pSlidingWindowInfo->pStatus[i];
+      if (!pStatus->closed) {
+        continue;
+      }
+
+      SOutputRes *buf = &pSlidingWindowInfo->pResult[i];
 
       // open/close the specified query for each group result
       for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) {
@@ -5165,6 +5722,21 @@ void clearGroupResultBuf(SOutputRes *pOneOutputRes, int32_t nOutputCols) {
   }
 }
 
+void copyGroupResultBuf(SOutputRes* dst, const SOutputRes* src, int32_t nOutputCols) {
+  for(int32_t i = 0; i < nOutputCols; ++i) {
+    SResultInfo *pDst = &dst->resultInfo[i];
+    SResultInfo *pSrc = &src->resultInfo[i];
+
+    char* buf = pDst->interResultBuf;
+    memcpy(pDst, pSrc, sizeof(SResultInfo));
+    pDst->interResultBuf = buf;
+    memcpy(pDst->interResultBuf, pSrc->interResultBuf, pDst->bufLen);
+
+    int32_t size = sizeof(tFilePage) + pSrc->bufLen * src->nAlloc;
+    memcpy(dst->result[i], src->result[i], size);
+  }
+}
+
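Editor's note: `copyGroupResultBuf` above uses an idiom worth naming: struct-copy the metadata wholesale, but first save and then restore the destination's own buffer pointer so both sides keep distinct allocations. A self-contained sketch of the same pattern, using a hypothetical stand-in struct:

```c
#include <stdlib.h>
#include <string.h>

// Hypothetical stand-in for SResultInfo: a small header plus an owned buffer.
typedef struct {
  int   complete;
  int   bufLen;
  char *interResultBuf;  // owned allocation, must not be shared after copy
} ResInfo;

// Copy all scalar fields via memcpy, but keep dst's own buffer allocation.
static void copyResInfo(ResInfo *dst, const ResInfo *src) {
  char *keep = dst->interResultBuf;   // save dst's pointer
  memcpy(dst, src, sizeof(ResInfo));  // this clobbers the pointer field...
  dst->interResultBuf = keep;         // ...so restore it
  memcpy(dst->interResultBuf, src->interResultBuf, (size_t)dst->bufLen);
}

int main(void) {
  ResInfo a = {1, 8, malloc(8)}, b = {0, 8, malloc(8)};
  memcpy(a.interResultBuf, "interres", 8);
  copyResInfo(&b, &a);  // b now equals a, but owns a separate buffer
  free(a.interResultBuf);
  free(b.interResultBuf);
  return 0;
}
```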
 void destroyGroupResultBuf(SOutputRes *pOneOutputRes, int32_t nOutputCols) {
   if (pOneOutputRes == NULL) {
     return;
@@ -5185,13 +5757,7 @@ void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) {
 
   for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
     SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
-
-    // ts_comp query does not required reversed output
-    if (QUERY_IS_ASC_QUERY(pQuery) || isTSCompQuery(pQuery)) {
-      pCtx->aOutputBuf = pQuery->sdata[i]->data;
-    } else {  // point to the last position of output buffer for desc query
-      pCtx->aOutputBuf = pQuery->sdata[i]->data + (rows - 1) * pCtx->outputBytes;
-    }
+    pCtx->aOutputBuf = pQuery->sdata[i]->data;
 
     /*
      * set the output buffer information and intermediate buffer
@@ -5214,7 +5780,6 @@ void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) {
 
 void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) {
   SQuery *pQuery = pRuntimeEnv->pQuery;
-  int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
 
   // reset the execution contexts
   for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) {
@@ -5223,7 +5788,7 @@ void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) {
 
     // set next output position
     if (IS_OUTER_FORWARD(aAggs[functionId].nStatus)) {
-      pRuntimeEnv->pCtx[j].aOutputBuf += pRuntimeEnv->pCtx[j].outputBytes * output * factor;
+      pRuntimeEnv->pCtx[j].aOutputBuf += pRuntimeEnv->pCtx[j].outputBytes * output /** factor*/;
     }
 
     if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
@@ -5234,7 +5799,7 @@ void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) {
        *
        * diff function is handled in multi-output function
        */
-      pRuntimeEnv->pCtx[j].ptsOutputBuf += TSDB_KEYSIZE * output * factor;
+      pRuntimeEnv->pCtx[j].ptsOutputBuf += TSDB_KEYSIZE * output/* * factor*/;
     }
 
     resetResultInfo(pRuntimeEnv->pCtx[j].resultInfo);
@@ -5269,30 +5834,17 @@ void doSkipResults(SQueryRuntimeEnv *pRuntimeEnv) {
     pQuery->over &= (~QUERY_RESBUF_FULL);
   } else {
     int32_t numOfSkip = (int32_t)pQuery->limit.offset;
-    int32_t size = pQuery->pointsRead;
-
     pQuery->pointsRead -= numOfSkip;
 
-    int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
-
     for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
       int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId;
-
       int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes;
-      if (QUERY_IS_ASC_QUERY(pQuery)) {
-        memmove(pQuery->sdata[i]->data, pQuery->sdata[i]->data + bytes * numOfSkip, pQuery->pointsRead * bytes);
-      } else {  // DESC query
-        int32_t maxrows = pQuery->pointsToRead;
-
-        memmove(pQuery->sdata[i]->data + (maxrows - pQuery->pointsRead) * bytes,
-                pQuery->sdata[i]->data + (maxrows - size) * bytes, pQuery->pointsRead * bytes);
-      }
-
-      pRuntimeEnv->pCtx[i].aOutputBuf -= bytes * numOfSkip * step;
+      memmove(pQuery->sdata[i]->data, pQuery->sdata[i]->data + bytes * numOfSkip, pQuery->pointsRead * bytes);
+      pRuntimeEnv->pCtx[i].aOutputBuf += bytes * numOfSkip;
 
       if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
-        pRuntimeEnv->pCtx[i].ptsOutputBuf -= TSDB_KEYSIZE * numOfSkip * step;
+        pRuntimeEnv->pCtx[i].ptsOutputBuf += TSDB_KEYSIZE * numOfSkip;
       }
     }
 
@@ -5300,27 +5852,6 @@ void doSkipResults(SQueryRuntimeEnv *pRuntimeEnv) {
   }
 }
 
-/**
- * move remain data to the start position of output buffer
- * @param pRuntimeEnv
- */
-void moveDescOrderResultsToFront(SQueryRuntimeEnv *pRuntimeEnv) {
-  SQuery *pQuery = pRuntimeEnv->pQuery;
-  int32_t maxrows = pQuery->pointsToRead;
-
-  if (QUERY_IS_ASC_QUERY(pQuery) || isTSCompQuery(pQuery)) {
-    return;
-  }
-
-  if (pQuery->pointsRead > 0 && pQuery->pointsRead < maxrows) {
-    for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
-      int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes;
-      memmove(pQuery->sdata[i]->data, pQuery->sdata[i]->data + (maxrows - pQuery->pointsRead) * bytes,
-              pQuery->pointsRead * bytes);
-    }
-  }
-}
-
 typedef struct SQueryStatus {
   SPositionInfo start;
   SPositionInfo next;
@@ -5340,6 +5871,9 @@ static void queryStatusSave(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus *pStatus
   pStatus->overStatus = pQuery->over;
   pStatus->lastKey = pQuery->lastKey;
 
+  pStatus->skey = pQuery->skey;
+  pStatus->ekey = pQuery->ekey;
+
   pStatus->start = pRuntimeEnv->startPos;
   pStatus->next = pRuntimeEnv->nextPos;
   pStatus->end = pRuntimeEnv->endPos;
@@ -5356,13 +5890,18 @@ static void queryStatusSave(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus *pStatus
   SWAP(pQuery->skey, pQuery->ekey, TSKEY);
   pQuery->lastKey = pQuery->skey;
   pRuntimeEnv->startPos = pRuntimeEnv->endPos;
+
+  SWAP(pRuntimeEnv->intervalWindow.skey, pRuntimeEnv->intervalWindow.ekey, TSKEY);
 }
 
 static void queryStatusRestore(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus *pStatus) {
   SQuery *pQuery = pRuntimeEnv->pQuery;
   SWAP(pQuery->skey, pQuery->ekey, TSKEY);
-
+  SWAP(pRuntimeEnv->intervalWindow.skey, pRuntimeEnv->intervalWindow.ekey, TSKEY);
+
   pQuery->lastKey = pStatus->lastKey;
+  pQuery->skey = pStatus->skey;
+  pQuery->ekey = pStatus->ekey;
 
   pQuery->over = pStatus->overStatus;
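Editor's note: `queryStatusSave`/`queryStatusRestore` now checkpoint `skey`/`ekey` too, because the supplementary (reverse) scan swaps the range in place and must hand the caller back an untouched query state. The general shape of that save/swap/restore dance, sketched with a hypothetical minimal query-state struct:

```c
#include <assert.h>
#include <stdint.h>

typedef int64_t TSKEY;

// Hypothetical mini query state: just the fields the swap touches.
typedef struct { TSKEY skey, ekey, lastKey; } QState;

#define SWAP(a, b, T) do { T t_ = (a); (a) = (b); (b) = t_; } while (0)

// Save, reverse the range for the supplementary scan, then restore.
static void supplementScan(QState *q) {
  QState saved = *q;              // checkpoint skey/ekey/lastKey

  SWAP(q->skey, q->ekey, TSKEY);  // scan the same range backwards
  q->lastKey = q->skey;
  // ... the reverse scan would run here ...

  *q = saved;                     // restore exactly what the caller had
}

int main(void) {
  QState q = {100, 200, 100};
  supplementScan(&q);
  assert(q.skey == 100 && q.ekey == 200 && q.lastKey == 100);
  return 0;
}
```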
@@ -5381,9 +5920,11 @@ static void doSingleMeterSupplementScan(SQueryRuntimeEnv *pRuntimeEnv) {
     return;
   }
 
+  dTrace("QInfo:%p start to supp scan", GET_QINFO_ADDR(pQuery));
+
   SET_SUPPLEMENT_SCAN_FLAG(pRuntimeEnv);
 
-  // usually this load operation will incure load disk block operation
+  // usually this load operation will incur a disk block load operation
   TSKEY endKey = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->endPos);
 
   assert((QUERY_IS_ASC_QUERY(pQuery) && endKey <= pQuery->ekey) ||
@@ -5421,17 +5962,25 @@ void vnodeScanAllData(SQueryRuntimeEnv *pRuntimeEnv) {
 
   /* store the start query position */
   savePointPosition(&pRuntimeEnv->startPos, pQuery->fileId, pQuery->slot, pQuery->pos);
-
+  int64_t skey = pQuery->lastKey;
+
   while (1) {
     doScanAllDataBlocks(pRuntimeEnv);
 
-    // applied to agg functions (e.g., stddev)
     bool toContinue = true;
-    if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
+    if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) {
       // for each group result, call the finalize function for each column
-      for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) {
-        SOutputRes *buf = &pRuntimeEnv->pResult[i];
+      SSlidingWindowInfo *pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo;
+
+      for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) {
+        SOutputRes *buf = &pSlidingWindowInfo->pResult[i];
+
+        SWindowStatus *pStatus = &pSlidingWindowInfo->pStatus[i];
+        if (!pStatus->closed) {
+          continue;
+        }
+
         setGroupOutputBuffer(pRuntimeEnv, buf);
 
         for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) {
@@ -5469,33 +6018,32 @@ void vnodeScanAllData(SQueryRuntimeEnv *pRuntimeEnv) {
     }
   }
 
+  int64_t newSkey = pQuery->skey;
+  pQuery->skey = skey;
+
   doSingleMeterSupplementScan(pRuntimeEnv);
-
-  // reset status code
-  if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
-    for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) {
-      SOutputRes *buf = &pRuntimeEnv->pResult[i];
-      for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) {
-        buf->resultInfo[j].complete = false;
-      }
-    }
-  } else {
-    for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
-      SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[i]);
-      if (pResInfo != NULL) {
-        pResInfo->complete = false;
-      }
-    }
-  }
+
+  // update pQuery->skey/pQuery->ekey to limit the scan scope of the sliding
+  // query during the supplementary scan
+  pQuery->skey = newSkey;
 }
 
 void doFinalizeResult(SQueryRuntimeEnv *pRuntimeEnv) {
   SQuery *pQuery = pRuntimeEnv->pQuery;
 
-  if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
+  if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) {
     // for each group result, call the finalize function for each column
-    for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) {
-      SOutputRes *buf = &pRuntimeEnv->pResult[i];
+    SSlidingWindowInfo *pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo;
+    if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
+      closeAllSlidingWindow(pSlidingWindowInfo);
+    }
+
+    for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) {
+      SOutputRes *buf = &pSlidingWindowInfo->pResult[i];
+      if (!slidingWindowClosed(pSlidingWindowInfo, i)) {
+        continue;
+      }
+
       setGroupOutputBuffer(pRuntimeEnv, buf);
 
       for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) {
@@ -5554,37 +6102,71 @@ int64_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv) {
   return maxOutput;
 }
 
-/*
- * forward the query range for next interval query
- */
-void forwardIntervalQueryRange(SMeterQuerySupportObj *pSupporter, SQueryRuntimeEnv *pRuntimeEnv) {
+static int32_t getNextIntervalQueryRange(SMeterQuerySupportObj *pSupporter, SQueryRuntimeEnv *pRuntimeEnv,
+                                         int64_t *skey, int64_t *ekey) {
   SQuery *pQuery = pRuntimeEnv->pQuery;
 
   int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
-  pQuery->ekey += (pQuery->nAggTimeInterval * factor);
-  pQuery->skey = pQuery->ekey - (pQuery->nAggTimeInterval - 1) * factor;
 
-  // boundary check
-  if (QUERY_IS_ASC_QUERY(pQuery)) {
-    if (pQuery->skey > pSupporter->rawEKey) {
-      setQueryStatus(pQuery, QUERY_COMPLETED);
-      return;
-    }
+  *skey = pRuntimeEnv->intervalWindow.skey + (pQuery->slidingTime * factor);
+  *ekey = pRuntimeEnv->intervalWindow.ekey + (pQuery->slidingTime * factor);
+
+  if (pQuery->slidingTime > 0) {
+    if (QUERY_IS_ASC_QUERY(pQuery)) {
+      // the next sliding window is not contained in the query time range
+      if (*skey < pSupporter->rawSKey) {
+        *skey = pSupporter->rawSKey;
+      }
+
+      if (*skey > pSupporter->rawEKey) {
+        return QUERY_COMPLETED;
+      }
+
+      if (*ekey > pSupporter->rawEKey) {
+        *ekey = pSupporter->rawEKey;
+      }
+    } else {
+      if (*skey > pSupporter->rawSKey) {
+        *skey = pSupporter->rawSKey;
+      }
+
+      if (*skey < pSupporter->rawEKey) {
+        return QUERY_COMPLETED;
+      }
 
-    if (pQuery->ekey > pSupporter->rawEKey) {
-      pQuery->ekey = pSupporter->rawEKey;
+      if (*ekey < pSupporter->rawEKey) {
+        *ekey = pSupporter->rawEKey;
+      }
     }
-  } else {
-    if (pQuery->skey < pSupporter->rawEKey) {
+  }
+
+  return QUERY_NOT_COMPLETED;
+}
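Editor's note: `getNextIntervalQueryRange` above advances the interval window by `slidingTime` and clamps it against the raw query range. A standalone sketch of the ascending case, with hypothetical names:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t skey, ekey; } Window;

// Advance a sliding window and clamp it to [rawSKey, rawEKey].
// Returns false when the next window falls entirely past the range.
static bool nextWindow(Window *w, int64_t sliding, int64_t rawSKey, int64_t rawEKey) {
  w->skey += sliding;
  w->ekey += sliding;

  if (w->skey < rawSKey) w->skey = rawSKey;  // partial window at the start
  if (w->skey > rawEKey) return false;       // window starts past the range: done
  if (w->ekey > rawEKey) w->ekey = rawEKey;  // partial window at the end
  return true;
}

int main(void) {
  // interval 10, sliding 5 over the raw range [0, 22]
  Window w = {0, 9};
  do {
    printf("[%lld, %lld]\n", (long long)w.skey, (long long)w.ekey);
  } while (nextWindow(&w, 5, 0, 22));  // [0,9] [5,14] [10,19] [15,22] [20,22]
  return 0;
}
```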
+
+/*
+ * forward the query range for next interval query
+ */
+void forwardIntervalQueryRange(SMeterQuerySupportObj *pSupporter, SQueryRuntimeEnv *pRuntimeEnv) {
+  SQuery *pQuery = pRuntimeEnv->pQuery;
+  if (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0) {
+    if ((QUERY_IS_ASC_QUERY(pQuery) && pQuery->lastKey >= pQuery->ekey) ||
+        (!QUERY_IS_ASC_QUERY(pQuery) && pQuery->lastKey <= pQuery->ekey)) {
       setQueryStatus(pQuery, QUERY_COMPLETED);
-      return;
+    } else {
+      TSKEY nextTimestamp = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos);
     }
 
-    if (pQuery->ekey < pSupporter->rawEKey) {
-      pQuery->ekey = pSupporter->rawEKey;
-    }
+    return;
+  }
+
+  int32_t r = getNextIntervalQueryRange(pSupporter, pRuntimeEnv, &pQuery->skey, &pQuery->ekey);
+  if (r == QUERY_COMPLETED) {
+    setQueryStatus(pQuery, QUERY_COMPLETED);
+    return;
   }
 
+  getNextLogicalQueryRange(pRuntimeEnv, &pRuntimeEnv->intervalWindow);
+
   /* ensure the search in cache will return right position */
   pQuery->lastKey = pQuery->skey;
 
@@ -5599,7 +6181,7 @@ void forwardIntervalQueryRange(SMeterQuerySupportObj *pSupporter, SQueryRuntimeE
   // bridge the gap in group by time function
   if ((nextTimestamp > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
       (nextTimestamp < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) {
-    getAlignedIntervalQueryRange(pQuery, nextTimestamp, pSupporter->rawSKey, pSupporter->rawEKey);
+    getAlignedIntervalQueryRange(pRuntimeEnv, nextTimestamp, pSupporter->rawSKey, pSupporter->rawEKey);
  }
 }
 
@@ -5634,33 +6216,33 @@ int32_t vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, tSidSet *pSidSet
 
   SVnodeObj *pVnode = &vnodeList[vid];
 
-  char* buf = calloc(1, getCompHeaderSegSize(&pVnode->cfg));
+  char *buf = calloc(1, getCompHeaderSegSize(&pVnode->cfg));
   if (buf == NULL) {
     *numOfMeters = 0;
     return TSDB_CODE_SERV_OUT_OF_MEMORY;
   }
-  
+
   SQueryFilesInfo *pVnodeFileInfo = &pRuntimeEnv->vnodeFileInfo;
-  
+
   int32_t headerSize = getCompHeaderSegSize(&pVnode->cfg);
   lseek(pVnodeFileInfo->headerFd, TSDB_FILE_HEADER_LEN, SEEK_SET);
   read(pVnodeFileInfo->headerFd, buf, headerSize);
-  
+
   // check the offset value integrity
   if (validateHeaderOffsetSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, vid, buf - TSDB_FILE_HEADER_LEN,
                                   headerSize) < 0) {
     free(buf);
     *numOfMeters = 0;
-    
+
     return TSDB_CODE_FILE_CORRUPTED;
   }
 
-  int64_t oldestKey = getOldestKey(pVnode->numOfFiles, pVnode->fileId, &pVnode->cfg);
+  int64_t  oldestKey = getOldestKey(pVnode->numOfFiles, pVnode->fileId, &pVnode->cfg);
   (*pReqMeterDataInfo) = malloc(POINTER_BYTES * pSidSet->numOfSids);
   if (*pReqMeterDataInfo == NULL) {
     free(buf);
     *numOfMeters = 0;
-    
+
     return TSDB_CODE_SERV_OUT_OF_MEMORY;
   }
 
@@ -5668,7 +6250,7 @@ int32_t vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, tSidSet *pSidSet
   TSKEY skey, ekey;
 
   for (int32_t i = 0; i < pSidSet->numOfSids; ++i) {  // load all meter meta info
-    SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[i]->sid);
+    SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[i]->sid);
     if (pMeterObj == NULL) {
       dError("QInfo:%p failed to find required sid:%d", pQInfo, pMeterSidExtInfo[i]->sid);
       continue;
@@ -5705,19 +6287,19 @@ int32_t vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, tSidSet *pSidSet
       }
     }
 
-    int64_t headerOffset = sizeof(SCompHeader) * pMeterObj->sid;
+    int64_t      headerOffset = sizeof(SCompHeader) * pMeterObj->sid;
     SCompHeader *compHeader = (SCompHeader *)(buf + headerOffset);
     if (compHeader->compInfoOffset == 0) {  // current table is empty
       continue;
     }
-    
+
     // corrupted file may cause the invalid compInfoOffset, check needs
     int32_t compHeaderOffset = getCompHeaderStartPosition(&pVnode->cfg);
     if (validateCompBlockOffset(pQInfo, pMeterObj, compHeader, &pRuntimeEnv->vnodeFileInfo, compHeaderOffset) !=
         TSDB_CODE_SUCCESS) {
       free(buf);
       *numOfMeters = 0;
-      
+
       return TSDB_CODE_FILE_CORRUPTED;
     }
 
@@ -5739,7 +6321,7 @@ int32_t vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, tSidSet *pSidSet
   }
 
   free(buf);
-  
+
   return TSDB_CODE_SUCCESS;
 }
 
@@ -5857,11 +6439,12 @@ void restoreIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, SMeterQueryInfo *p
          ((pQuery->lastKey <= pQuery->skey) && !QUERY_IS_ASC_QUERY(pQuery)));
 }
 
-static void clearAllMeterDataBlockInfo(SMeterDataInfo** pMeterDataInfo, int32_t start, int32_t end) {
-  for(int32_t i = start; i < end; ++i) {
+static void clearAllMeterDataBlockInfo(SMeterDataInfo **pMeterDataInfo, int32_t start, int32_t end) {
+  for (int32_t i = start; i < end; ++i) {
     tfree(pMeterDataInfo[i]->pBlock);
     pMeterDataInfo[i]->numOfBlocks = 0;
-    pMeterDataInfo[i]->start = 0;
-  }
+    pMeterDataInfo[i]->start = -1;
+  }
 }
 
 static bool getValidDataBlocksRangeIndex(SMeterDataInfo *pMeterDataInfo, SQuery *pQuery, SCompBlock *pCompBlock,
@@ -5908,7 +6491,7 @@ static bool setValidDataBlocks(SMeterDataInfo *pMeterDataInfo, int32_t end) {
   if (size != pMeterDataInfo->numOfBlocks) {
     memmove(pMeterDataInfo->pBlock, &pMeterDataInfo->pBlock[pMeterDataInfo->start], size * sizeof(SCompBlock));
-    
+
     char *tmp = realloc(pMeterDataInfo->pBlock, size * sizeof(SCompBlock));
     if (tmp == NULL) {
       return false;
@@ -5936,12 +6519,14 @@ static bool setCurrentQueryRange(SMeterDataInfo *pMeterDataInfo, SQuery *pQuery,
   }
 
   if (*minval > *maxval) {
-    qTrace("QInfo:%p vid:%d sid:%d id:%s, no result in files, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode,
-           pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey, pMeterQInfo->lastKey);
+    qTrace("QInfo:%p vid:%d sid:%d id:%s, no result in files, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo,
+           pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey,
+           pMeterQInfo->lastKey);
     return false;
   } else {
-    qTrace("QInfo:%p vid:%d sid:%d id:%s, query in files, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode,
-           pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey, pMeterQInfo->lastKey);
+    qTrace("QInfo:%p vid:%d sid:%d id:%s, query in files, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo,
+           pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey,
+           pMeterQInfo->lastKey);
     return true;
   }
 }
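Editor's note: the `getDataBlocksForMeters` hunk below switches the per-meter block buffer from `calloc` to `realloc` and, importantly, assigns the result to a temporary first — assigning `realloc`'s return value straight back to the original pointer would leak the old block on failure. A minimal sketch of the pattern (names hypothetical):

```c
#include <stdlib.h>
#include <string.h>

// Grow a buffer without leaking it when realloc fails.
// Returns 0 on success, -1 on failure (old buffer is released here).
static int growBuffer(char **buf, size_t newSize) {
  char *p = realloc(*buf, newSize);  // never assign directly to *buf
  if (p == NULL) {
    free(*buf);                      // cleanup policy on failure: release and bail
    *buf = NULL;
    return -1;
  }
  memset(p, 0, newSize);             // the patch also re-zeroes the reused buffer
  *buf = p;
  return 0;
}

int main(void) {
  char *buf = NULL;                  // realloc(NULL, n) behaves like malloc(n)
  if (growBuffer(&buf, 256) == 0) {
    buf[0] = 1;
    free(buf);
  }
  return 0;
}
```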
@@ -5960,49 +6545,54 @@ int32_t getDataBlocksForMeters(SMeterQuerySupportObj *pSupporter, SQuery *pQuery
   SQueryCostSummary *pSummary = &pSupporter->runtimeEnv.summary;
 
   TSKEY minval, maxval;
-  
+
   *numOfBlocks = 0;
   SQueryFilesInfo *pVnodeFileInfo = &pSupporter->runtimeEnv.vnodeFileInfo;
-  
+
   // sequentially scan this header file to extract the compHeader info
   for (int32_t j = 0; j < numOfMeters; ++j) {
     SMeterObj *pMeterObj = pMeterDataInfo[j]->pMeterObj;
 
     lseek(pVnodeFileInfo->headerFd, pMeterDataInfo[j]->offsetInHeaderFile, SEEK_SET);
-    
+
     SCompInfo compInfo = {0};
     read(pVnodeFileInfo->headerFd, &compInfo, sizeof(SCompInfo));
-    
+
     int32_t ret = validateCompBlockInfoSegment(pQInfo, filePath, pMeterObj->vnode, &compInfo,
                                                pMeterDataInfo[j]->offsetInHeaderFile);
     if (ret != TSDB_CODE_SUCCESS) {  // file corrupted
-      clearAllMeterDataBlockInfo(pMeterDataInfo, 0, j);
+      clearAllMeterDataBlockInfo(pMeterDataInfo, 0, numOfMeters);
       return TSDB_CODE_FILE_CORRUPTED;
     }
 
     if (compInfo.numOfBlocks <= 0 || compInfo.uid != pMeterDataInfo[j]->pMeterObj->uid) {
-      clearAllMeterDataBlockInfo(pMeterDataInfo, 0, j);
+      clearAllMeterDataBlockInfo(pMeterDataInfo, 0, numOfMeters);
       continue;
     }
-    
+
     int32_t size = compInfo.numOfBlocks * sizeof(SCompBlock);
     size_t  bufferSize = size + sizeof(TSCKSUM);
-    
-    pMeterDataInfo[j]->pBlock = calloc(1, bufferSize);
-    if (pMeterDataInfo[j]->pBlock == NULL) {
-      clearAllMeterDataBlockInfo(pMeterDataInfo, 0, j);
+
+    pMeterDataInfo[j]->numOfBlocks = compInfo.numOfBlocks;
+    char* p = realloc(pMeterDataInfo[j]->pBlock, bufferSize);
+    if (p == NULL) {
+      clearAllMeterDataBlockInfo(pMeterDataInfo, 0, numOfMeters);
       return TSDB_CODE_SERV_OUT_OF_MEMORY;
+    } else {
+      memset(p, 0, bufferSize);
+      pMeterDataInfo[j]->pBlock = (SCompBlock*) p;
     }
-    
+
     read(pVnodeFileInfo->headerFd, pMeterDataInfo[j]->pBlock, bufferSize);
-    TSCKSUM checksum = *(TSCKSUM*)((char*)pMeterDataInfo[j]->pBlock + size);
+    TSCKSUM checksum = *(TSCKSUM *)((char *)pMeterDataInfo[j]->pBlock + size);
 
     int64_t st = taosGetTimestampUs();
 
     // check compblock integrity
-    ret = validateCompBlockSegment(pQInfo, filePath, &compInfo, (char*) pMeterDataInfo[j]->pBlock, pMeterObj->vnode, checksum);
+    ret = validateCompBlockSegment(pQInfo, filePath, &compInfo, (char *)pMeterDataInfo[j]->pBlock, pMeterObj->vnode,
                                   checksum);
     if (ret != TSDB_CODE_SUCCESS) {
-      clearAllMeterDataBlockInfo(pMeterDataInfo, 0, j);
+      clearAllMeterDataBlockInfo(pMeterDataInfo, 0, numOfMeters);
       return TSDB_CODE_FILE_CORRUPTED;
     }
 
@@ -6013,19 +6603,20 @@ int32_t getDataBlocksForMeters(SMeterQuerySupportObj *pSupporter, SQuery *pQuery
     pSummary->loadCompInfoUs += (et - st);
 
     if (!setCurrentQueryRange(pMeterDataInfo[j], pQuery, pSupporter->rawEKey, &minval, &maxval)) {
-      clearAllMeterDataBlockInfo(pMeterDataInfo, j, j);
+      clearAllMeterDataBlockInfo(pMeterDataInfo, j, j + 1);
       continue;
     }
 
     int32_t end = 0;
     if (!getValidDataBlocksRangeIndex(pMeterDataInfo[j], pQuery, pMeterDataInfo[j]->pBlock, compInfo.numOfBlocks,
                                       minval, maxval, &end)) {
-      clearAllMeterDataBlockInfo(pMeterDataInfo, j, j);
+      // current table has no qualified data blocks, erase its information.
+      clearAllMeterDataBlockInfo(pMeterDataInfo, j, j + 1);
       continue;
     }
 
     if (!setValidDataBlocks(pMeterDataInfo[j], end)) {
-      clearAllMeterDataBlockInfo(pMeterDataInfo, 0, j);
+      clearAllMeterDataBlockInfo(pMeterDataInfo, 0, numOfMeters);
 
       pQInfo->killed = 1;  // set query kill, abort current query since no memory available
       return TSDB_CODE_SERV_OUT_OF_MEMORY;
@@ -6088,24 +6679,24 @@ static int32_t blockAccessOrderComparator(const void *pLeft, const void *pRight,
   return pLeftBlockInfoEx->pBlock.compBlock->offset > pRightBlockInfoEx->pBlock.compBlock->offset ? 1 : -1;
 }
 
-void cleanBlockOrderSupporter(SBlockOrderSupporter* pSupporter, int32_t numOfTables) {
+void cleanBlockOrderSupporter(SBlockOrderSupporter *pSupporter, int32_t numOfTables) {
   tfree(pSupporter->numOfBlocksPerMeter);
   tfree(pSupporter->blockIndexArray);
-  
+
   for (int32_t i = 0; i < numOfTables; ++i) {
     tfree(pSupporter->pDataBlockInfoEx[i]);
   }
-  
+
   tfree(pSupporter->pDataBlockInfoEx);
 }
 
 int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMeters,
                                SMeterDataBlockInfoEx **pDataBlockInfoEx, int32_t numOfCompBlocks,
-                               int32_t *nAllocBlocksInfoSize, int64_t addr) {
+                               int32_t *numOfAllocBlocks, int64_t addr) {
   // release allocated memory first
-  freeDataBlockFieldInfo(*pDataBlockInfoEx, *nAllocBlocksInfoSize);
+  freeDataBlockFieldInfo(*pDataBlockInfoEx, *numOfAllocBlocks);
 
-  if (*nAllocBlocksInfoSize == 0 || *nAllocBlocksInfoSize < numOfCompBlocks) {
+  if (*numOfAllocBlocks == 0 || *numOfAllocBlocks < numOfCompBlocks) {
     char *tmp = realloc((*pDataBlockInfoEx), sizeof(SMeterDataBlockInfoEx) * numOfCompBlocks);
     if (tmp == NULL) {
       tfree(*pDataBlockInfoEx);
@@ -6114,7 +6705,7 @@ int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMet
     *pDataBlockInfoEx = (SMeterDataBlockInfoEx *)tmp;
 
     memset((*pDataBlockInfoEx), 0, sizeof(SMeterDataBlockInfoEx) * numOfCompBlocks);
-    *nAllocBlocksInfoSize = numOfCompBlocks;
+    *numOfAllocBlocks = numOfCompBlocks;
   }
 
   SBlockOrderSupporter supporter = {0};
@@ -6139,23 +6730,23 @@ int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMet
     SCompBlock *pBlock = pMeterDataInfo[j]->pBlock;
     supporter.numOfBlocksPerMeter[numOfQualMeters] = pMeterDataInfo[j]->numOfBlocks;
 
-    char* buf = calloc(1, sizeof(SMeterDataBlockInfoEx) * pMeterDataInfo[j]->numOfBlocks);
+    char *buf = calloc(1, sizeof(SMeterDataBlockInfoEx) * pMeterDataInfo[j]->numOfBlocks);
     if (buf == NULL) {
       cleanBlockOrderSupporter(&supporter, numOfQualMeters);
       return TSDB_CODE_SERV_OUT_OF_MEMORY;
     }
-    
-    supporter.pDataBlockInfoEx[numOfQualMeters] = (SMeterDataBlockInfoEx*) buf;
 
-    for (int32_t k = 0; k < pMeterDataInfo[j]->numOfBlocks; ++k) {
-      SMeterDataBlockInfoEx *pInfoEx = &supporter.pDataBlockInfoEx[numOfQualMeters][k];
-      
-      pInfoEx->pBlock.compBlock = &pBlock[k];
-      pInfoEx->pBlock.fields = NULL;
+    supporter.pDataBlockInfoEx[numOfQualMeters] = (SMeterDataBlockInfoEx *)buf;
 
-      pInfoEx->pMeterDataInfo = pMeterDataInfo[j];
-      pInfoEx->groupIdx = pMeterDataInfo[j]->groupIdx;     // set the group index
-      pInfoEx->blockIndex = pMeterDataInfo[j]->start + k;  // set the block index in original meter
+    for (int32_t k = 0; k < pMeterDataInfo[j]->numOfBlocks; ++k) {
+      SMeterDataBlockInfoEx *pBlockInfoEx = &supporter.pDataBlockInfoEx[numOfQualMeters][k];
+
+      pBlockInfoEx->pBlock.compBlock = &pBlock[k];
+      pBlockInfoEx->pBlock.fields = NULL;
+
+      pBlockInfoEx->pMeterDataInfo = pMeterDataInfo[j];
+      pBlockInfoEx->groupIdx = pMeterDataInfo[j]->groupIdx;     // set the group index
+      pBlockInfoEx->blockIndex = pMeterDataInfo[j]->start + k;  // set the block index in original meter
       cnt++;
     }
 
@@ -6164,7 +6755,7 @@ int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMet
 
   dTrace("QInfo %p create data blocks info struct completed", addr);
 
-  assert(cnt == numOfCompBlocks && numOfQualMeters <= numOfMeters); // the pMeterDataInfo[j]->numOfBlocks may be 0
+  assert(cnt == numOfCompBlocks && numOfQualMeters <= numOfMeters);  // the pMeterDataInfo[j]->numOfBlocks may be 0
 
   supporter.numOfMeters = numOfQualMeters;
   SLoserTreeInfo *pTree = NULL;
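Editor's note: `createDataBlocksInfoEx` flattens every qualified meter's blocks into one list ordered by file offset (via `blockAccessOrderComparator` plus a loser tree), so the data file is read near-sequentially. The effect, sketched with `qsort` over a hypothetical block record:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Hypothetical stand-in for SMeterDataBlockInfoEx: which meter, where on disk.
typedef struct { int meterId; int64_t offset; } BlockRef;

// Ascending by file offset, matching the comparator's intent above.
static int byOffset(const void *l, const void *r) {
  const BlockRef *a = l, *b = r;
  return (a->offset > b->offset) - (a->offset < b->offset);
}

int main(void) {
  // blocks from two meters, interleaved on disk
  BlockRef blocks[] = {{1, 4096}, {2, 0}, {1, 8192}, {2, 2048}};
  qsort(blocks, 4, sizeof(BlockRef), byOffset);

  // after sorting, a single forward pass over the file visits every block
  for (int i = 0; i < 4; ++i)
    printf("meter %d @ %lld\n", blocks[i].meterId, (long long)blocks[i].offset);
  return 0;
}
```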
@@ -6187,7 +6778,7 @@ int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMet
     if (supporter.blockIndexArray[pos] >= supporter.numOfBlocksPerMeter[pos]) {
       supporter.blockIndexArray[pos] = supporter.numOfBlocksPerMeter[pos] + 1;
     }
-    
+
     tLoserTreeAdjust(pTree, pos + supporter.numOfMeters);
   }
 
@@ -6321,7 +6912,7 @@ void setCtxOutputPointerForSupplementScan(SMeterQuerySupportObj *pSupporter, SMe
   // the first column is always the timestamp for interval query
   TSKEY      ts = *(TSKEY *)pRuntimeEnv->pCtx[0].aOutputBuf;
   SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj;
-  qTrace("QInfo:%p vid:%d sid:%d id:%s, set output result pointer, ts:%lld, index:%d", GET_QINFO_ADDR(pQuery),
+  qTrace("QInfo:%p vid:%d sid:%d id:%s, set output result pointer, ts:%" PRId64 ", index:%d", GET_QINFO_ADDR(pQuery),
          pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, ts, pMeterQueryInfo->reverseIndex);
 }
 
@@ -6421,8 +7012,8 @@ int32_t setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int3
   return 0;
 }
 
-static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pInfo,
-                                        SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, char *sdata, SField *pFields,
+static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo,
+                                        SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, SField *pFields,
                                         __block_search_fn_t searchFn) {
   SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
   SQuery *          pQuery = pRuntimeEnv->pQuery;
@@ -6433,14 +7024,14 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete
 
   while (1) {
     int32_t numOfRes = 0;
-    int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryCol, sdata, pFields, searchFn, &numOfRes);
+    int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryCol, pFields, searchFn, &numOfRes);
     assert(steps > 0);
 
     // NOTE: in case of stable query, only ONE(or ZERO) row of result generated for each query range
-    if (pInfo->lastResRows == 0) {
-      pInfo->lastResRows = numOfRes;
+    if (pMeterQueryInfo->lastResRows == 0) {
+      pMeterQueryInfo->lastResRows = numOfRes;
     } else {
-      assert(pInfo->lastResRows == 1);
+      assert(pMeterQueryInfo->lastResRows == 1);
    }
 
     int32_t pos = pQuery->pos + steps * factor;
@@ -6479,20 +7070,20 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete
       if (pQuery->lastKey > pSupporter->rawEKey || nextKey > pSupporter->rawEKey) {
         /* whole query completed, save result and abort */
         assert(queryCompleted);
-        saveResult(pSupporter, pInfo, pInfo->lastResRows);
+        saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows);
 
         // save the pQuery->lastKey for retrieve data in cache, actually, there will be no qualified data in cache.
-        saveIntervalQueryRange(pRuntimeEnv, pInfo);
+        saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
       } else if (pQuery->ekey == pBlockInfo->keyLast) {
         /* current interval query is completed, set the next query range on other data blocks if exist */
         int64_t prevEKey = pQuery->ekey;
 
-        getAlignedIntervalQueryRange(pQuery, pQuery->lastKey, pSupporter->rawSKey, pSupporter->rawEKey);
-        saveIntervalQueryRange(pRuntimeEnv, pInfo);
+        getAlignedIntervalQueryRange(pRuntimeEnv, pQuery->lastKey, pSupporter->rawSKey, pSupporter->rawEKey);
+        saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
 
         assert(queryCompleted && prevEKey < pQuery->skey);
-        if (pInfo->lastResRows > 0) {
-          saveResult(pSupporter, pInfo, pInfo->lastResRows);
+        if (pMeterQueryInfo->lastResRows > 0) {
+          saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows);
         }
       } else {
         /*
@@ -6503,7 +7094,7 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete
          * With the information of the directly next data block, whether locates in cache or disk,
          * current interval query being completed or not can be decided.
          */
-        saveIntervalQueryRange(pRuntimeEnv, pInfo);
+        saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
         assert(pQuery->lastKey > pBlockInfo->keyLast && pQuery->lastKey <= pQuery->ekey);
 
         /*
@@ -6511,7 +7102,7 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete
          * merge with other meters in the same group
         */
         if (queryCompleted) {
-          saveResult(pSupporter, pInfo, pInfo->lastResRows);
+          saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows);
         }
       }
 
@@ -6527,23 +7118,23 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete
       if (pQuery->lastKey < pSupporter->rawEKey || (nextKey < pSupporter->rawEKey && nextKey != -1)) {
         /* whole query completed, save result and abort */
         assert(queryCompleted);
-        saveResult(pSupporter, pInfo, pInfo->lastResRows);
+        saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows);
 
         /*
          * save the pQuery->lastKey for retrieve data in cache, actually,
         * there will be no qualified data in cache.
         */
-        saveIntervalQueryRange(pRuntimeEnv, pInfo);
+        saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
       } else if (pQuery->ekey == pBlockInfo->keyFirst) {
         // current interval query is completed, set the next query range on other data blocks if exist
         int64_t prevEKey = pQuery->ekey;
 
-        getAlignedIntervalQueryRange(pQuery, pQuery->lastKey, pSupporter->rawSKey, pSupporter->rawEKey);
-        saveIntervalQueryRange(pRuntimeEnv, pInfo);
+        getAlignedIntervalQueryRange(pRuntimeEnv, pQuery->lastKey, pSupporter->rawSKey, pSupporter->rawEKey);
+        saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
 
         assert(queryCompleted && prevEKey > pQuery->skey);
-        if (pInfo->lastResRows > 0) {
-          saveResult(pSupporter, pInfo, pInfo->lastResRows);
+        if (pMeterQueryInfo->lastResRows > 0) {
+          saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows);
        }
       } else {
         /*
@@ -6554,7 +7145,7 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete
          * With the information of the directly next data block, whether locates in cache or disk,
          * current interval query being completed or not can be decided.
          */
-        saveIntervalQueryRange(pRuntimeEnv, pInfo);
+        saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
         assert(pQuery->lastKey < pBlockInfo->keyFirst && pQuery->lastKey >= pQuery->ekey);
 
         /*
@@ -6562,7 +7153,7 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete
          * flag, and merge with other meters in the same group
          */
         if (queryCompleted) {
-          saveResult(pSupporter, pInfo, pInfo->lastResRows);
+          saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows);
         }
       }
 
@@ -6571,14 +7162,14 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete
     }
 
     assert(queryCompleted);
-    saveResult(pSupporter, pInfo, pInfo->lastResRows);
+    saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows);
 
     assert((nextKey >= pQuery->lastKey && QUERY_IS_ASC_QUERY(pQuery)) ||
            (nextKey <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery)));
 
     /* still in the same block to query */
-    getAlignedIntervalQueryRange(pQuery, nextKey, pSupporter->rawSKey, pSupporter->rawEKey);
-    saveIntervalQueryRange(pRuntimeEnv, pInfo);
+    getAlignedIntervalQueryRange(pRuntimeEnv, nextKey, pSupporter->rawSKey, pSupporter->rawEKey);
+    saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
 
     int32_t newPos = searchFn((char *)pPrimaryCol, pBlockInfo->size, pQuery->skey, pQuery->order.order);
     assert(newPos == pQuery->pos + steps * factor);
@@ -6587,6 +7178,104 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete
   }
 }
 
+static void doApplyIntervalQueryOnBlock_rv(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo,
+                                           SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, SField *pFields,
+                                           __block_search_fn_t searchFn) {
+  SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
+  SQuery *          pQuery = pRuntimeEnv->pQuery;
+  int32_t           factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
+
+  while (1) {
+    int64_t nextKey = -1;
+    int32_t numOfRes = 0;
+
+    int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryCol, pFields, searchFn, &numOfRes);
+    assert(steps > 0);
+
+    // NOTE: in case of stable query, only ONE(or ZERO) row of result generated for each query range
+    if (pMeterQueryInfo->lastResRows == 0) {
+      pMeterQueryInfo->lastResRows = numOfRes;
+    } else {
+      assert(pMeterQueryInfo->lastResRows == 1);
+    }
+
+    int32_t pos = pQuery->pos + steps * factor;
+
+    // query does not reach the end of current block
+    if ((pos < pBlockInfo->size && QUERY_IS_ASC_QUERY(pQuery)) || (pos >= 0 && !QUERY_IS_ASC_QUERY(pQuery))) {
+      nextKey = pPrimaryCol[pos];
+    } else {
+      assert((pQuery->lastKey > pBlockInfo->keyLast && QUERY_IS_ASC_QUERY(pQuery)) ||
+             (pQuery->lastKey < pBlockInfo->keyFirst && !QUERY_IS_ASC_QUERY(pQuery)));
+    }
+
+    // all data satisfy current query are checked, query completed
+    bool completed = false;
+    if (QUERY_IS_ASC_QUERY(pQuery)) {
+      completed = (pQuery->lastKey > pQuery->ekey);
+    } else {
+      completed = (pQuery->lastKey < pQuery->ekey);
+    }
+
+    /*
+     * 1. there may be more data that satisfy the current query interval in blocks other than
+     *    the current one, so we need to try the next data blocks
+     * 2. query completed, since it reaches the upper bound of the main query range
+     */
+    if (!completed) {
+      /*
+       * Data that satisfy current query range may locate in current block and blocks that are directly right
+       * next to current block. Therefore, we need to keep the query range(interval) unchanged until reaching
+       * the direct next data block, while only forwarding pQuery->lastKey.
+       *
+       * With the information of the directly next data block, whether it locates in cache or on disk,
+       * it can be decided whether the current interval query is completed.
+       */
+      saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
+
+      if (QUERY_IS_ASC_QUERY(pQuery)) {
+        assert(pQuery->lastKey > pBlockInfo->keyLast && pQuery->lastKey <= pQuery->ekey);
+      } else {
+        assert(pQuery->lastKey < pBlockInfo->keyFirst && pQuery->lastKey >= pQuery->ekey);
+      }
+
+      break;
+    }
+
+    assert(completed);
+
+    if (pQuery->ekey == pSupporter->rawEKey) {
+      /* whole query completed, save result and abort */
+      saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows);
+
+      // save the pQuery->lastKey for retrieving data in cache, actually, there will be no qualified data in cache.
+      saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
+
+      return;
+    } else if ((QUERY_IS_ASC_QUERY(pQuery) && pQuery->ekey == pBlockInfo->keyLast) ||
+               (!QUERY_IS_ASC_QUERY(pQuery) && pQuery->ekey == pBlockInfo->keyFirst)) {
+      /* current interval query is completed, set the next query range on other data blocks if exist */
+      saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
+      return;
+    }
+
+    saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows);
+
+    assert(pos >= 0 && pos < pBlockInfo->size);
+    assert((nextKey >= pQuery->lastKey && QUERY_IS_ASC_QUERY(pQuery)) ||
+           (nextKey <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery)));
+
+    /* still in the same block to query */
+    getAlignedIntervalQueryRange(pRuntimeEnv, nextKey, pSupporter->rawSKey, pSupporter->rawEKey);
+    saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
+
+    int32_t newPos = searchFn((char *)pPrimaryCol, pBlockInfo->size, pQuery->skey, pQuery->order.order);
+    assert(newPos == pQuery->pos + steps * factor);
+
+    pQuery->pos = newPos;
+  }
+}
+
 int64_t getNextAccessedKeyInData(SQuery *pQuery, int64_t *pPrimaryCol, SBlockInfo *pBlockInfo, int32_t blockStatus) {
   assert(pQuery->pos >= 0 && pQuery->pos <= pBlockInfo->size - 1);
 
@@ -6624,7 +7313,7 @@ void setIntervalQueryRange(SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportO
      * last query on this block of the meter is done, start next interval on this block
      * otherwise, keep the previous query range and proceed
      */
-    getAlignedIntervalQueryRange(pQuery, key, pSupporter->rawSKey, pSupporter->rawEKey);
+    getAlignedIntervalQueryRange(pRuntimeEnv, key, pSupporter->rawSKey, pSupporter->rawEKey);
     saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
 
     // previous query does not be closed, save the results and close it
@@ -6644,7 +7333,7 @@ void setIntervalQueryRange(SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportO
       return;
     }
 
-    getAlignedIntervalQueryRange(pQuery, pQuery->skey, pSupporter->rawSKey, pSupporter->rawEKey);
+    getAlignedIntervalQueryRange(pRuntimeEnv, pQuery->skey, pSupporter->rawSKey, pSupporter->rawEKey);
     saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
     pMeterQueryInfo->queryRangeSet = 1;
   }
@@ -6714,13 +7403,13 @@ int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, uint8_t *blk
                pQuery->pSelectExpr[i].pBase.colInfo.colId, *blkStatus);
       }
 
-      if (pRuntimeEnv->pTSBuf > 0) {
+      if (pRuntimeEnv->pTSBuf > 0 || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) {
        req |= BLK_DATA_ALL_NEEDED;
      }
    }
 
   if (req == BLK_DATA_NO_NEEDED) {
-    qTrace("QInfo:%p vid:%d sid:%d id:%s, slot:%d, data block ignored, brange:%lld-%lld, rows:%d",
+    qTrace("QInfo:%p vid:%d sid:%d id:%s, slot:%d, data block ignored, brange:%" PRId64 "-%" PRId64 ", rows:%d",
            GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot,
            pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints);
 
@@ -6751,14 +7440,15 @@ int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, uint8_t *blk
         dTrace("QInfo:%p fileId:%d, slot:%d, block discarded by per-filter, ", GET_QINFO_ADDR(pQuery), pQuery->fileId,
                pQuery->slot);
 #endif
-        qTrace("QInfo:%p id:%s slot:%d, data block ignored by pre-filter, fields loaded, brange:%lld-%lld, rows:%d",
+        qTrace("QInfo:%p id:%s slot:%d, data block ignored by pre-filter, fields loaded, brange:%" PRId64 "-%" PRId64
+               ", rows:%d",
               GET_QINFO_ADDR(pQuery), pMeterObj->meterId, pQuery->slot, pBlock->keyFirst, pBlock->keyLast,
               pBlock->numOfPoints);
         return DISK_DATA_DISCARDED;
      }
    }
 
-  SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_FILE_BLOCK);
+  SBlockInfo binfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_FILE_BLOCK);
   bool       loadTS = needPrimaryTimestampCol(pQuery, &binfo);
 
   /*
@@ -6824,7 +7514,7 @@ int32_t saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQue
    * scan also no result generated. The index can be backwards moved.
    *
    * However, if during the main scan, there is a result generated, such as applies count to timestamp, which
-   * always generates a result, but applies last query to a NULL column may fail to generate no results during the
+   * always generates a result, but applies last query to a NULL column may fail to generate results during the
    * supplement scan.
    *
    * NOTE:
@@ -6864,8 +7554,8 @@ int32_t saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQue
     TSKEY ts = *(TSKEY *)getOutputResPos(pRuntimeEnv, pData, pData->numOfElems, 0);
 
     SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj;
-    qTrace("QInfo:%p vid:%d sid:%d id:%s, save results, ts:%lld, total:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode,
-           pMeterObj->sid, pMeterObj->meterId, ts, pMeterQueryInfo->numOfRes + 1);
+    qTrace("QInfo:%p vid:%d sid:%d id:%s, save results, ts:%" PRId64 ", total:%d", GET_QINFO_ADDR(pQuery),
+           pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, ts, pMeterQueryInfo->numOfRes + 1);
 
     pData->numOfElems += numOfResult;
     pMeterQueryInfo->numOfRes += numOfResult;
@@ -6904,8 +7594,8 @@ static int32_t getSubsetNumber(SMeterQuerySupportObj *pSupporter) {
   SQuery *pQuery = &pSupporter->runtimeEnv.pQuery;
 
   int32_t totalSubset = 0;
-  if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
-    totalSubset = pSupporter->runtimeEnv.usedIndex;
+  if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) {
+    totalSubset = numOfClosedSlidingWindow(&pSupporter->runtimeEnv.swindowResInfo);
   } else {
     totalSubset = pSupporter->pSidSet->numOfSubSet;
   }
@@ -6985,36 +7675,27 @@ void copyFromGroupBuf(SQInfo *pQInfo, SOutputRes *result) {
   SQuery *               pQuery = &pQInfo->query;
   SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter;
 
-  int32_t orderType = (pQuery->pGroupbyExpr != NULL) ? pQuery->pGroupbyExpr->orderType : TSQL_SO_DESC;
-  
+  int32_t orderType = (pQuery->pGroupbyExpr != NULL) ? pQuery->pGroupbyExpr->orderType : TSQL_SO_ASC;
   int32_t numOfResult = doCopyFromGroupBuf(pSupporter, result, orderType);
 
   pQuery->pointsRead += numOfResult;
   assert(pQuery->pointsRead <= pQuery->pointsToRead);
 }
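Editor's note: `getSubsetNumber` above now sizes the result set by the number of *closed* sliding windows instead of a raw used-slot counter, so windows still open at the scan boundary are not emitted early. The bookkeeping, sketched with a hypothetical status array:

```c
#include <stdbool.h>
#include <stdio.h>

// Hypothetical mirror of the per-window status kept in SSlidingWindowInfo.
typedef struct { bool closed; } WindowStatus;

// Only windows whose upper bound the scan has passed are final results.
static int numOfClosedWindows(const WindowStatus *st, int size) {
  int n = 0;
  for (int i = 0; i < size; ++i) {
    if (st[i].closed) ++n;
  }
  return n;
}

int main(void) {
  WindowStatus st[4] = {{true}, {true}, {true}, {false}};  // last window still open
  printf("%d of 4 windows are ready to be returned\n", numOfClosedWindows(st, 4));
  return 0;
}
```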
-// todo refactor according to its called env!!
-static void getAlignedIntervalQueryRange(SQuery *pQuery, TSKEY keyInData, TSKEY skey, TSKEY ekey) {
-  if (pQuery->nAggTimeInterval == 0) {
-    return;
-  }
-
-  doGetAlignedIntervalQueryRange(pQuery, keyInData, skey, ekey);
-}
-
-static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, char *data,
-                                      int64_t *pPrimaryData, SBlockInfo *pBlockInfo, int32_t blockStatus,
-                                      SField *pFields, __block_search_fn_t searchFn) {
+static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pMeterDataInfo,
+                                      SBlockInfo *pBlockInfo, int32_t blockStatus, SField *pFields,
+                                      __block_search_fn_t searchFn) {
   SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
   SQuery *          pQuery = pRuntimeEnv->pQuery;
-  SMeterQueryInfo * pInfo = pInfoEx->pMeterQInfo;
+  SMeterQueryInfo * pMeterQueryInfo = pMeterDataInfo->pMeterQInfo;
+  int64_t*          pPrimaryKey = (int64_t*) pRuntimeEnv->primaryColBuffer->data;
 
   /*
    * for each block, we need to handle the previous query, since the determination of previous query being completed
    * or not is based on the start key of current block.
    */
-  TSKEY key = getNextAccessedKeyInData(pQuery, pPrimaryData, pBlockInfo, blockStatus);
-  setIntervalQueryRange(pInfoEx->pMeterQInfo, pSupporter, key);
+  TSKEY key = getNextAccessedKeyInData(pQuery, pPrimaryKey, pBlockInfo, blockStatus);
+  setIntervalQueryRange(pMeterDataInfo->pMeterQInfo, pSupporter, key);
 
   if (((pQuery->skey > pQuery->ekey) && QUERY_IS_ASC_QUERY(pQuery)) ||
       ((pQuery->skey < pQuery->ekey) && !QUERY_IS_ASC_QUERY(pQuery))) {
@@ -7025,18 +7706,18 @@ static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterD
       ((pBlockInfo->keyFirst > pQuery->ekey) && !QUERY_IS_ASC_QUERY(pQuery))) {
     int32_t numOfRes = 0;
     /* current block is included in this interval */
-    int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryData, data, pFields, searchFn, &numOfRes);
+    int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryKey, pFields, searchFn, &numOfRes);
     assert(numOfRes <= 1 && numOfRes >= 0 && steps > 0);
 
-    if (pInfo->lastResRows == 0) {
-      pInfo->lastResRows = numOfRes;
+    if (pMeterQueryInfo->lastResRows == 0) {
+      pMeterQueryInfo->lastResRows = numOfRes;
     } else {
-      assert(pInfo->lastResRows == 1);
+      assert(pMeterQueryInfo->lastResRows == 1);
     }
 
-    saveIntervalQueryRange(pRuntimeEnv, pInfo);
+    saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo);
   } else {
-    doApplyIntervalQueryOnBlock(pSupporter, pInfo, pBlockInfo, pPrimaryData, data, pFields, searchFn);
+    doApplyIntervalQueryOnBlock(pSupporter, pMeterQueryInfo, pBlockInfo, pPrimaryKey, pFields, searchFn);
   }
 }
 
@@ -7082,7 +7763,6 @@ bool vnodeHasRemainResults(void *handle) {
   SQuery *            pQuery = pRuntimeEnv->pQuery;
   SInterpolationInfo *pInterpoInfo = &pRuntimeEnv->interpoInfo;
-
   if (pQuery->limit.limit > 0 && pQInfo->pointsRead >= pQuery->limit.limit) {
     return false;
   }
@@ -7181,7 +7861,7 @@ int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows) {
     // make sure file exist
     if (FD_VALID(fd)) {
       size_t s = lseek(fd, 0, SEEK_END);
-      dTrace("QInfo:%p ts comp data return, file:%s, size:%lld", pQInfo, pQuery->sdata[0]->data, s);
+      dTrace("QInfo:%p ts comp data return, file:%s, size:%zu", pQInfo, pQuery->sdata[0]->data, s);
 
       lseek(fd, 0, SEEK_SET);
       read(fd, data, s);
@@ -7217,8 +7897,8 @@ int32_t vnodeQueryResultInterpolate(SQInfo *pQInfo, tFilePage **pDst, tFilePage
   int32_t ret = resultInterpolate(pQInfo, pDst, pDataSrc, numOfRows, numOfFinalRows);
   assert(ret == numOfFinalRows);
 
+  /* reached the start position according to the offset value, return immediately */
   if (pQuery->limit.offset == 0) {
-    /* reached the start position of according to offset value, return immediately */
     return ret;
   }
 
@@ -7226,18 +7906,18 @@ int32_t vnodeQueryResultInterpolate(SQInfo *pQInfo, tFilePage **pDst, tFilePage
     ret -= pQuery->limit.offset;
     // todo !!!!there exactly number of interpo is not valid.
     // todo refactor move to the beginning of buffer
-    if (QUERY_IS_ASC_QUERY(pQuery)) {
-      for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
-        memmove(pDst[i]->data, pDst[i]->data + pQuery->pSelectExpr[i].resBytes * pQuery->limit.offset,
-                ret * pQuery->pSelectExpr[i].resBytes);
-      }
-    } else {
-      for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
-        memmove(pDst[i]->data + (pQuery->pointsToRead - ret) * pQuery->pSelectExpr[i].resBytes,
-                pDst[i]->data + (pQuery->pointsToRead - ret - pQuery->limit.offset) * pQuery->pSelectExpr[i].resBytes,
-                ret * pQuery->pSelectExpr[i].resBytes);
-      }
+    // if (QUERY_IS_ASC_QUERY(pQuery)) {
+    for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
+      memmove(pDst[i]->data, pDst[i]->data + pQuery->pSelectExpr[i].resBytes * pQuery->limit.offset,
+              ret * pQuery->pSelectExpr[i].resBytes);
     }
+    // } else {
+    //   for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
+    //     memmove(pDst[i]->data + (pQuery->pointsToRead - ret) * pQuery->pSelectExpr[i].resBytes,
+    //             pDst[i]->data + (pQuery->pointsToRead - ret - pQuery->limit.offset) *
+    //             pQuery->pSelectExpr[i].resBytes, ret * pQuery->pSelectExpr[i].resBytes);
+    //   }
+    // }
     pQuery->limit.offset = 0;
     return ret;
   } else {
diff --git a/src/system/detail/src/vnodeQueryProcess.c b/src/system/detail/src/vnodeQueryProcess.c
index c69b27537e82fc7a58aff764276bc8d932f18a1c..6fe8b2fa775b57799b150f57ba3f0eb71e7ab3f6 100644
--- a/src/system/detail/src/vnodeQueryProcess.c
+++ b/src/system/detail/src/vnodeQueryProcess.c
@@ -85,14 +85,25 @@ static void setStartPositionForCacheBlock(SQuery *pQuery, SCacheBlock *pBlock, b
   }
 }
 
-static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) {
+static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) {
+  SQuery* pQuery = pRuntimeEnv->pQuery;
+
+  for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
+    SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[i]);
+    if (pResInfo != NULL) {
+      pResInfo->complete = false;
+    }
+  }
+}
+
+static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) {
   SQuery *               pQuery = &pQInfo->query;
   SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter;
   SQueryRuntimeEnv *     pRuntimeEnv = &pQInfo->pMeterQuerySupporter->runtimeEnv;
 
   SMeterSidExtInfo **pMeterSidExtInfo = pSupporter->pMeterSidExtInfo;
 
-  SMeterObj *pTempMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[0]->sid);
+  SMeterObj *pTempMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[0]->sid);
   assert(pTempMeterObj != NULL);
 
   __block_search_fn_t searchFn = vnodeSearchKeyFunc[pTempMeterObj->searchAlgorithm];
@@ -107,11 +118,11 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe
     int32_t end = pSupporter->pSidSet->starterPos[groupIdx + 1] - 1;
 
     if (isQueryKilled(pQuery)) {
-      return pMeterInfo;
+      return;
     }
 
     for (int32_t k = start; k <= end; ++k) {
-      SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[k]->sid);
+      SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[k]->sid);
       if (pMeterObj == NULL) {
         dError("QInfo:%p failed to find meterId:%d, continue", pQInfo, pMeterSidExtInfo[k]->sid);
         continue;
@@ -147,8 +158,8 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe
       if ((pQuery->lastKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
           (pQuery->lastKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) {
         dTrace(
-            "QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan data in cache. qrange:%lld-%lld, "
-            "lastKey:%lld",
+            "QInfo:%p vid:%d sid:%d id:%s, query completed, ignore data in cache. qrange:%" PRId64 "-%" PRId64 ", "
+            "lastKey:%" PRId64,
             pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey,
             pQuery->lastKey);
 
@@ -160,11 +171,11 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe
         int32_t ret = setIntervalQueryExecutionContext(pSupporter, k, pMeterQueryInfo);
         if (ret != TSDB_CODE_SUCCESS) {
           pQInfo->killed = 1;
-          return NULL;
+          return;
         }
       }
 
-      qTrace("QInfo:%p vid:%d sid:%d id:%s, query in cache, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode,
+      qTrace("QInfo:%p vid:%d sid:%d id:%s, query in cache, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo, pMeterObj->vnode,
             pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey);
 
       /*
@@ -176,14 +187,14 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe
        */
       TSKEY nextKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, true);
       if (nextKey < 0) {
-        qTrace("QInfo:%p vid:%d sid:%d id:%s, no data qualified in cache, cache blocks:%d, lastKey:%lld", pQInfo,
+        qTrace("QInfo:%p vid:%d sid:%d id:%s, no data qualified in cache, cache blocks:%d, lastKey:%" PRId64, pQInfo,
               pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->numOfBlocks, pQuery->lastKey);
         continue;
       }
 
       // data in this block may be flushed to disk and this block is allocated to other meter
       // todo try with remain cache blocks
-      SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot);
+      SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot);
       if (pBlock == NULL) {
         continue;
       }
@@ -196,16 +207,13 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe
 
       SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache;
       for (int32_t i = 0; i < pCacheInfo->maxBlocks; ++i) {
-        pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot);
+        pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot);
 
         /*
          * 1. pBlock == NULL. The cache block may be flushed to disk, so it is not available, skip and try next
-         *
-         * 2. pBlock->numOfPoints == 0. There is a empty block, which is caused by allocate-and-write data into cache
-         *    procedure. The block has been allocated but data has not been put into yet. If the block is the last
-         *    block(newly allocated block), abort query. Otherwise, skip it and go on.
+         *    The check for an empty block has been refactored into the getCacheDataBlock function
          */
-        if ((pBlock == NULL) || (pBlock->numOfPoints == 0)) {
+        if (pBlock == NULL) {
          if (ALL_CACHE_BLOCKS_CHECKED(pQuery)) {
            break;
          }
@@ -216,8 +224,8 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe
 
         setStartPositionForCacheBlock(pQuery, pBlock, &firstCheckSlot);
 
-        TSKEY *primaryKeys = (TSKEY *)pBlock->offset[0];
-
+        TSKEY* primaryKeys = (TSKEY*) pRuntimeEnv->primaryColBuffer->data;
+
         // in handling file data block, the timestamp range validation is done during fetching candidate file blocks
         if ((primaryKeys[pQuery->pos] > pSupporter->rawEKey && QUERY_IS_ASC_QUERY(pQuery)) ||
             (primaryKeys[pQuery->pos] < pSupporter->rawEKey && !QUERY_IS_ASC_QUERY(pQuery))) {
@@ -226,15 +234,14 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe
 
         // only record the key on last block
         SET_CACHE_BLOCK_FLAG(pRuntimeEnv->blockStatus);
-        SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_CACHE_BLOCK);
+        SBlockInfo binfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_CACHE_BLOCK);
 
-        dTrace("QInfo:%p check data block, brange:%lld-%lld, fileId:%d, slot:%d, pos:%d, bstatus:%d",
+        dTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", fileId:%d, slot:%d, pos:%d, bstatus:%d",
               GET_QINFO_ADDR(pQuery), binfo.keyFirst, binfo.keyLast, pQuery->fileId, pQuery->slot, pQuery->pos,
               pRuntimeEnv->blockStatus);
 
         totalBlocks++;
-        queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, (char *)pBlock, &binfo, &pMeterInfo[k], NULL,
-                     searchFn);
+        queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, &binfo, &pMeterInfo[k], NULL, searchFn);
 
         if (ALL_CACHE_BLOCKS_CHECKED(pQuery)) {
           break;
@@ -255,18 +262,16 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe
 
   dTrace("QInfo:%p complete check %d cache blocks, elapsed time:%.3fms", pQInfo, totalBlocks, time / 1000.0);
 
   setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
-
-  return pMeterInfo;
 }
 
-static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMeterDataInfo) {
+static void queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMeterDataInfo) {
   SQuery *               pQuery = &pQInfo->query;
   SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter;
   SQueryRuntimeEnv *     pRuntimeEnv = &pSupporter->runtimeEnv;
 
   SMeterDataBlockInfoEx *pDataBlockInfoEx = NULL;
   int32_t                nAllocBlocksInfoSize = 0;
 
-  SMeterObj *         pTempMeter = getMeterObj(pSupporter->pMeterObj, pSupporter->pMeterSidExtInfo[0]->sid);
+  SMeterObj *         pTempMeter = getMeterObj(pSupporter->pMetersHashTable, pSupporter->pMeterSidExtInfo[0]->sid);
   __block_search_fn_t searchFn = vnodeSearchKeyFunc[pTempMeter->searchAlgorithm];
 
   int32_t vnodeId = pTempMeter->vnode;
@@ -313,7 +318,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
       pQInfo->code = -ret;
       pQInfo->killed = 1;
 
-      return NULL;
+      return;
     }
 
     dTrace("QInfo:%p file:%s, %d meters qualified", pQInfo, pVnodeFileInfo->dataFilePath, numOfQualifiedMeters);
@@ -335,7 +340,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
       pQInfo->code = -ret;
       pQInfo->killed = 1;
 
-      return NULL;
+      return;
     }
 
     dTrace("QInfo:%p file:%s, %d meters contains %d blocks to be checked", pQInfo, pVnodeFileInfo->dataFilePath,
@@ -355,7 +360,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
       pQInfo->code = -ret;
       pQInfo->killed = 1;
 
-      return NULL;
+      return;
     }
 
     dTrace("QInfo:%p start to load %d blocks and check", pQInfo, numOfBlocks);
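Editor's note: after this change, `queryOnMultiDataFiles` runs as a three-stage pipeline per data file — filter the meters that could hold qualified data, pull their compressed-block metadata, then build one offset-ordered block list to scan. A control-flow skeleton under that assumption, with hypothetical stub names:

```c
#include <stdio.h>

// Hypothetical stubs standing in for vnodeFilterQualifiedMeters,
// getDataBlocksForMeters and createDataBlocksInfoEx.
typedef struct { int numOfMeters, numOfBlocks; } FileScanState;

static int filterQualifiedMeters(FileScanState *s) { s->numOfMeters = 2; return 0; }
static int loadCompBlockInfo(FileScanState *s)     { s->numOfBlocks = 8; return 0; }
static int buildOrderedBlockList(FileScanState *s) { (void)s; return 0; }

// Per-file scan: any stage failing aborts the query for this file.
static int scanOneDataFile(FileScanState *s) {
  if (filterQualifiedMeters(s) != 0) return -1;  // stage 1: which meters can match?
  if (s->numOfMeters == 0) return 0;             // nothing to do in this file
  if (loadCompBlockInfo(s) != 0) return -1;      // stage 2: which blocks qualify?
  if (s->numOfBlocks == 0) return 0;
  if (buildOrderedBlockList(s) != 0) return -1;  // stage 3: offset-ordered scan list
  printf("load %d blocks and check\n", s->numOfBlocks);
  return 0;
}

int main(void) {
  FileScanState s = {0, 0};
  return scanOneDataFile(&s);
}
```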
 
@@ -378,7 +383,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
         stimeUnit = taosGetTimestampMs();
       } else if ((j % TRACE_OUTPUT_BLOCK_CNT) == 0) {
         etimeUnit = taosGetTimestampMs();
-        dTrace("QInfo:%p load and check %ld blocks, and continue. elapsed:%ldms", pQInfo, TRACE_OUTPUT_BLOCK_CNT,
+        dTrace("QInfo:%p load and check %" PRId64 " blocks, and continue. elapsed:%" PRId64 " ms", pQInfo, TRACE_OUTPUT_BLOCK_CNT,
               etimeUnit - stimeUnit);
         stimeUnit = taosGetTimestampMs();
       }
@@ -397,8 +402,8 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
       if ((pQuery->lastKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
           (pQuery->lastKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) {
         qTrace(
-            "QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan this data block. qrange:%lld-%lld, "
-            "lastKey:%lld",
+            "QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan this data block. qrange:%" PRId64 "-%" PRId64 ", "
+            "lastKey:%" PRId64,
             pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey,
             pQuery->lastKey);
 
@@ -412,7 +417,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
         if (ret != TSDB_CODE_SUCCESS) {
           tfree(pReqMeterDataInfo);  // error code has been set
           pQInfo->killed = 1;
-          return NULL;
+          return;
         }
       }
 
@@ -425,7 +430,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
         continue;
       }
 
-      SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_FILE_BLOCK);
+      SBlockInfo binfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_FILE_BLOCK);
 
       assert(pQuery->pos >= 0 && pQuery->pos < pBlock->numOfPoints);
       TSKEY *primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data;
@@ -441,8 +446,8 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
                (pBlock->keyFirst >= pQuery->ekey && pBlock->keyLast <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery)));
       }
 
-      queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, (char *)pRuntimeEnv->colDataBuffer, &binfo,
-                   pOneMeterDataInfo, pInfoEx->pBlock.fields, searchFn);
+      queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, &binfo, pOneMeterDataInfo, pInfoEx->pBlock.fields,
+                   searchFn);
     }
 
     tfree(pReqMeterDataInfo);
@@ -461,8 +466,6 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
 
   setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
   freeMeterBlockInfoEx(pDataBlockInfoEx, nAllocBlocksInfoSize);
-
-  return pMeterDataInfo;
 }
 
 static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool *dataInCache, int32_t index,
@@ -475,7 +478,7 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool *
 
   setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
 
-  SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[index]->sid);
+  SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[index]->sid);
   if (pMeterObj == NULL) {
     dError("QInfo:%p do not find required meter id: %d, all meterObjs id is:", pQInfo, pMeterSidExtInfo[index]->sid);
     return false;
@@ -483,18 +486,21 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool *
 
   vnodeSetTagValueInParam(pSupporter->pSidSet, pRuntimeEnv, pMeterSidExtInfo[index]);
 
-  dTrace("QInfo:%p query on (%d): vid:%d sid:%d meterId:%s, qrange:%lld-%lld", pQInfo, index - start, pMeterObj->vnode,
+  dTrace("QInfo:%p query on (%d): vid:%d sid:%d meterId:%s, qrange:%" PRId64 "-%" PRId64, pQInfo, index - start, pMeterObj->vnode,
         pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey);
 
   pQInfo->pObj = pMeterObj;
   pQuery->lastKey = pQuery->skey;
   pRuntimeEnv->pMeterObj = pMeterObj;
+
+  vnodeUpdateQueryColumnIndex(pQuery, pRuntimeEnv->pMeterObj);
+  vnodeUpdateFilterColumnIndex(pQuery);
 
   vnodeCheckIfDataExists(pRuntimeEnv, pMeterObj, dataInDisk, dataInCache);
 
   // data in file or cache is not qualified for the query. abort
   if (!(dataInCache || dataInDisk)) {
-    dTrace("QInfo:%p vid:%d sid:%d meterId:%s, qrange:%lld-%lld, nores, %p", pQInfo, pMeterObj->vnode, pMeterObj->sid,
+    dTrace("QInfo:%p vid:%d sid:%d meterId:%s, qrange:%" PRId64 "-%" PRId64 ", nores, %p", pQInfo, pMeterObj->vnode, pMeterObj->sid,
          pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery);
     return false;
   }
@@ -513,6 +519,7 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool *
     }
   }
 
+  initCtxOutputBuf(pRuntimeEnv);
   return true;
 }
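Editor's note: the hunk above moves the per-table setup (column-index remapping, filter remapping, output-buffer init) into the helper, so every table starts from a clean context. The ordering that this establishes, sketched as a checklist with hypothetical stubs:

```c
#include <stdbool.h>

// Hypothetical stubs for the per-table setup steps the helper now performs.
typedef struct Meter Meter;
typedef struct { Meter *current; } Runtime;

static void updateColumnIndex(Runtime *rt) { (void)rt; }  // remap column ids
static void updateFilterIndex(Runtime *rt) { (void)rt; }  // remap filter columns
static bool hasData(Runtime *rt)           { (void)rt; return true; }
static void initOutputBuf(Runtime *rt)     { (void)rt; }  // reset result contexts

// Switching the runtime to the next table: every step runs per table, so a
// column-layout difference between tables cannot leak into the next scan.
static bool switchToTable(Runtime *rt, Meter *m) {
  rt->current = m;
  updateColumnIndex(rt);
  updateFilterIndex(rt);
  if (!hasData(rt)) return false;  // nothing in cache or on disk: skip table
  initOutputBuf(rt);
  return true;
}

int main(void) {
  Runtime rt = {0};
  return switchToTable(&rt, 0) ? 0 : 1;
}
```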
point interpolation query, last row query + * + * @param pQInfo + */ +static void vnodeSTableSeqProcessor(SQInfo *pQInfo) { SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; SMeterSidExtInfo **pMeterSidExtInfo = pSupporter->pMeterSidExtInfo; @@ -576,11 +588,11 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { SQuery * pQuery = &pQInfo->query; tSidSet *pSids = pSupporter->pSidSet; - SMeterObj *pOneMeter = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[0]->sid); - - resetCtxOutputBuf(pRuntimeEnv); - + int32_t vid = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[0]->sid)->vnode; + if (isPointInterpoQuery(pQuery)) { + resetCtxOutputBuf(pRuntimeEnv); + assert(pQuery->limit.offset == 0 && pQuery->limit.limit != 0); while (pSupporter->subgroupIdx < pSids->numOfSubSet) { @@ -588,7 +600,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { int32_t end = pSids->starterPos[pSupporter->subgroupIdx + 1] - 1; if (isFirstLastRowQuery(pQuery)) { - dTrace("QInfo:%p last_row query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, pOneMeter->vnode, + dTrace("QInfo:%p last_row query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, vid, pSids->numOfSubSet, pSupporter->subgroupIdx); TSKEY key = -1; @@ -604,7 +616,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { } // get the last key of meters that belongs to this group - SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[k]->sid); + SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[k]->sid); if (pMeterObj != NULL) { if (key < pMeterObj->lastKey) { key = pMeterObj->lastKey; @@ -621,7 +633,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { int64_t num = doCheckMetersInGroup(pQInfo, index, start); assert(num >= 0); } else { - dTrace("QInfo:%p interp query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, pOneMeter->vnode, + dTrace("QInfo:%p interp query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, vid, pSids->numOfSubSet, pSupporter->subgroupIdx); for (int32_t k = start; k <= end; ++k) { @@ -648,7 +660,9 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { } } } else { - // this procedure treats all tables as single group + /* + * 1. super table projection query, 2. group-by on normal columns query, 3. 
ts-comp query + */ assert(pSupporter->meterIdx >= 0); /* @@ -667,18 +681,10 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { if (pSupporter->meterIdx >= pSids->numOfSids) { return; } - - for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) { - SOutputRes *pOneRes = &pRuntimeEnv->pResult[i]; - clearGroupResultBuf(pOneRes, pQuery->numOfOutputCols); - } - - pRuntimeEnv->usedIndex = 0; - taosCleanUpIntHash(pRuntimeEnv->hashList); - - int32_t primeHashSlot = 10039; - pRuntimeEnv->hashList = taosInitIntHash(primeHashSlot, POINTER_BYTES, taosHashInt); - + + resetCtxOutputBuf(pRuntimeEnv); + resetSlidingWindowInfo(&pRuntimeEnv->swindowResInfo, pQuery->numOfOutputCols); + while (pSupporter->meterIdx < pSupporter->numOfMeters) { int32_t k = pSupporter->meterIdx; @@ -686,6 +692,12 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); return; } + + + TSKEY skey = pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[k]->key; + if (skey > 0) { + pQuery->skey = skey; + } bool dataInDisk = true; bool dataInCache = true; @@ -704,7 +716,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { #endif SPointInterpoSupporter pointInterpSupporter = {0}; - if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter) == false) { + if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter, NULL) == false) { pQuery->skey = pSupporter->rawSKey; pQuery->ekey = pSupporter->rawEKey; @@ -725,9 +737,6 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { } } - vnodeUpdateQueryColumnIndex(pQuery, pRuntimeEnv->pMeterObj); - vnodeUpdateFilterColumnIndex(pQuery); - vnodeScanAllData(pRuntimeEnv); pQuery->pointsRead = getNumOfResult(pRuntimeEnv); @@ -738,7 +747,10 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { pSupporter->meterIdx = pSupporter->pSidSet->numOfSids; break; } - + + // enable execution for next table, when handling the projection query + enableExecutionForNextTable(pRuntimeEnv); + if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) { /* * query range is identical in terms of all meters involved in query, @@ -750,14 +762,15 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { pQuery->ekey = pSupporter->rawEKey; pSupporter->meterIdx++; + pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[k]->key = pQuery->lastKey; + // if the buffer is full or group by each table, we need to jump out of the loop if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL) || isGroupbyEachTable(pQuery->pGroupbyExpr, pSupporter->pSidSet)) { break; } - } else { - // forward query range + } else { // forward query range pQuery->skey = pQuery->lastKey; // all data in the result buffer are skipped due to the offset, continue to retrieve data from current meter @@ -765,6 +778,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { assert(!Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)); continue; } else { + pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[k]->key = pQuery->lastKey; // buffer is full, wait for the next round to retrieve data from current meter assert(Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)); break; @@ -773,7 +787,18 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { } } - if (!isGroupbyNormalCol(pQuery->pGroupbyExpr) && !isFirstLastRowQuery(pQuery)) { + /* + * 1. super table projection query, group-by on normal columns query, ts-comp query + * 2. 
point interpolation query, last row query + * + * group-by on normal columns query and last_row query do NOT invoke the finalizer here, + * since the finalize stage will be done at the client side. + * + * projection query, point interpolation query do not need the finalizer. + * + * Only the ts-comp query requires the finalizer function to be executed here. + */ + if (isTSCompQuery(pQuery)) { doFinalizeResult(pRuntimeEnv); } @@ -781,9 +806,14 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { pRuntimeEnv->cur = pRuntimeEnv->pTSBuf->cur; } + // todo refactor if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) { - SOutputRes *buf = &pRuntimeEnv->pResult[i]; + SSlidingWindowInfo* pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + + for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SOutputRes *buf = &pSlidingWindowInfo->pResult[i]; + pSlidingWindowInfo->pStatus[i].closed = true; // enable return all results for group by normal columns + for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { buf->numOfRows = MAX(buf->numOfRows, buf->resultInfo[j].numOfRes); } @@ -791,18 +821,16 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { pQInfo->pMeterQuerySupporter->subgroupIdx = 0; pQuery->pointsRead = 0; - copyFromGroupBuf(pQInfo, pRuntimeEnv->pResult); + copyFromGroupBuf(pQInfo, pSlidingWindowInfo->pResult); } pQInfo->pointsRead += pQuery->pointsRead; pQuery->pointsOffset = pQuery->pointsToRead; - moveDescOrderResultsToFront(pRuntimeEnv); - dTrace( "QInfo %p vid:%d, numOfMeters:%d, index:%d, numOfGroups:%d, %d points returned, totalRead:%d totalReturn:%d," - "next skey:%lld, offset:%lld", - pQInfo, pOneMeter->vnode, pSids->numOfSids, pSupporter->meterIdx, pSids->numOfSubSet, pQuery->pointsRead, + "next skey:%" PRId64 ", offset:%" PRId64, + pQInfo, vid, pSids->numOfSids, pSupporter->meterIdx, pSids->numOfSubSet, pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsReturned, pQuery->skey, pQuery->limit.offset); } @@ -811,19 +839,19 @@ static void doOrderedScan(SQInfo *pQInfo) { SQuery * pQuery = &pQInfo->query; if (QUERY_IS_ASC_QUERY(pQuery)) { - pSupporter->pMeterDataInfo = queryOnMultiDataFiles(pQInfo, pSupporter->pMeterDataInfo); + queryOnMultiDataFiles(pQInfo, pSupporter->pMeterDataInfo); if (pQInfo->code != TSDB_CODE_SUCCESS) { return; } - pSupporter->pMeterDataInfo = queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo); + queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo); } else { - pSupporter->pMeterDataInfo = queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo); + queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo); if (pQInfo->code != TSDB_CODE_SUCCESS) { return; } - pSupporter->pMeterDataInfo = queryOnMultiDataFiles(pQInfo, pSupporter->pMeterDataInfo); + queryOnMultiDataFiles(pQInfo, pSupporter->pMeterDataInfo); } } @@ -911,7 +939,7 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { return; } - dTrace("QInfo:%p query start, qrange:%lld-%lld, order:%d, group:%d", pQInfo, pSupporter->rawSKey, pSupporter->rawEKey, + dTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", order:%d, group:%d", pQInfo, pSupporter->rawSKey, pSupporter->rawEKey, pQuery->order.order, pSupporter->pSidSet->numOfSubSet); dTrace("QInfo:%p main query scan start", pQInfo); @@ -960,7 +988,7 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { * select count(*)/top(field,k)/avg(field name) from table_name [where ts>now-1a]; * select count(*) from table_name group by 
status_column; */ -static void vnodeSingleMeterFixedOutputProcessor(SQInfo *pQInfo) { +static void vnodeSingleTableFixedOutputProcessor(SQInfo *pQInfo) { SQuery * pQuery = &pQInfo->query; SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->pMeterQuerySupporter->runtimeEnv; @@ -983,20 +1011,13 @@ static void vnodeSingleMeterFixedOutputProcessor(SQInfo *pQInfo) { assert(isTopBottomQuery(pQuery)); } - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - pQInfo->pMeterQuerySupporter->subgroupIdx = 0; - pQuery->pointsRead = 0; - copyFromGroupBuf(pQInfo, pRuntimeEnv->pResult); - } - doSkipResults(pRuntimeEnv); doRevisedResultsByLimit(pQInfo); - moveDescOrderResultsToFront(pRuntimeEnv); pQInfo->pointsRead = pQuery->pointsRead; } -static void vnodeSingleMeterMultiOutputProcessor(SQInfo *pQInfo) { +static void vnodeSingleTableMultiOutputProcessor(SQInfo *pQInfo) { SQuery * pQuery = &pQInfo->query; SMeterObj *pMeterObj = pQInfo->pObj; @@ -1031,22 +1052,20 @@ static void vnodeSingleMeterMultiOutputProcessor(SQInfo *pQInfo) { TSKEY nextTimestamp = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos); assert(nextTimestamp > 0 || ((nextTimestamp < 0) && Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK))); - dTrace("QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%lld, next qrange:%lld-%lld", pQInfo, + dTrace("QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%" PRId64 ", next qrange:%" PRId64 "-%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->limit.offset, pQuery->lastKey, pQuery->ekey); resetCtxOutputBuf(pRuntimeEnv); } doRevisedResultsByLimit(pQInfo); - moveDescOrderResultsToFront(pRuntimeEnv); - pQInfo->pointsRead += pQuery->pointsRead; if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)) { TSKEY nextTimestamp = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos); assert(nextTimestamp > 0 || ((nextTimestamp < 0) && Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK))); - dTrace("QInfo:%p vid:%d sid:%d id:%s, query abort due to buffer limitation, next qrange:%lld-%lld", pQInfo, + dTrace("QInfo:%p vid:%d sid:%d id:%s, query abort due to buffer limitation, next qrange:%" PRId64 "-%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->lastKey, pQuery->ekey); } @@ -1067,7 +1086,8 @@ static void vnodeSingleMeterIntervalMainLooper(SMeterQuerySupportObj *pSupporter (pQuery->skey >= pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))); initCtxOutputBuf(pRuntimeEnv); - + clearCompletedSlidingWindows(&pRuntimeEnv->swindowResInfo, pQuery->numOfOutputCols); + vnodeScanAllData(pRuntimeEnv); if (isQueryKilled(pQuery)) { return; @@ -1098,7 +1118,7 @@ static void vnodeSingleMeterIntervalMainLooper(SMeterQuerySupportObj *pSupporter } forwardIntervalQueryRange(pSupporter, pRuntimeEnv); - if (Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED)) { + if (Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED|QUERY_RESBUF_FULL)) { break; } @@ -1116,7 +1136,7 @@ static void vnodeSingleMeterIntervalMainLooper(SMeterQuerySupportObj *pSupporter } /* handle time interval query on single table */ -static void vnodeSingleMeterIntervalProcessor(SQInfo *pQInfo) { +static void vnodeSingleTableIntervalProcessor(SQInfo *pQInfo) { SQuery * pQuery = &(pQInfo->query); SMeterObj *pMeterObj = pQInfo->pObj; @@ -1137,17 +1157,8 @@ static void vnodeSingleMeterIntervalProcessor(SQInfo *pQInfo) { taosInterpoSetStartInfo(&pRuntimeEnv->interpoInfo, pQuery->pointsRead, pQuery->interpoType); SData **pInterpoBuf = pRuntimeEnv->pInterpoBuf; - if (QUERY_IS_ASC_QUERY(pQuery)) { - for (int32_t 
i = 0; i < pQuery->numOfOutputCols; ++i) { - memcpy(pInterpoBuf[i]->data, pQuery->sdata[i]->data, pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes); - } - } else { - int32_t size = pMeterObj->pointsPerFileBlock; - for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - memcpy(pInterpoBuf[i]->data, - pQuery->sdata[i]->data + (size - pQuery->pointsRead) * pQuery->pSelectExpr[i].resBytes, - pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes); - } + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + memcpy(pInterpoBuf[i]->data, pQuery->sdata[i]->data, pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes); } numOfInterpo = 0; @@ -1164,18 +1175,22 @@ static void vnodeSingleMeterIntervalProcessor(SQInfo *pQInfo) { pQuery->pointsRead = 0; } } + + if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0)) { + pQInfo->pMeterQuerySupporter->subgroupIdx = 0; + pQuery->pointsRead = 0; + copyFromGroupBuf(pQInfo, pRuntimeEnv->swindowResInfo.pResult); + } pQInfo->pointsRead += pQuery->pointsRead; pQInfo->pointsInterpo += numOfInterpo; - moveDescOrderResultsToFront(pRuntimeEnv); - dTrace("%p vid:%d sid:%d id:%s, %d points returned %d points interpo, totalRead:%d totalInterpo:%d totalReturn:%d", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead, numOfInterpo, pQInfo->pointsRead - pQInfo->pointsInterpo, pQInfo->pointsInterpo, pQInfo->pointsReturned); } -void vnodeSingleMeterQuery(SSchedMsg *pMsg) { +void vnodeSingleTableQuery(SSchedMsg *pMsg) { SQInfo *pQInfo = (SQInfo *)pMsg->ahandle; if (pQInfo == NULL || pQInfo->pMeterQuerySupporter == NULL) { @@ -1213,7 +1228,6 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { (tFilePage **)pRuntimeEnv->pInterpoBuf, remain, &numOfInterpo); doRevisedResultsByLimit(pQInfo); - moveDescOrderResultsToFront(pRuntimeEnv); pQInfo->pointsInterpo += numOfInterpo; pQInfo->pointsRead += pQuery->pointsRead; @@ -1269,16 +1283,17 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { int64_t st = taosGetTimestampUs(); - if (pQuery->nAggTimeInterval != 0) { // interval (down sampling operation) + // group by normal column, sliding window query, interval query are handled by interval query processor + if (pQuery->nAggTimeInterval != 0 || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // interval (down sampling operation) assert(pQuery->checkBufferInLoop == 0 && pQuery->pointsOffset == pQuery->pointsToRead); - vnodeSingleMeterIntervalProcessor(pQInfo); + vnodeSingleTableIntervalProcessor(pQInfo); } else { if (isFixedOutputQuery(pQuery)) { assert(pQuery->checkBufferInLoop == 0); - vnodeSingleMeterFixedOutputProcessor(pQInfo); + vnodeSingleTableFixedOutputProcessor(pQInfo); } else { // diff/add/multiply/subtract/division assert(pQuery->checkBufferInLoop == 1); - vnodeSingleMeterMultiOutputProcessor(pQInfo); + vnodeSingleTableMultiOutputProcessor(pQInfo); } } @@ -1325,7 +1340,7 @@ void vnodeMultiMeterQuery(SSchedMsg *pMsg) { assert((pQuery->checkBufferInLoop == 1 && pQuery->nAggTimeInterval == 0) || isPointInterpoQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)); - vnodeMultiMeterMultiOutputProcessor(pQInfo); + vnodeSTableSeqProcessor(pQInfo); } /* record the total elapsed time */ diff --git a/src/system/detail/src/vnodeRead.c b/src/system/detail/src/vnodeRead.c index bbd3e9465c32174566f08b809545ae4b5d7e5f65..71dd088ae97b8110c85cc3733c6500ddc52e06f4 100644 --- a/src/system/detail/src/vnodeRead.c +++ b/src/system/detail/src/vnodeRead.c @@ -25,6 +25,8 @@ #include "vnode.h" #include "vnodeRead.h" 
#include "vnodeUtil.h" +#include "hash.h" +#include "hashutil.h" int (*pQueryFunc[])(SMeterObj *, SQuery *) = {vnodeQueryFromCache, vnodeQueryFromFile}; @@ -265,6 +267,7 @@ static SQInfo *vnodeAllocateQInfoEx(SQueryMeterMsg *pQueryMsg, SSqlGroupbyExpr * pQuery->pGroupbyExpr = pGroupbyExpr; pQuery->nAggTimeInterval = pQueryMsg->nAggTimeInterval; + pQuery->slidingTime = pQueryMsg->slidingTime; pQuery->interpoType = pQueryMsg->interpoType; pQuery->intervalTimeUnit = pQueryMsg->intervalTimeUnit; @@ -390,11 +393,6 @@ __clean_memory: return NULL; } -//static void vnodeFreeQInfoInQueueImpl(SSchedMsg *pMsg) { -// SQInfo *pQInfo = (SQInfo *)pMsg->ahandle; -// vnodeFreeQInfo(pQInfo, true); -//} - void vnodeFreeQInfoInQueue(void *param) { SQInfo *pQInfo = (SQInfo *)param; @@ -404,15 +402,6 @@ void vnodeFreeQInfoInQueue(void *param) { dTrace("QInfo:%p set kill flag to free QInfo"); vnodeDecRefCount(pQInfo); - -// dTrace("QInfo:%p set kill flag and add to queue, stop query ASAP", pQInfo); -// SSchedMsg schedMsg = {0}; -// schedMsg.fp = vnodeFreeQInfoInQueueImpl; - -// schedMsg.msg = NULL; -// schedMsg.thandle = (void *)1; -// schedMsg.ahandle = param; -// taosScheduleTask(queryQhandle, &schedMsg); } void vnodeFreeQInfo(void *param, bool decQueryRef) { @@ -581,13 +570,13 @@ void vnodeQueryData(SSchedMsg *pMsg) { pQuery->slot = -1; // reset the handle pQuery->over = 0; - dTrace("vid:%d sid:%d id:%s, query in other media, order:%d, skey:%lld query:%p", pObj->vnode, pObj->sid, + dTrace("vid:%d sid:%d id:%s, query in other media, order:%d, skey:%" PRId64 " query:%p", pObj->vnode, pObj->sid, pObj->meterId, pQuery->order.order, pQuery->skey, pQuery); } pQInfo->pointsRead += pQuery->pointsRead; - dTrace("vid:%d sid:%d id:%s, %d points returned, totalRead:%d totalReturn:%d last key:%lld, query:%p", pObj->vnode, + dTrace("vid:%d sid:%d id:%s, %d points returned, totalRead:%d totalReturn:%d last key:%" PRId64 ", query:%p", pObj->vnode, pObj->sid, pObj->meterId, pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsReturned, pQuery->lastKey, pQuery); @@ -616,7 +605,7 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE bool isProjQuery = vnodeIsProjectionQuery(pSqlExprs, pQueryMsg->numOfOutputCols); // todo pass the correct error code - if (isProjQuery) { + if (isProjQuery && pQueryMsg->tsLen == 0) { pQInfo = vnodeAllocateQInfo(pQueryMsg, pMeterObj, pSqlExprs); } else { pQInfo = vnodeAllocateQInfoEx(pQueryMsg, pGroupbyExpr, pSqlExprs, pMetersObj[0]); @@ -630,12 +619,17 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE pQuery = &(pQInfo->query); dTrace("qmsg:%p create QInfo:%p, QInfo created", pQueryMsg, pQInfo); - pQuery->skey = pQueryMsg->skey; + SMeterSidExtInfo** pSids = (SMeterSidExtInfo**)pQueryMsg->pSidExtInfo; + if (pSids != NULL && pSids[0]->key > 0) { + pQuery->skey = pSids[0]->key; + } else { + pQuery->skey = pQueryMsg->skey; + } + pQuery->ekey = pQueryMsg->ekey; pQuery->lastKey = pQuery->skey; pQInfo->fp = pQueryFunc[pQueryMsg->order]; - pQInfo->num = pQueryMsg->num; if (sem_init(&(pQInfo->dataReady), 0, 0) != 0) { dError("QInfo:%p vid:%d sid:%d meterId:%s, init dataReady sem failed, reason:%s", pQInfo, pMeterObj->vnode, @@ -646,7 +640,9 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE SSchedMsg schedMsg = {0}; - if (!isProjQuery) { + if (isProjQuery && pQueryMsg->tsLen == 0) { + schedMsg.fp = vnodeQueryData; + } else { if (vnodeParametersSafetyCheck(pQuery) == false) { *code = TSDB_CODE_APP_ERROR; goto 
_error; @@ -655,8 +651,9 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE SMeterQuerySupportObj *pSupporter = (SMeterQuerySupportObj *)calloc(1, sizeof(SMeterQuerySupportObj)); pSupporter->numOfMeters = 1; - pSupporter->pMeterObj = taosInitIntHash(pSupporter->numOfMeters, POINTER_BYTES, taosHashInt); - taosAddIntHash(pSupporter->pMeterObj, pMetersObj[0]->sid, (char *)&pMetersObj[0]); + pSupporter->pMetersHashTable = taosInitHashTable(pSupporter->numOfMeters, taosIntHash_32, false); + taosAddToHashTable(pSupporter->pMetersHashTable, (const char*) &pMetersObj[0]->sid, sizeof(pMeterObj[0].sid), + (char *)&pMetersObj[0], POINTER_BYTES); pSupporter->pSidSet = NULL; pSupporter->subgroupIdx = -1; @@ -682,9 +679,7 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE return pQInfo; } - schedMsg.fp = vnodeSingleMeterQuery; - } else { - schedMsg.fp = vnodeQueryData; + schedMsg.fp = vnodeSingleTableQuery; } /* @@ -734,7 +729,6 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE pQuery->ekey = pQueryMsg->ekey; pQInfo->fp = pQueryFunc[pQueryMsg->order]; - pQInfo->num = pQueryMsg->num; if (sem_init(&(pQInfo->dataReady), 0, 0) != 0) { dError("QInfo:%p vid:%d sid:%d id:%s, init dataReady sem failed, reason:%s", pQInfo, pMetersObj[0]->vnode, @@ -748,12 +742,12 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE SMeterQuerySupportObj *pSupporter = (SMeterQuerySupportObj *)calloc(1, sizeof(SMeterQuerySupportObj)); pSupporter->numOfMeters = pQueryMsg->numOfSids; - pSupporter->pMeterObj = taosInitIntHash(pSupporter->numOfMeters, POINTER_BYTES, taosHashInt); + pSupporter->pMetersHashTable = taosInitHashTable(pSupporter->numOfMeters, taosIntHash_32, false); for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) { - taosAddIntHash(pSupporter->pMeterObj, pMetersObj[i]->sid, (char *)&pMetersObj[i]); + taosAddToHashTable(pSupporter->pMetersHashTable, (const char*) &pMetersObj[i]->sid, sizeof(pMetersObj[i]->sid), (char *)&pMetersObj[i], + POINTER_BYTES); } - pSupporter->pMeterSidExtInfo = (SMeterSidExtInfo **)pQueryMsg->pSidExtInfo; int32_t sidElemLen = pQueryMsg->tagLength + sizeof(SMeterSidExtInfo); int32_t size = POINTER_BYTES * pQueryMsg->numOfSids + sidElemLen * pQueryMsg->numOfSids; @@ -767,12 +761,16 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE char *px = ((char *)pSupporter->pMeterSidExtInfo) + POINTER_BYTES * pQueryMsg->numOfSids; for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) { - pSupporter->pMeterSidExtInfo[i] = (SMeterSidExtInfo *)px; - pSupporter->pMeterSidExtInfo[i]->sid = ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]->sid; + SMeterSidExtInfo* pSrc = ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]; + SMeterSidExtInfo* pDst = (SMeterSidExtInfo *)px; + + pSupporter->pMeterSidExtInfo[i] = pDst; + pDst->sid = pSrc->sid; + pDst->uid = pSrc->uid; + pDst->key = pSrc->key; if (pQueryMsg->tagLength > 0) { - memcpy(pSupporter->pMeterSidExtInfo[i]->tags, ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]->tags, - pQueryMsg->tagLength); + memcpy(pDst->tags, pSrc->tags, pQueryMsg->tagLength); } px += sidElemLen; } @@ -902,7 +900,7 @@ int vnodeSaveQueryResult(void *handle, char *data, int32_t *size) { if (pQInfo->pMeterQuerySupporter != NULL) { if (pQInfo->pMeterQuerySupporter->pSidSet == NULL) { - schedMsg.fp = vnodeSingleMeterQuery; + schedMsg.fp = vnodeSingleTableQuery; } else { // group by tag schedMsg.fp = vnodeMultiMeterQuery; } @@ -923,27 
+921,27 @@ int vnodeSaveQueryResult(void *handle, char *data, int32_t *size) { static int32_t validateQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { if (pQueryMsg->nAggTimeInterval < 0) { - dError("qmsg:%p illegal value of aggTimeInterval %ld", pQueryMsg, pQueryMsg->nAggTimeInterval); + dError("qmsg:%p illegal value of aggTimeInterval %" PRId64 "", pQueryMsg, pQueryMsg->nAggTimeInterval); return -1; } if (pQueryMsg->numOfTagsCols < 0 || pQueryMsg->numOfTagsCols > TSDB_MAX_TAGS + 1) { - dError("qmsg:%p illegal value of numOfTagsCols %ld", pQueryMsg, pQueryMsg->numOfTagsCols); + dError("qmsg:%p illegal value of numOfTagsCols %d", pQueryMsg, pQueryMsg->numOfTagsCols); return -1; } if (pQueryMsg->numOfCols <= 0 || pQueryMsg->numOfCols > TSDB_MAX_COLUMNS) { - dError("qmsg:%p illegal value of numOfCols %ld", pQueryMsg, pQueryMsg->numOfCols); + dError("qmsg:%p illegal value of numOfCols %d", pQueryMsg, pQueryMsg->numOfCols); return -1; } if (pQueryMsg->numOfSids <= 0) { - dError("qmsg:%p illegal value of numOfSids %ld", pQueryMsg, pQueryMsg->numOfSids); + dError("qmsg:%p illegal value of numOfSids %d", pQueryMsg, pQueryMsg->numOfSids); return -1; } if (pQueryMsg->numOfGroupCols < 0) { - dError("qmsg:%p illegal value of numOfGroupbyCols %ld", pQueryMsg, pQueryMsg->numOfGroupCols); + dError("qmsg:%p illegal value of numOfGroupbyCols %d", pQueryMsg, pQueryMsg->numOfGroupCols); return -1; } @@ -972,14 +970,14 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { pQueryMsg->ekey = htobe64(pQueryMsg->ekey); #endif - pQueryMsg->num = htonl(pQueryMsg->num); - pQueryMsg->order = htons(pQueryMsg->order); pQueryMsg->orderColId = htons(pQueryMsg->orderColId); pQueryMsg->queryType = htons(pQueryMsg->queryType); pQueryMsg->nAggTimeInterval = htobe64(pQueryMsg->nAggTimeInterval); + pQueryMsg->slidingTime = htobe64(pQueryMsg->slidingTime); + pQueryMsg->numOfTagsCols = htons(pQueryMsg->numOfTagsCols); pQueryMsg->numOfCols = htons(pQueryMsg->numOfCols); pQueryMsg->numOfOutputCols = htons(pQueryMsg->numOfOutputCols); @@ -1102,11 +1100,13 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { pSids[0] = (SMeterSidExtInfo *)pMsg; pSids[0]->sid = htonl(pSids[0]->sid); pSids[0]->uid = htobe64(pSids[0]->uid); + pSids[0]->key = htobe64(pSids[0]->key); for (int32_t j = 1; j < pQueryMsg->numOfSids; ++j) { pSids[j] = (SMeterSidExtInfo *)((char *)pSids[j - 1] + sizeof(SMeterSidExtInfo) + pQueryMsg->tagLength); pSids[j]->sid = htonl(pSids[j]->sid); pSids[j]->uid = htobe64(pSids[j]->uid); + pSids[j]->key = htobe64(pSids[j]->key); } pMsg = (char *)pSids[pQueryMsg->numOfSids - 1]; @@ -1141,9 +1141,9 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { } } - dTrace("qmsg:%p query on %d meter(s), qrange:%lld-%lld, numOfGroupbyTagCols:%d, numOfTagCols:%d, timestamp order:%d, " - "tags order:%d, tags order col:%d, numOfOutputCols:%d, numOfCols:%d, interval:%lld, fillType:%d, comptslen:%d, limit:%lld, " - "offset:%lld", + dTrace("qmsg:%p query on %d meter(s), qrange:%" PRId64 "-%" PRId64 ", numOfGroupbyTagCols:%d, numOfTagCols:%d, timestamp order:%d, " + "tags order:%d, tags order col:%d, numOfOutputCols:%d, numOfCols:%d, interval:%" PRId64 ", fillType:%d, comptslen:%d, limit:%" PRId64 ", " + "offset:%" PRId64, pQueryMsg, pQueryMsg->numOfSids, pQueryMsg->skey, pQueryMsg->ekey, pQueryMsg->numOfGroupCols, pQueryMsg->numOfTagsCols, pQueryMsg->order, pQueryMsg->orderType, pQueryMsg->orderByIdx, pQueryMsg->numOfOutputCols, pQueryMsg->numOfCols, pQueryMsg->nAggTimeInterval, pQueryMsg->interpoType, diff --git 
a/src/system/detail/src/vnodeShell.c b/src/system/detail/src/vnodeShell.c index ce1cabe1415c37173b3db1a1d3cc138b7cbef6f7..69d502c61828154b6ea985399e41e5039a6f9805 100644 --- a/src/system/detail/src/vnodeShell.c +++ b/src/system/detail/src/vnodeShell.c @@ -215,7 +215,10 @@ void vnodeCloseShellVnode(int vnode) { if (shellList[vnode] == NULL) return; for (int i = 0; i < vnodeList[vnode].cfg.maxSessions; ++i) { - vnodeFreeQInfo(shellList[vnode][i].qhandle, true); + void* qhandle = shellList[vnode][i].qhandle; + if (qhandle != NULL) { + vnodeDecRefCount(qhandle); + } } int32_t* v = malloc(sizeof(int32_t)); @@ -308,7 +311,7 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { if (pVnode->cfg.maxSessions == 0) { dError("qmsg:%p,vid:%d is not activated yet", pQueryMsg, pQueryMsg->vnode); vnodeSendVpeerCfgMsg(pQueryMsg->vnode); - code = TSDB_CODE_NOT_ACTIVE_TABLE; + code = TSDB_CODE_NOT_ACTIVE_VNODE; goto _query_over; } @@ -352,7 +355,7 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { assert(incNumber <= pQueryMsg->numOfSids); pthread_mutex_unlock(&pVnode->vmutex); - if (code != TSDB_CODE_SUCCESS) { + if (code != TSDB_CODE_SUCCESS || pQueryMsg->numOfSids == 0) { // all the meters may have been dropped. goto _query_over; } @@ -369,8 +372,10 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { if (pObj->qhandle) { dTrace("QInfo:%p %s free qhandle", pObj->qhandle, __FUNCTION__); - vnodeFreeQInfo(pObj->qhandle, true); + void* qHandle = pObj->qhandle; pObj->qhandle = NULL; + + vnodeDecRefCount(qHandle); } if (QUERY_IS_STABLE_QUERY(pQueryMsg->queryType)) { @@ -412,6 +417,7 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { int code = 0; pRetrieve = (SRetrieveMeterMsg *)pMsg; + SQInfo* pQInfo = (SQInfo*)pRetrieve->qhandle; pRetrieve->free = htons(pRetrieve->free); if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) { @@ -438,7 +444,15 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { size = vnodeGetResultSize((void *)(pRetrieve->qhandle), &numOfRows); } - pStart = taosBuildRspMsgWithSize(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, size + 100); + // buffer size for progress information, including meter count, + // and for each meter, including 'uid' and 'TSKEY'. 
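The comment above introduces the sizing arithmetic for the progress block that the next lines compute. As a concrete picture of the wire layout, here is a minimal hypothetical sketch (writeProgress is not a TDengine function, and TSKEY is assumed to be int64_t as elsewhere in this patch): a meter count followed by one (uid, key) pair per meter, all in network byte order.

```c
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>  /* htonl */
#include <endian.h>     /* htobe64 (glibc; other platforms may need a different header) */

typedef int64_t TSKEY;

/* Hypothetical sketch, not part of the patch: serialize the progress block in
 * the layout the retrieve response uses. The caller must size buf to at least
 * sizeof(int32_t) + n * (sizeof(int64_t) + sizeof(TSKEY)). */
static size_t writeProgress(char *buf, const int64_t *uids, const TSKEY *keys, int32_t n) {
  char *pMsg = buf;

  *((int32_t *)pMsg) = (int32_t)htonl((uint32_t)n);
  pMsg += sizeof(int32_t);

  for (int32_t i = 0; i < n; ++i) {
    *((int64_t *)pMsg) = (int64_t)htobe64((uint64_t)uids[i]);
    pMsg += sizeof(int64_t);
    *((TSKEY *)pMsg) = (TSKEY)htobe64((uint64_t)keys[i]);
    pMsg += sizeof(TSKEY);
  }

  return (size_t)(pMsg - buf);
}
```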
+ int progressSize = 0; + if (pQInfo->pMeterQuerySupporter != NULL) + progressSize = pQInfo->pMeterQuerySupporter->numOfMeters * (sizeof(int64_t) + sizeof(TSKEY)) + sizeof(int32_t); + else if (pQInfo->pObj != NULL) + progressSize = sizeof(int64_t) + sizeof(TSKEY) + sizeof(int32_t); + + pStart = taosBuildRspMsgWithSize(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, progressSize + size + 100); if (pStart == NULL) { taosSendSimpleRsp(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, TSDB_CODE_SERV_OUT_OF_MEMORY); goto _exit; @@ -468,11 +482,37 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { } pMsg += size; + + // write the progress information of each meter to response + // this is required by subscriptions + if (pQInfo->pMeterQuerySupporter != NULL && pQInfo->pMeterQuerySupporter->pMeterSidExtInfo != NULL) { + *((int32_t*)pMsg) = htonl(pQInfo->pMeterQuerySupporter->numOfMeters); + pMsg += sizeof(int32_t); + for (int32_t i = 0; i < pQInfo->pMeterQuerySupporter->numOfMeters; i++) { + *((int64_t*)pMsg) = htobe64(pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[i]->uid); + pMsg += sizeof(int64_t); + *((TSKEY*)pMsg) = htobe64(pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[i]->key); + pMsg += sizeof(TSKEY); + } + } else if (pQInfo->pObj != NULL) { + *((int32_t*)pMsg) = htonl(1); + pMsg += sizeof(int32_t); + *((int64_t*)pMsg) = htobe64(pQInfo->pObj->uid); + pMsg += sizeof(int64_t); + if (pQInfo->pointsRead > 0) { + *((TSKEY*)pMsg) = htobe64(pQInfo->query.lastKey + 1); + } else { + *((TSKEY*)pMsg) = htobe64(pQInfo->query.lastKey); + } + pMsg += sizeof(TSKEY); + } + msgLen = pMsg - pStart; assert(code != TSDB_CODE_ACTION_IN_PROGRESS); - if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS)) { + if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS) && + pRetrieve->qhandle != 0) { dTrace("QInfo:%p %s free qhandle code:%d", pObj->qhandle, __FUNCTION__, code); vnodeDecRefCount(pObj->qhandle); pObj->qhandle = NULL; @@ -514,7 +554,7 @@ static int vnodeCheckSubmitBlockContext(SShellSubmitBlock *pBlocks, SVnodeObj *p } if (pMeterObj->uid != uid) { - dError("vid:%d sid:%d id:%s, uid:%lld, uid in msg:%lld, uid mismatch", pVnode->vnode, sid, pMeterObj->meterId, + dError("vid:%d sid:%d id:%s, uid:%" PRIu64 ", uid in msg:%" PRIu64 ", uid mismatch", pVnode->vnode, sid, pMeterObj->meterId, pMeterObj->uid, uid); return TSDB_CODE_INVALID_SUBMIT_MSG; } @@ -584,6 +624,7 @@ int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj) { SShellSubmitMsg *pSubmit = &shellSubmit; SShellSubmitBlock *pBlocks = NULL; + pSubmit->import = htons(pSubmit->import); pSubmit->vnode = htons(pSubmit->vnode); pSubmit->numOfSid = htonl(pSubmit->numOfSid); diff --git a/src/system/detail/src/vnodeStatus.c b/src/system/detail/src/vnodeStatus.c index d7b593ec139f920792f5b71ca150331360d69b6a..d78f3633fbb2ab23b5e2f0179eaa7bc98de79813 100644 --- a/src/system/detail/src/vnodeStatus.c +++ b/src/system/detail/src/vnodeStatus.c @@ -19,11 +19,13 @@ const char* taosGetVgroupStatusStr(int32_t vgroupStatus) { switch (vgroupStatus) { - case TSDB_VG_STATUS_READY: return "ready"; - case TSDB_VG_STATUS_IN_PROGRESS: return "inprogress"; - case TSDB_VG_STATUS_COMMITLOG_INIT_FAILED: return "commitlog_init_failed"; - case TSDB_VG_STATUS_INIT_FAILED: return "init_failed"; - case TSDB_VG_STATUS_FULL: return "full"; + case TSDB_VG_STATUS_READY: return tsError[vgroupStatus]; + case TSDB_VG_STATUS_IN_PROGRESS: return tsError[vgroupStatus]; + 
case TSDB_VG_STATUS_NO_DISK_PERMISSIONS: return tsError[vgroupStatus]; + case TSDB_VG_STATUS_SERVER_NO_PACE: return tsError[vgroupStatus]; + case TSDB_VG_STATUS_SERV_OUT_OF_MEMORY: return tsError[vgroupStatus]; + case TSDB_VG_STATUS_INIT_FAILED: return tsError[vgroupStatus]; + case TSDB_VG_STATUS_FULL: return tsError[vgroupStatus]; default: return "undefined"; } } diff --git a/src/system/detail/src/vnodeStore.c b/src/system/detail/src/vnodeStore.c index 360216e9645f6e1bbbc9d15884bc9996381e55b8..5949b1636d1e5d48991df1ed06f63ca354a79d9a 100644 --- a/src/system/detail/src/vnodeStore.c +++ b/src/system/detail/src/vnodeStore.c @@ -42,24 +42,24 @@ static int vnodeInitStoreVnode(int vnode) { pVnode->pCachePool = vnodeOpenCachePool(vnode); if (pVnode->pCachePool == NULL) { dError("vid:%d, cache pool init failed.", pVnode->vnode); - return -1; + return TSDB_CODE_SERV_OUT_OF_MEMORY; } - if (vnodeInitFile(vnode) < 0) { + if (vnodeInitFile(vnode) != TSDB_CODE_SUCCESS) { dError("vid:%d, files init failed.", pVnode->vnode); - return -1; + return TSDB_CODE_VG_INIT_FAILED; } - if (vnodeInitCommit(vnode) < 0) { + if (vnodeInitCommit(vnode) != TSDB_CODE_SUCCESS) { dError("vid:%d, commit init failed.", pVnode->vnode); - return -1; + return TSDB_CODE_VG_INIT_FAILED; } pthread_mutex_init(&(pVnode->vmutex), NULL); - dPrint("vid:%d, storage initialized, version:%ld fileId:%d numOfFiles:%d", vnode, pVnode->version, pVnode->fileId, + dPrint("vid:%d, storage initialized, version:%" PRIu64 " fileId:%d numOfFiles:%d", vnode, pVnode->version, pVnode->fileId, pVnode->numOfFiles); - return 0; + return TSDB_CODE_SUCCESS; } int vnodeOpenVnode(int vnode) { @@ -183,22 +183,45 @@ int vnodeCreateVnode(int vnode, SVnodeCfg *pCfg, SVPeerDesc *pDesc) { vnodeList[vnode].vnodeStatus = TSDB_VN_STATUS_CREATING; sprintf(fileName, "%s/vnode%d", tsDirectory, vnode); - mkdir(fileName, 0755); + if (mkdir(fileName, 0755) != 0) { + dError("failed to create vnode:%d directory:%s, errno:%d, reason:%s", vnode, fileName, errno, strerror(errno)); + if (errno == EACCES) { + return TSDB_CODE_NO_DISK_PERMISSIONS; + } else if (errno == ENOSPC) { + return TSDB_CODE_SERV_NO_DISKSPACE; + } else if (errno == EEXIST) { + } else { + return TSDB_CODE_VG_INIT_FAILED; + } + } sprintf(fileName, "%s/vnode%d/db", tsDirectory, vnode); - mkdir(fileName, 0755); + if (mkdir(fileName, 0755) != 0) { + dError("failed to create vnode:%d directory:%s, errno:%d, reason:%s", vnode, fileName, errno, strerror(errno)); + if (errno == EACCES) { + return TSDB_CODE_NO_DISK_PERMISSIONS; + } else if (errno == ENOSPC) { + return TSDB_CODE_SERV_NO_DISKSPACE; + } else if (errno == EEXIST) { + } else { + return TSDB_CODE_VG_INIT_FAILED; + } + } vnodeList[vnode].cfg = *pCfg; - if (vnodeCreateMeterObjFile(vnode) != 0) { - return TSDB_CODE_VG_INIT_FAILED; + int code = vnodeCreateMeterObjFile(vnode); + if (code != TSDB_CODE_SUCCESS) { + return code; } - if (vnodeSaveVnodeCfg(vnode, pCfg, pDesc) != 0) { + code = vnodeSaveVnodeCfg(vnode, pCfg, pDesc); + if (code != TSDB_CODE_SUCCESS) { return TSDB_CODE_VG_INIT_FAILED; } - if (vnodeInitStoreVnode(vnode) < 0) { - return TSDB_CODE_VG_COMMITLOG_INIT_FAILED; + code = vnodeInitStoreVnode(vnode); + if (code != TSDB_CODE_SUCCESS) { + return code; } return vnodeOpenVnode(vnode); @@ -291,7 +314,8 @@ int vnodeInitStore() { if (vnodeInitInfo() < 0) return -1; for (vnode = 0; vnode < TSDB_MAX_VNODES; ++vnode) { - if (vnodeInitStoreVnode(vnode) < 0) { + int code = vnodeInitStoreVnode(vnode); + if (code != TSDB_CODE_SUCCESS) { // one vnode is failed to 
recover from the commit log, continue with the remaining vnodes
      return -1;
    }
diff --git a/src/system/detail/src/vnodeTagMgmt.c b/src/system/detail/src/vnodeTagMgmt.c
index adf4e544bbf9efea877aa3f688afc981437815e6..cea4f75f83fb200f1bae2473691a329ce91ccae5 100644
--- a/src/system/detail/src/vnodeTagMgmt.c
+++ b/src/system/detail/src/vnodeTagMgmt.c
@@ -323,7 +323,7 @@ void tTagsPrints(SMeterSidExtInfo *pMeterInfo, tTagSchema *pSchema, tOrderIdx *p
       printf("%f, ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, float));
       break;
     case TSDB_DATA_TYPE_BIGINT:
-      printf("%ld, ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, int64_t));
+      printf("%" PRId64 ", ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, int64_t));
       break;
     case TSDB_DATA_TYPE_SMALLINT:
       printf("%d, ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, int16_t));
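The %ld to PRId64 substitution above recurs throughout this patch, and the reason is portability: the printf length modifier for int64_t differs across platforms. A tiny self-contained illustration (the values are made up):

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t  skey = 1500000000000LL;  /* e.g. a TSKEY in milliseconds */
  uint64_t uid  = 42;

  /* "%ld" matches int64_t only on LP64 platforms; PRId64/PRIu64 expand to the
   * right length modifier everywhere (e.g. "lld" on 32-bit ARM and Windows). */
  printf("qrange:%" PRId64 "-%" PRId64 ", uid:%" PRIu64 "\n", skey, skey + 1000, uid);
  return 0;
}
```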
sid:%d uid:%" PRId64 " in msg", pQueryMsg, pQueryMsg->vnode, pMeter->sid, pMeter->meterId, pMeter->uid, pSids[i]->sid, pSids[i]->uid); vnodeSendMeterCfgMsg(pQueryMsg->vnode, pSids[i]->sid); @@ -572,9 +579,11 @@ int32_t vnodeIncQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterSidExtInfo** pSid * vnodeIsSafeToDeleteMeter will wait for this function complete, and then it can * check if the numOfQueries is 0 or not. */ - pMeterObjList[(*numOfInc)++] = pMeter; + pMeterObjList[(*numOfIncTables)++] = pMeter; atomic_fetch_add_32(&pMeter->numOfQueries, 1); - + + pSids[index++] = pSids[i]; + // output for meter more than one query executed if (pMeter->numOfQueries > 1) { dTrace("qmsg:%p, vid:%d sid:%d id:%s, inc query ref, numOfQueries:%d", pQueryMsg, pMeter->vnode, pMeter->sid, @@ -583,16 +592,19 @@ int32_t vnodeIncQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterSidExtInfo** pSid } } - dTrace("qmsg:%p, query meters: %d, inc query ref %d, numOfQueries on %d meters are 1", pQueryMsg, - pQueryMsg->numOfSids, *numOfInc, (*numOfInc) - num); + dTrace("qmsg:%p, query meters: %d, inc query ref %d, numOfQueries on %d meters are 1, queried meters:%d after " + "filter missing meters", pQueryMsg, pQueryMsg->numOfSids, *numOfIncTables, (*numOfIncTables) - num, index); + assert(pQueryMsg->numOfSids >= (*numOfIncTables) && pQueryMsg->numOfSids >= index); + + pQueryMsg->numOfSids = index; return code; } -void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList, int32_t numOfInc) { +void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList, int32_t numOfIncTables) { int32_t num = 0; - for (int32_t i = 0; i < numOfInc; ++i) { + for (int32_t i = 0; i < numOfIncTables; ++i) { SMeterObj* pMeter = pMeterObjList[i]; if (pMeter != NULL) { // here, do not need to lock to perform operations @@ -606,7 +618,7 @@ void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList, } } - dTrace("qmsg:%p, dec query ref for %d meters, numOfQueries on %d meters are 0", pQueryMsg, numOfInc, numOfInc - num); + dTrace("qmsg:%p, dec query ref for %d meters, numOfQueries on %d meters are 0", pQueryMsg, numOfIncTables, numOfIncTables - num); } void vnodeUpdateQueryColumnIndex(SQuery* pQuery, SMeterObj* pMeterObj) { diff --git a/src/system/lite/src/mgmtShell.spec.c b/src/system/lite/src/mgmtShell.spec.c index 5195010b4129fb2677526aa62f3d40f53d9bdded..a1d8e6a34a4e2cc2d7df7c1acc6cdf75a796fc1a 100644 --- a/src/system/lite/src/mgmtShell.spec.c +++ b/src/system/lite/src/mgmtShell.spec.c @@ -24,7 +24,7 @@ int mgmtProcessAlterAcctMsg(char *pMsg, int msgLen, SConnObj *pConn) { } int mgmtProcessCreateDnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) { - return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_PNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT); + return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_DNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT); } int mgmtProcessCfgMnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) { @@ -36,7 +36,7 @@ int mgmtProcessDropMnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) { } int mgmtProcessDropDnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) { - return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_PNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT); + return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_DNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT); } int mgmtProcessDropAcctMsg(char *pMsg, int msgLen, SConnObj *pConn) { diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 
diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt
index f88e5d6396fcd6762a6f1fb42e7b97ec46113354..76664ef9ec2e0c42642fc3beb49bfa865cc96d6a 100644
--- a/src/util/CMakeLists.txt
+++ b/src/util/CMakeLists.txt
@@ -34,6 +34,7 @@ ELSEIF (TD_WINDOWS_64)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/iconv)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/regex)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
+  LIST(APPEND SRC ./src/hash.c)
   LIST(APPEND SRC ./src/ihash.c)
   LIST(APPEND SRC ./src/lz4.c)
   LIST(APPEND SRC ./src/shash.c)
@@ -68,9 +69,37 @@ ELSEIF (TD_WINDOWS_64)
   TARGET_LINK_LIBRARIES(tutil iconv regex pthread os winmm IPHLPAPI ws2_32)
 ELSEIF(TD_DARWIN_64)
   ADD_DEFINITIONS(-DUSE_LIBICONV)
-  AUX_SOURCE_DIRECTORY(src SRC)
-  LIST(REMOVE_ITEM SRC ./src/tcrc32c.c)
-  LIST(REMOVE_ITEM SRC ./src/tdes.c)
+  LIST(APPEND SRC ./src/hash.c)
+  LIST(APPEND SRC ./src/ihash.c)
+  LIST(APPEND SRC ./src/lz4.c)
+  LIST(APPEND SRC ./src/shash.c)
+  LIST(APPEND SRC ./src/tbase64.c)
+  LIST(APPEND SRC ./src/tcache.c)
+  LIST(APPEND SRC ./src/tcompression.c)
+  LIST(APPEND SRC ./src/textbuffer.c)
+  LIST(APPEND SRC ./src/tglobalcfg.c)
+  LIST(APPEND SRC ./src/thash.c)
+  LIST(APPEND SRC ./src/thashutil.c)
+  LIST(APPEND SRC ./src/thistogram.c)
+  LIST(APPEND SRC ./src/tidpool.c)
+  LIST(APPEND SRC ./src/tinterpolation.c)
+  LIST(APPEND SRC ./src/tlog.c)
+  LIST(APPEND SRC ./src/tlosertree.c)
+  LIST(APPEND SRC ./src/tmd5.c)
+  LIST(APPEND SRC ./src/tmem.c)
+  LIST(APPEND SRC ./src/tmempool.c)
+  LIST(APPEND SRC ./src/tmodule.c)
+  LIST(APPEND SRC ./src/tnote.c)
+  LIST(APPEND SRC ./src/tsched.c)
+  LIST(APPEND SRC ./src/tskiplist.c)
+  LIST(APPEND SRC ./src/tsocket.c)
+  LIST(APPEND SRC ./src/tstrbuild.c)
+  LIST(APPEND SRC ./src/ttime.c)
+  LIST(APPEND SRC ./src/ttimer.c)
+  LIST(APPEND SRC ./src/ttokenizer.c)
+  LIST(APPEND SRC ./src/ttypes.c)
+  LIST(APPEND SRC ./src/tutil.c)
+  LIST(APPEND SRC ./src/version.c)
   ADD_LIBRARY(tutil ${SRC})
   TARGET_LINK_LIBRARIES(tutil iconv pthread os)
 ENDIF()
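The new hash.c that follows sizes its table with taosHashCapacity, which rounds the requested capacity up to a power of two (minimum 4, capped at HASH_MAX_CAPACITY). A standalone sketch of the same rounding, with a couple of sanity checks:

```c
#include <assert.h>
#include <stdint.h>

#define HASH_MAX_CAPACITY (1024 * 1024 * 16)

/* Round a requested capacity up to the next power of two, mirroring
 * taosHashCapacity below. A power-of-two size lets the slot index be
 * computed with a mask instead of a modulo. */
static uint32_t roundCapacity(uint32_t requested) {
  uint32_t len = requested < HASH_MAX_CAPACITY ? requested : HASH_MAX_CAPACITY;
  uint32_t i = 4;
  while (i < len) i <<= 1;
  return i;
}

int main(void) {
  assert(roundCapacity(3) == 4);
  assert(roundCapacity(1000) == 1024);
  assert((roundCapacity(100000) & (roundCapacity(100000) - 1)) == 0);  /* power of two */
  return 0;
}
```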
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
new file mode 100644
index 0000000000000000000000000000000000000000..506829368812325d4c77492ea9411d9952944034
--- /dev/null
+++ b/src/util/src/hash.c
@@ -0,0 +1,545 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "os.h"
+
+#include "hash.h"
+#include "tlog.h"
+#include "ttime.h"
+#include "tutil.h"
+
+static FORCE_INLINE void __wr_lock(void *lock) {
+#if defined LINUX
+  pthread_rwlock_wrlock(lock);
+#else
+  pthread_mutex_lock(lock);
+#endif
+}
+
+static FORCE_INLINE void __rd_lock(void *lock) {
+#if defined LINUX
+  pthread_rwlock_rdlock(lock);
+#else
+  pthread_mutex_lock(lock);
+#endif
+}
+
+static FORCE_INLINE void __unlock(void *lock) {
+#if defined LINUX
+  pthread_rwlock_unlock(lock);
+#else
+  pthread_mutex_unlock(lock);
+#endif
+}
+
+static FORCE_INLINE int32_t __lock_init(void *lock) {
+#if defined LINUX
+  return pthread_rwlock_init(lock, NULL);
+#else
+  return pthread_mutex_init(lock, NULL);
+#endif
+}
+
+static FORCE_INLINE void __lock_destroy(void *lock) {
+#if defined LINUX
+  pthread_rwlock_destroy(lock);
+#else
+  pthread_mutex_destroy(lock);
+#endif
+}
+
+static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
+  int32_t len = MIN(length, HASH_MAX_CAPACITY);
+
+  uint32_t i = 4;
+  while (i < len) i = (i << 1U);
+  return i;
+}
+
+/**
+ * hash key function
+ *
+ * @param key key string
+ * @param len length of key
+ * @return hash value
+ */
+static FORCE_INLINE uint32_t taosHashKey(const char *key, uint32_t len) { return MurmurHash3_32(key, len); }
+
+/**
+ * in-place update of a node in the hash table
+ * @param pObj hash table object
+ * @param pNode data node
+ */
+static void doUpdateHashTable(HashObj *pObj, SHashNode *pNode) {
+  if (pNode->prev1) {
+    pNode->prev1->next = pNode;
+  }
+
+  if (pNode->next) {
+    (pNode->next)->prev = pNode;
+  }
+
+  pTrace("key:%s %p update hash table", pNode->key, pNode);
+}
+
+/**
+ * get the SHashNode for a given key from the hash table
+ * @param pObj hash table object
+ * @param key key for hash
+ * @param keyLen key length
+ * @return
+ */
+static SHashNode *doGetNodeFromHashTable(HashObj *pObj, const char *key, uint32_t keyLen, uint32_t *hashVal) {
+  uint32_t hash = (*pObj->hashFp)(key, keyLen);
+
+  int32_t slot = HASH_INDEX(hash, pObj->capacity);
+  SHashEntry *pEntry = pObj->hashList[slot];
+
+  SHashNode *pNode = pEntry->next;
+  while (pNode) {
+    if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) {
+      break;
+    }
+
+    pNode = pNode->next;
+  }
+
+  if (pNode) {
+    assert(HASH_INDEX(pNode->hashVal, pObj->capacity) == slot);
+  }
+
+  // return the calculated hash value, to avoid calculating it again in other functions
+  if (hashVal != NULL) {
+    *hashVal = hash;
+  }
+
+  return pNode;
+}
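doGetNodeFromHashTable above computes the hash once, hands it back through hashVal, and walks one slot's chain comparing the cheap length field before the key bytes. HASH_INDEX itself is not shown in this excerpt; the sketch below assumes the usual power-of-two masking, which matches the capacity established by taosHashCapacity:

```c
#include <stdint.h>
#include <string.h>

/* Assumed definition of HASH_INDEX (not shown in this diff): with a
 * power-of-two capacity, masking is equivalent to `hash % capacity`. */
#define HASH_INDEX(v, c) ((v) & ((c)-1))

/* Simplified node; the real SHashNode keeps the key behind the data block. */
typedef struct SNode {
  struct SNode *next;
  uint32_t      hashVal;
  uint32_t      keyLen;
  const char   *key;
} SNode;

/* Walk one slot's chain, comparing length first and bytes second,
 * the same order doGetNodeFromHashTable uses. */
static SNode *lookupInSlot(SNode **slots, uint32_t capacity, uint32_t hash,
                           const char *key, uint32_t keyLen) {
  SNode *p = slots[HASH_INDEX(hash, capacity)];
  while (p != NULL) {
    if (p->keyLen == keyLen && memcmp(p->key, key, keyLen) == 0) {
      break;
    }
    p = p->next;
  }
  return p;
}
```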
+/**
+ * resize the hash list if the threshold is reached
+ *
+ * @param pObj
+ */
+static void taosHashTableResize(HashObj *pObj) {
+  if (pObj->size < pObj->capacity * HASH_DEFAULT_LOAD_FACTOR) {
+    return;
+  }
+
+  // double the original capacity
+  SHashNode *pNode = NULL;
+  SHashNode *pNext = NULL;
+
+  int32_t newSize = pObj->capacity << 1U;
+  if (newSize > HASH_MAX_CAPACITY) {
+    pTrace("current capacity:%d, maximum capacity:%d, no resize applied since the limit has been reached", pObj->capacity,
+           HASH_MAX_CAPACITY);
+    return;
+  }
+
+  int64_t st = taosGetTimestampUs();
+
+  SHashEntry **pNewEntry = realloc(pObj->hashList, sizeof(SHashEntry*) * newSize);
+  if (pNewEntry == NULL) {
+    pTrace("hash table resize failed due to out of memory, capacity remains:%d", pObj->capacity);
+    return;
+  }
+
+  pObj->hashList = pNewEntry;
+  for(int32_t i = pObj->capacity; i < newSize; ++i) {
+    pObj->hashList[i] = calloc(1, sizeof(SHashEntry));
+  }
+
+  pObj->capacity = newSize;
+
+  for (int32_t i = 0; i < pObj->capacity; ++i) {
+    SHashEntry *pEntry = pObj->hashList[i];
+
+    pNode = pEntry->next;
+    if (pNode != NULL) {
+      assert(pNode->prev1 == pEntry && pEntry->num > 0);
+    }
+
+    while (pNode) {
+      int32_t j = HASH_INDEX(pNode->hashVal, pObj->capacity);
+      if (j == i) {  // this key resides in the same slot, no need to relocate it
+        pNode = pNode->next;
+      } else {
+        pNext = pNode->next;
+
+        // remove from current slot
+        assert(pNode->prev1 != NULL);
+
+        if (pNode->prev1 == pEntry) {  // first node of the overflow linked list
+          pEntry->next = pNode->next;
+        } else {
+          pNode->prev->next = pNode->next;
+        }
+
+        pEntry->num--;
+        assert(pEntry->num >= 0);
+
+        if (pNode->next != NULL) {
+          (pNode->next)->prev = pNode->prev;
+        }
+
+        // add into the new slot
+        pNode->next = NULL;
+        pNode->prev1 = NULL;
+
+        SHashEntry *pNewIndexEntry = pObj->hashList[j];
+
+        if (pNewIndexEntry->next != NULL) {
+          assert(pNewIndexEntry->next->prev1 == pNewIndexEntry);
+
+          pNewIndexEntry->next->prev = pNode;
+        }
+
+        pNode->next = pNewIndexEntry->next;
+        pNode->prev1 = pNewIndexEntry;
+
+        pNewIndexEntry->next = pNode;
+        pNewIndexEntry->num++;
+
+        // continue
+        pNode = pNext;
+      }
+    }
+  }
+
+  int64_t et = taosGetTimestampUs();
+
+  pTrace("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pObj->capacity,
+         ((double)pObj->size) / pObj->capacity, (et - st) / 1000.0);
+}
+
+/**
+ * @param capacity maximum slots available for hash elements
+ * @param fn hash function
+ * @return
+ */
+void *taosInitHashTable(uint32_t capacity, _hash_fn_t fn, bool multithreadSafe) {
+  if (capacity == 0 || fn == NULL) {
+    return NULL;
+  }
+
+  HashObj *pObj = (HashObj *)calloc(1, sizeof(HashObj));
+  if (pObj == NULL) {
+    pError("failed to allocate memory, reason:%s", strerror(errno));
+    return NULL;
+  }
+
+  // the number of slots is rounded up to a power of two, not taken verbatim from the user
+  pObj->capacity = taosHashCapacity(capacity);
+  assert((pObj->capacity & (pObj->capacity - 1)) == 0);
+
+  pObj->hashFp = fn;
+
+  pObj->hashList = (SHashEntry **)calloc(pObj->capacity, sizeof(SHashEntry*));
+  if (pObj->hashList == NULL) {
+    free(pObj);
+    pError("failed to allocate memory, reason:%s", strerror(errno));
+    return NULL;
+  }
+
+  for(int32_t i = 0; i < pObj->capacity; ++i) {
+    pObj->hashList[i] = calloc(1, sizeof(SHashEntry));
+  }
+
+  if (multithreadSafe && (__lock_init(pObj) != 0)) {
+    free(pObj->hashList);
+    free(pObj);
+
+    pError("failed to init lock, reason:%s", strerror(errno));
+    return NULL;
+  }
+
+  return (void *)pObj;
+}
+
+/**
+ * @param key key of object for hash, usually a null-terminated string
+ * @param keyLen length of key
+ * @param pData actual data; a contiguous memory block is required, and no pointers may be stored
+ * inside pData, since a copied pointer would lead to invalid memory access.
+ * @param size size of block + * @return SHashNode + */ +static SHashNode *doCreateHashNode(const char *key, uint32_t keyLen, const char *pData, size_t dataSize, + uint32_t hashVal) { + size_t totalSize = dataSize + sizeof(SHashNode) + keyLen; + + SHashNode *pNewNode = calloc(1, totalSize); + if (pNewNode == NULL) { + pError("failed to allocate memory, reason:%s", strerror(errno)); + return NULL; + } + + memcpy(pNewNode->data, pData, dataSize); + + pNewNode->key = pNewNode->data + dataSize; + memcpy(pNewNode->key, key, keyLen); + pNewNode->keyLen = keyLen; + + pNewNode->hashVal = hashVal; + + return pNewNode; +} + +static SHashNode *doUpdateHashNode(SHashNode *pNode, const char *key, uint32_t keyLen, const char *pData, + size_t dataSize) { + size_t size = dataSize + sizeof(SHashNode) + keyLen; + + SHashNode *pNewNode = (SHashNode *)realloc(pNode, size); + if (pNewNode == NULL) { + return NULL; + } + + memcpy(pNewNode->data, pData, dataSize); + + pNewNode->key = pNewNode->data + dataSize; + + assert(memcmp(pNewNode->key, key, keyLen) == 0 && keyLen == pNewNode->keyLen); + + memcpy(pNewNode->key, key, keyLen); + return pNewNode; +} + +/** + * insert the hash node at the front of the linked list + * + * @param pObj + * @param pNode + */ +static void doAddToHashTable(HashObj *pObj, SHashNode *pNode) { + assert(pNode != NULL); + + int32_t index = HASH_INDEX(pNode->hashVal, pObj->capacity); + SHashEntry *pEntry = pObj->hashList[index]; + + pNode->next = pEntry->next; + + if (pEntry->next) { + pEntry->next->prev = pNode; + } + + pEntry->next = pNode; + pNode->prev1 = pEntry; + + pEntry->num++; + pObj->size++; + +// char key[512] = {0}; +// memcpy(key, pNode->key, MIN(512, pNode->keyLen)); +// pTrace("key:%s %p add to hash table", key, pNode); +} + +/** + * add data node into hash table + * @param pObj hash object + * @param pNode hash node + */ +int32_t taosAddToHashTable(HashObj *pObj, const char *key, uint32_t keyLen, void *data, uint32_t size) { + if (pObj->multithreadSafe) { + __wr_lock(&pObj->lock); + } + + uint32_t hashVal = 0; + SHashNode *pNode = doGetNodeFromHashTable(pObj, key, keyLen, &hashVal); + + if (pNode == NULL) { // no data in hash table with the specified key, add it into hash table + taosHashTableResize(pObj); + + SHashNode *pNewNode = doCreateHashNode(key, keyLen, data, size, hashVal); + if (pNewNode == NULL) { + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + } + + return -1; + } + + doAddToHashTable(pObj, pNewNode); + } else { + SHashNode *pNewNode = doUpdateHashNode(pNode, key, keyLen, data, size); + if (pNewNode == NULL) { + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + } + + return -1; + } + + doUpdateHashTable(pObj, pNewNode); + } + + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + } + + return 0; +} + +char *taosGetDataFromHash(HashObj *pObj, const char *key, uint32_t keyLen) { + if (pObj->multithreadSafe) { + __rd_lock(&pObj->lock); + } + + uint32_t hashVal = 0; + SHashNode *pNode = doGetNodeFromHashTable(pObj, key, keyLen, &hashVal); + + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + } + + if (pNode != NULL) { + assert(pNode->hashVal == hashVal); + + return pNode->data; + } else { + return NULL; + } +} + +/** + * remove node in hash list + * @param pObj + * @param pNode + */ +void taosDeleteFromHashTable(HashObj *pObj, const char *key, uint32_t keyLen) { + if (pObj->multithreadSafe) { + __wr_lock(&pObj->lock); + } + + uint32_t val = 0; + SHashNode *pNode = doGetNodeFromHashTable(pObj, key, keyLen, &val); + if (pNode == NULL) { + if 
(pObj->multithreadSafe) {
+      __unlock(&pObj->lock);
+    }
+
+    return;
+  }
+
+  SHashNode *pNext = pNode->next;
+  if (pNode->prev != NULL) {
+    int32_t slot = HASH_INDEX(val, pObj->capacity);
+    if (pObj->hashList[slot]->next == pNode) {
+      pObj->hashList[slot]->next = pNext;
+    } else {
+      pNode->prev->next = pNext;
+    }
+  }
+
+  if (pNext != NULL) {
+    pNext->prev = pNode->prev;
+  }
+
+  uint32_t index = HASH_INDEX(pNode->hashVal, pObj->capacity);
+  SHashEntry *pEntry = pObj->hashList[index];
+  pEntry->num--;
+
+  pObj->size--;
+
+  pNode->next = NULL;
+  pNode->prev = NULL;
+
+  pTrace("key:%s %p remove from hash table", pNode->key, pNode);
+  tfree(pNode);
+
+  if (pObj->multithreadSafe) {
+    __unlock(&pObj->lock);
+  }
+}
+
+void taosCleanUpHashTable(void *handle) {
+  HashObj *pObj = (HashObj *)handle;
+  if (pObj == NULL || pObj->capacity <= 0) return;
+
+  SHashNode *pNode, *pNext;
+
+  if (pObj->multithreadSafe) {
+    __wr_lock(&pObj->lock);
+  }
+
+  if (pObj->hashList) {
+    for (int32_t i = 0; i < pObj->capacity; ++i) {
+      SHashEntry *pEntry = pObj->hashList[i];
+      pNode = pEntry->next;
+
+      while (pNode) {
+        pNext = pNode->next;
+        free(pNode);
+        pNode = pNext;
+      }
+
+      tfree(pEntry);
+    }
+
+    free(pObj->hashList);
+  }
+
+  if (pObj->multithreadSafe) {
+    __unlock(&pObj->lock);
+    __lock_destroy(&pObj->lock);
+  }
+
+  memset(pObj, 0, sizeof(HashObj));
+  free(pObj);
+}
+
+// for profile only
+int32_t taosGetHashMaxOverflowLength(HashObj* pObj) {
+  if (pObj == NULL || pObj->size == 0) {
+    return 0;
+  }
+
+  int32_t num = 0;
+
+  // scan every slot; hashList holds 'capacity' entries, not 'size'
+  for(int32_t i = 0; i < pObj->capacity; ++i) {
+    SHashEntry *pEntry = pObj->hashList[i];
+    if (num < pEntry->num) {
+      num = pEntry->num;
+    }
+  }
+
+  return num;
+}
+
+int32_t taosCheckHashTable(HashObj *pObj) {
+  for(int32_t i = 0; i < pObj->capacity; ++i) {
+    SHashEntry *pEntry = pObj->hashList[i];
+
+    SHashNode* pNode = pEntry->next;
+    if (pNode != NULL) {
+      assert(pEntry == pNode->prev1);
+      int32_t num = 1;
+
+      SHashNode* pNext = pNode->next;
+
+      while(pNext) {
+        assert(pNext->prev == pNode);
+
+        pNode = pNext;
+        pNext = pNext->next;
+        num++;
+      }
+
+      assert(num == pEntry->num);
+    }
+  }
+
+  return 0;
+}
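With the file complete, this is how the rest of the patch (vnodeRead.c) exercises the new API: pointer values keyed by a 32-bit sid. A usage sketch only; SMeterObj, POINTER_BYTES, and taosIntHash_32 come from the surrounding TDengine sources, so this compiles against the project's headers rather than standing alone.

```c
#include "hash.h"
#include "hashutil.h"

void hashTableUsageExample(SMeterObj **pMetersObj, int32_t numOfMeters) {
  void *ht = taosInitHashTable(numOfMeters, taosIntHash_32, false);

  // store the meter object pointers, keyed by the 32-bit sid
  for (int32_t i = 0; i < numOfMeters; ++i) {
    taosAddToHashTable(ht, (const char *)&pMetersObj[i]->sid, sizeof(pMetersObj[i]->sid),
                       (char *)&pMetersObj[i], POINTER_BYTES);
  }

  // taosGetDataFromHash returns the stored bytes, i.e. a pointer to the stored pointer
  int32_t sid  = pMetersObj[0]->sid;
  char   *data = taosGetDataFromHash(ht, (const char *)&sid, sizeof(sid));
  SMeterObj *pObj = (data != NULL) ? *(SMeterObj **)data : NULL;
  (void)pObj;

  taosCleanUpHashTable(ht);
}
```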
-711,7 +712,7 @@ void *taosUpdateDataFromCache(void *handle, char *key, char *pData, int size, in pObj->totalSize); } else { pNew = taosUpdateCacheImpl(pObj, pNode, key, keyLen, pData, size, duration * 1000L); - pTrace("key:%s updated.expireTime:%lld.refCnt:%d", key, pNode->time, pNode->refCount); + pTrace("key:%s updated.expireTime:%" PRIu64 ".refCnt:%d", key, pNode->time, pNode->refCount); } __cache_unlock(pObj); @@ -901,5 +902,46 @@ void taosCleanUpDataCache(void *handle) { } pObj->deleting = 1; - return; +} + +void* taosGetDataFromExists(void* handle, void* data) { + SCacheObj *pObj = (SCacheObj *)handle; + if (pObj == NULL || data == NULL) return NULL; + + size_t offset = offsetof(SDataNode, data); + SDataNode *ptNode = (SDataNode *)((char *)data - offset); + + if (ptNode->signature != (uint64_t) ptNode) { + pError("key: %p the data from cache is invalid", ptNode); + return NULL; + } + + int32_t ref = atomic_add_fetch_32(&ptNode->refCount, 1); + pTrace("%p add ref data in cache, refCnt:%d", data, ref); + + // the data is referenced by the cache and by at least one caller, so the reference count must be no less than 2 + assert(ref >= 2); + return data; +} + +void* taosTransferDataInCache(void* handle, void** data) { + SCacheObj *pObj = (SCacheObj *)handle; + if (pObj == NULL || data == NULL) return NULL; + + size_t offset = offsetof(SDataNode, data); + SDataNode *ptNode = (SDataNode *)((char *)(*data) - offset); + + if (ptNode->signature != (uint64_t) ptNode) { + pError("key: %p the data from cache is invalid", ptNode); + return NULL; + } + + assert(ptNode->refCount >= 1); + + char* d = *data; + + // clear its reference to old area + *data = NULL; + + return d; } diff --git a/src/util/src/textbuffer.c b/src/util/src/textbuffer.c index 3e71d90147aaa0e4e94992ad6846d7bf5efbbc28..e1c571f4c28a3b40ba7f7d2cdd33be5e4d965946 100644 --- a/src/util/src/textbuffer.c +++ b/src/util/src/textbuffer.c @@ -516,20 +516,20 @@ tMemBucket* tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nE if (pDesc->pSchema->numOfCols != 1 || pDesc->pSchema->colOffset[0] != 0) { pError("MemBucket:%p,only consecutive data is allowed,invalid numOfCols:%d or offset:%d", - *pBucket, pDesc->pSchema->numOfCols, pDesc->pSchema->colOffset[0]); + pBucket, pDesc->pSchema->numOfCols, pDesc->pSchema->colOffset[0]); tfree(pBucket); return NULL; } if (pDesc->pSchema->pFields[0].type != dataType) { - pError("MemBucket:%p,data type is not consistent,%d in schema, %d in param", *pBucket, + pError("MemBucket:%p,data type is not consistent,%d in schema, %d in param", pBucket, pDesc->pSchema->pFields[0].type, dataType); tfree(pBucket); return NULL; } if (pBucket->numOfTotalPages < pBucket->nTotalSlots) { - pWarn("MemBucket:%p,total buffer pages %d are not enough for all slots", *pBucket, pBucket->numOfTotalPages); + pWarn("MemBucket:%p,total buffer pages %d are not enough for all slots", pBucket, pBucket->numOfTotalPages); } pBucket->pSegs = (tMemBucketSegment *)malloc(pBucket->numOfSegs * sizeof(tMemBucketSegment)); @@ -540,7 +540,7 @@ tMemBucket* tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nE pBucket->pSegs[i].pBoundingEntries = NULL; } - pTrace("MemBucket:%p,created,buffer size:%d,elem size:%d", *pBucket, pBucket->numOfTotalPages * DEFAULT_PAGE_SIZE, + pTrace("MemBucket:%p,created,buffer size:%d,elem size:%d", pBucket, pBucket->numOfTotalPages * DEFAULT_PAGE_SIZE, pBucket->nElemSize); return pBucket; @@ -1258,6 +1258,7 @@ static tFilePage *loadIntoBucketFromDisk(tMemBucket *pMemBucket, int32_t segIdx,
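taosGetDataFromExists and taosTransferDataInCache both recover the owning SDataNode from a bare payload pointer via offsetof, then validate it through the self-referencing signature field before touching the reference count. A compact sketch of that container-of-plus-signature pattern; CacheNode, cacheAlloc, and cacheAcquire are hypothetical names, and the real code uses an atomic increment rather than plain ++:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Hypothetical node: signature is set to the node's own address at
// allocation time, so it doubles as a cheap "did this come from the
// cache?" check, like SDataNode::signature above.
typedef struct CacheNode {
  uint64_t signature;
  int      refCount;
  char     data[];  // payload handed out to callers
} CacheNode;

static void *cacheAlloc(const char *payload, size_t len) {
  CacheNode *node = malloc(sizeof(CacheNode) + len);
  if (node == NULL) return NULL;
  node->signature = (uint64_t)(uintptr_t)node;
  node->refCount = 1;  // the cache itself holds one reference
  memcpy(node->data, payload, len);
  return node->data;
}

// Recover the node from the payload pointer and take a reference,
// mirroring the offsetof arithmetic in taosGetDataFromExists.
static void *cacheAcquire(void *data) {
  CacheNode *node = (CacheNode *)((char *)data - offsetof(CacheNode, data));
  if (node->signature != (uint64_t)(uintptr_t)node) {
    fprintf(stderr, "%p is not a cache payload\n", data);
    return NULL;
  }
  node->refCount++;             // atomic_add_fetch_32 in the real code
  assert(node->refCount >= 2);  // cache ref + at least one caller
  return data;
}

int main(void) {
  void *d = cacheAlloc("hello", 6);
  assert(cacheAcquire(d) == d);
  printf("payload: %s, refCount: %d\n", (char *)d,
         ((CacheNode *)((char *)d - offsetof(CacheNode, data)))->refCount);
  free((char *)d - offsetof(CacheNode, data));
  return 0;
}
```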
for (uint32_t j = 0; j < pFlushInfo->numOfPages; ++j) { ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + UNUSED(ret); assert(pPage->numOfElems > 0); tColModelAppend(pDesc->pSchema, buffer, pPage->data, 0, pPage->numOfElems, pPage->numOfElems); @@ -1602,7 +1603,7 @@ void tColModelAppend(tColModel *dstModel, tFilePage *dstPage, void *srcData, int tOrderDescriptor *tOrderDesCreate(int32_t *orderColIdx, int32_t numOfOrderCols, tColModel *pModel, int32_t tsOrderType) { - tOrderDescriptor *desc = (tOrderDescriptor *)malloc(sizeof(tOrderDescriptor) + sizeof(int32_t) * numOfOrderCols); + tOrderDescriptor *desc = (tOrderDescriptor *)calloc(1, sizeof(tOrderDescriptor) + sizeof(int32_t) * numOfOrderCols); if (desc == NULL) { return NULL; } @@ -1917,6 +1918,7 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) for (uint32_t jx = 0; jx < pFlushInfo->numOfPages; ++jx) { ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + UNUSED(ret); tMemBucketPut(pMemBucket, pPage->data, pPage->numOfElems); } diff --git a/src/util/src/tglobalcfg.c b/src/util/src/tglobalcfg.c index 2db036824deea494682a3ae0a8bcd7e0bd84d0c5..45efcad56394b1507c60d383a60199d0b96afb02 100644 --- a/src/util/src/tglobalcfg.c +++ b/src/util/src/tglobalcfg.c @@ -56,11 +56,7 @@ int tscEmbedded = 0; */ int64_t tsMsPerDay[] = {86400000L, 86400000000L}; -#ifdef CLUSTER char tsMasterIp[TSDB_IPv4ADDR_LEN] = {0}; -#else -char tsMasterIp[TSDB_IPv4ADDR_LEN] = "127.0.0.1"; -#endif char tsSecondIp[TSDB_IPv4ADDR_LEN] = {0}; uint16_t tsMgmtShellPort = 6030; // udp[6030-6034] tcp[6030] uint16_t tsVnodeShellPort = 6035; // udp[6035-6039] tcp[6035] @@ -84,9 +80,20 @@ short tsNumOfVnodesPerCore = 8; short tsNumOfTotalVnodes = 0; short tsCheckHeaderFile = 0; +#ifdef _TD_ARM_32_ +int tsSessionsPerVnode = 100; +#else int tsSessionsPerVnode = 1000; +#endif + int tsCacheBlockSize = 16384; // 256 columns int tsAverageCacheBlocks = TSDB_DEFAULT_AVG_BLOCKS; +/** + * Change the meaning of affected rows: + * 0: affected rows do not include duplicate records + * 1: affected rows include duplicate records + */ +short tsAffectedRowsMod = 0; int tsRowsInFileBlock = 4096; float tsFileBlockMinPercent = 0.05; @@ -128,6 +135,10 @@ int tsEnableMonitorModule = 1; int tsRestRowLimit = 10240; int tsMaxSQLStringLen = TSDB_MAX_SQL_LEN; +// the maximum number of results for a projection query on a super table returned from +// one virtual node, ordered by timestamp +int tsMaxNumOfOrderedResults = 100000; + /* * denotes whether the server needs to compress the response message at the application layer before sending it to the client, including the query rsp, * metricmeta rsp, and multi-meter query rsp message bodies. The client compresses the submit message sent to the server.
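The UNUSED(ret) statements added around the fread calls keep ret available for debugging while silencing the compiler: assigning the result satisfies glibc's warn_unused_result attribute, and the void cast then suppresses the unused-but-set-variable warning. The macro definition below is the conventional idiom, assumed rather than copied from the TDengine headers:

```c
#include <stdio.h>

// Conventional definition: evaluate the expression, discard the value.
#define UNUSED(x) ((void)(x))

int main(void) {
  FILE *fp = fopen("/etc/hostname", "r");
  if (fp == NULL) return 1;

  char   buf[64];
  size_t ret = fread(buf, 1, sizeof(buf), fp);
  UNUSED(ret);  // the short-read count is deliberately ignored here

  fclose(fp);
  return 0;
}
```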
@@ -138,18 +149,29 @@ int tsMaxSQLStringLen = TSDB_MAX_SQL_LEN; */ int tsCompressMsgSize = -1; -char tsSocketType[4] = "udp"; // use UDP by default[option: udp, tcp] -int tsTimePrecision = TSDB_TIME_PRECISION_MILLI; // time precision, millisecond by default -int tsMinSlidingTime = 10; // 10 ms for sliding time, the value will changed in - // case of time precision changed -int tsMinIntervalTime = 10; // 10 ms for interval time range, changed accordingly -int tsMaxStreamComputDelay = 20000; // 20sec, the maximum value of stream - // computing delay, changed accordingly -int tsStreamCompStartDelay = 10000; // 10sec, the first stream computing delay - // time after system launched successfully, - // changed accordingly -int tsStreamCompRetryDelay = 10; // the stream computing delay time after - // executing failed, change accordingly +// use UDP by default [options: udp, tcp] +char tsSocketType[4] = "udp"; + +// time precision, millisecond by default +int tsTimePrecision = TSDB_TIME_PRECISION_MILLI; + +// 10 ms for sliding time; the value will change in case the time precision changes +int tsMinSlidingTime = 10; + +// 10 ms for interval time range, changed accordingly +int tsMinIntervalTime = 10; + +// 20sec, the maximum value of stream computing delay, changed accordingly +int tsMaxStreamComputDelay = 20000; + +// 10sec, the first stream computing delay time after the system launches successfully, changed accordingly +int tsStreamCompStartDelay = 10000; + +// the stream computing delay time after execution fails, changed accordingly +int tsStreamCompRetryDelay = 10; + +// The delayed computing ratio, 10% of the whole computing time window by default. +float tsStreamComputDelayRatio = 0.1; int tsProjectExecInterval = 10000; // every 10sec, the projection will be executed once int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance @@ -444,7 +466,7 @@ static void doInitGlobalConfig() { // ip address tsInitConfigOption(cfg++, "masterIp", tsMasterIp, TSDB_CFG_VTYPE_IPSTR, - TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_CLUSTER, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "secondIp", tsSecondIp, TSDB_CFG_VTYPE_IPSTR, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_CLUSTER, @@ -539,6 +561,9 @@ static void doInitGlobalConfig() { tsInitConfigOption(cfg++, "alternativeRole", &tsAlternativeRole, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLUSTER, 0, 2, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "affectedRowsMod", &tsAffectedRowsMod, TSDB_CFG_VTYPE_SHORT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, + 0, 1, 0, TSDB_CFG_UTYPE_NONE); // 0-any, 1-mgmt, 2-dnode // timer @@ -617,9 +642,12 @@ static void doInitGlobalConfig() { TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1000, 1000000000, 0, TSDB_CFG_UTYPE_MS); tsInitConfigOption(cfg++, "retryStreamCompDelay", &tsStreamCompRetryDelay, TSDB_CFG_VTYPE_INT, - TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, - 10, 1000000000, 0, TSDB_CFG_UTYPE_MS); - + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 1000000000, 0, TSDB_CFG_UTYPE_MS); + + + tsInitConfigOption(cfg++, "streamCompDelayRatio", &tsStreamComputDelayRatio, TSDB_CFG_VTYPE_FLOAT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0.1, 0.9, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "clog", &tsCommitLog, TSDB_CFG_VTYPE_SHORT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0, 1, 0,
TSDB_CFG_UTYPE_NONE); @@ -662,6 +690,10 @@ static void doInitGlobalConfig() { TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW, TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN, 0, TSDB_CFG_UTYPE_BYTE); + tsInitConfigOption(cfg++, "maxNumOfOrderedRes", &tsMaxNumOfOrderedResults, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW, + TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN, 0, TSDB_CFG_UTYPE_NONE); + // locale & charset tsInitConfigOption(cfg++, "timezone", tsTimezone, TSDB_CFG_VTYPE_STRING, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, @@ -787,11 +819,9 @@ static void doInitGlobalConfig() { TSDB_CFG_CTYPE_B_CONFIG, 0, 1, 0, TSDB_CFG_UTYPE_NONE); -#ifdef CLUSTER tsInitConfigOption(cfg++, "anyIp", &tsAnyIp, TSDB_CFG_VTYPE_INT, - TSDB_CFG_CTYPE_B_CONFIG, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLUSTER, 0, 1, 0, TSDB_CFG_UTYPE_NONE); -#endif // version info tsInitConfigOption(cfg++, "gitinfo", gitinfo, TSDB_CFG_VTYPE_STRING, @@ -1108,9 +1138,9 @@ void tsSetAllDebugFlag() { * In case that the setLocale failed to be executed, the right charset needs to be set. */ void tsSetLocale() { - char msgLocale[] = "Invalid locale:%s, please set the valid locale in config file"; - char msgCharset[] = "Invalid charset:%s, please set the valid charset in config file"; - char msgCharset1[] = "failed to get charset, please set the valid charset in config file"; + char msgLocale[] = "Invalid locale:%s, please set the valid locale in config file\n"; + char msgCharset[] = "Invalid charset:%s, please set the valid charset in config file\n"; + char msgCharset1[] = "failed to get charset, please set the valid charset in config file\n"; char *locale = setlocale(LC_CTYPE, tsLocale); diff --git a/src/util/src/thashutil.c b/src/util/src/thashutil.c index b6b3ea682ef945a838f67ca227c8033624234725..cf16efe2f8e539f9611952111bafc5d4ff214d3e 100644 --- a/src/util/src/thashutil.c +++ b/src/util/src/thashutil.c @@ -8,6 +8,7 @@ * */ #include "tutil.h" +#include "hashutil.h" #define ROTL32(x, r) ((x) << (r) | (x) >> (32 - (r))) @@ -67,7 +68,7 @@ static void MurmurHash3_32_s(const void *key, int len, uint32_t seed, void *out) *(uint32_t *)out = h1; } -uint32_t MurmurHash3_32(const void *key, int len) { +uint32_t MurmurHash3_32(const char *key, uint32_t len) { const int32_t hashSeed = 0x12345678; uint32_t val = 0; @@ -75,3 +76,31 @@ uint32_t MurmurHash3_32(const void *key, int len) { return val; } + +uint32_t taosIntHash_32(const char *key, uint32_t UNUSED_PARAM(len)) { return *(uint32_t *)key; } +uint32_t taosIntHash_16(const char *key, uint32_t UNUSED_PARAM(len)) { return *(uint16_t *)key; } +uint32_t taosIntHash_8(const char *key, uint32_t UNUSED_PARAM(len)) { return *(uint8_t *)key; } + +uint32_t taosIntHash_64(const char *key, uint32_t UNUSED_PARAM(len)) { + uint64_t val = *(uint64_t *)key; + + uint64_t hash = val >> 16U; + hash += (val & 0xFFFFU); + + return hash; +} + +_hash_fn_t taosGetDefaultHashFunction(int32_t type) { + _hash_fn_t fn = NULL; + switch(type) { + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: fn = taosIntHash_64;break; + case TSDB_DATA_TYPE_BINARY: fn = MurmurHash3_32;break; + case TSDB_DATA_TYPE_INT: fn = taosIntHash_32; break; + case TSDB_DATA_TYPE_SMALLINT: fn = taosIntHash_16; break; + case TSDB_DATA_TYPE_TINYINT: fn = taosIntHash_8; break; + default: fn = taosIntHash_32;break; + } + + return fn; +} \ No newline at end of file diff --git a/src/util/src/thistogram.c b/src/util/src/thistogram.c index 
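taosGetDefaultHashFunction dispatches on the column type through the _hash_fn_t pointer: fixed-width integer keys are reinterpreted (the 64-bit variant folded down to 32 bits), while variable-length binary keys go through MurmurHash3_32. A runnable sketch of the same dispatch with local stand-ins for the TSDB_DATA_TYPE_* constants and a toy FNV-1a in place of MurmurHash3:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t (*hash_fn_t)(const char *key, uint32_t len);

// Integer keys: reinterpret the bytes directly.
static uint32_t intHash32(const char *key, uint32_t len) {
  (void)len;
  uint32_t v;
  memcpy(&v, key, sizeof(v));  // memcpy avoids unaligned access
  return v;
}

// 64-bit keys are folded into 32 bits the same way taosIntHash_64 does.
static uint32_t intHash64(const char *key, uint32_t len) {
  (void)len;
  uint64_t v;
  memcpy(&v, key, sizeof(v));
  return (uint32_t)((v >> 16U) + (v & 0xFFFFU));
}

// Toy string hash (FNV-1a) standing in for MurmurHash3_32.
static uint32_t strHash(const char *key, uint32_t len) {
  uint32_t h = 2166136261u;
  for (uint32_t i = 0; i < len; ++i) h = (h ^ (uint8_t)key[i]) * 16777619u;
  return h;
}

enum { TYPE_INT, TYPE_BIGINT, TYPE_BINARY };  // stand-ins for TSDB_DATA_TYPE_*

static hash_fn_t defaultHashFn(int type) {
  switch (type) {
    case TYPE_BIGINT: return intHash64;
    case TYPE_BINARY: return strHash;
    default:          return intHash32;
  }
}

int main(void) {
  int64_t ts = 1577836800000LL;
  hash_fn_t fn = defaultHashFn(TYPE_BIGINT);
  printf("bigint key -> %u\n", fn((const char *)&ts, sizeof(ts)));

  const char *name = "meters";
  printf("binary key -> %u\n", defaultHashFn(TYPE_BINARY)(name, 6));
  return 0;
}
```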
a3f6e7203cb63f8fbb80f3f524b510820b39facf..93046cf796220c1d0fb2a64d1b3501f254ea351d 100644 --- a/src/util/src/thistogram.c +++ b/src/util/src/thistogram.c @@ -453,7 +453,7 @@ void tHistogramPrint(SHistogramInfo* pHisto) { for (int32_t i = 0; i < pHisto->numOfEntries; ++i) { SHistBin* pEntry = (SHistBin*)pNode->pData; - printf("%d: (%f, %lld)\n", i + 1, pEntry->val, pEntry->num); + printf("%d: (%f, %" PRId64 ")\n", i + 1, pEntry->val, pEntry->num); pNode = pNode->pForward[0]; } #endif diff --git a/src/util/src/tinterpolation.c b/src/util/src/tinterpolation.c index ee0c7aa0097d4ae3994b6264eedd6de94b38699f..5df07a5c430947ecbac516cedc79238424d70c17 100644 --- a/src/util/src/tinterpolation.c +++ b/src/util/src/tinterpolation.c @@ -37,7 +37,7 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t timeRange, char * here we revised the start time of day according to the local time zone, * but in case of DST, the start time of one day need to be dynamically decided. * - * TODO dynmaically decide the start time of a day + * TODO dynamically decide the start time of a day */ #if defined(WINDOWS) && _MSC_VER >= 1900 @@ -77,12 +77,24 @@ void taosInitInterpoInfo(SInterpolationInfo* pInterpoInfo, int32_t order, int64_ tfree(pInterpoInfo->prevValues); } +// the SInterpolationInfo itself will not be released +void taosDestoryInterpoInfo(SInterpolationInfo *pInterpoInfo) { + if (pInterpoInfo == NULL) { + return; + } + + tfree(pInterpoInfo->prevValues); + tfree(pInterpoInfo->nextValues); + + tfree(pInterpoInfo->pTags); +} + void taosInterpoSetStartInfo(SInterpolationInfo* pInterpoInfo, int32_t numOfRawDataInRows, int32_t type) { if (type == TSDB_INTERPO_NONE) { return; } - pInterpoInfo->rowIdx = INTERPOL_IS_ASC_INTERPOL(pInterpoInfo) ? 0 : numOfRawDataInRows - 1; + pInterpoInfo->rowIdx = 0;//INTERPOL_IS_ASC_INTERPOL(pInterpoInfo) ? 0 : numOfRawDataInRows - 1; pInterpoInfo->numOfRawDataInRows = numOfRawDataInRows; } @@ -106,14 +118,14 @@ int32_t taosGetNumOfResWithoutLimit(SInterpolationInfo* pInterpoInfo, int64_t* p if (numOfAvailRawData > 0) { int32_t finalNumOfResult = 0; - if (pInterpoInfo->order == TSQL_SO_ASC) { +// if (pInterpoInfo->order == TSQL_SO_ASC) { // get last timestamp, calculate the result size int64_t lastKey = pPrimaryKeyArray[pInterpoInfo->numOfRawDataInRows - 1]; - finalNumOfResult = (int32_t)((lastKey - pInterpoInfo->startTimestamp) / nInterval) + 1; - } else { // todo error less than one!!! - TSKEY lastKey = pPrimaryKeyArray[0]; - finalNumOfResult = (int32_t)((pInterpoInfo->startTimestamp - lastKey) / nInterval) + 1; - } + finalNumOfResult = (int32_t)(labs(lastKey - pInterpoInfo->startTimestamp) / nInterval) + 1; +// } else { // todo error less than one!!! 
+// TSKEY lastKey = pPrimaryKeyArray[0]; +// finalNumOfResult = (int32_t)((pInterpoInfo->startTimestamp - lastKey) / nInterval) + 1; +// } assert(finalNumOfResult >= numOfAvailRawData); return finalNumOfResult; @@ -186,11 +198,11 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi } static char* getPos(char* data, int32_t bytes, int32_t order, int32_t capacity, int32_t index) { - if (order == TSQL_SO_ASC) { +// if (order == TSQL_SO_ASC) { return data + index * bytes; - } else { - return data + (capacity - index - 1) * bytes; - } +// } else { +// return data + (capacity - index - 1) * bytes; +// } } static void setTagsValueInInterpolation(tFilePage** data, char** pTags, tColModel* pModel, int32_t order, int32_t start, @@ -283,8 +295,8 @@ static void doInterpoResultImpl(SInterpolationInfo* pInterpoInfo, int16_t interp int32_t taosDoInterpoResult(SInterpolationInfo* pInterpoInfo, int16_t interpoType, tFilePage** data, int32_t numOfRawDataInRows, int32_t outputRows, int64_t nInterval, - int64_t* pPrimaryKeyArray, tColModel* pModel, char** srcData, int64_t* defaultVal, - int32_t* functionIDs, int32_t bufSize) { + const int64_t* pPrimaryKeyArray, tColModel* pModel, char** srcData, int64_t* defaultVal, + const int32_t* functionIDs, int32_t bufSize) { int32_t num = 0; pInterpoInfo->numOfCurrentInterpo = 0; @@ -385,7 +397,7 @@ int32_t taosDoInterpoResult(SInterpolationInfo* pInterpoInfo, int16_t interpoTyp } pInterpoInfo->startTimestamp += (nInterval * step); - pInterpoInfo->rowIdx += step; + pInterpoInfo->rowIdx += 1; num += 1; if ((pInterpoInfo->rowIdx >= pInterpoInfo->numOfRawDataInRows && INTERPOL_IS_ASC_INTERPOL(pInterpoInfo)) || diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 1a7f672e00321c0891aa54ae9f3cc3efedb89d54..21818e572f3fc49a2841c2f494362e2a7103f9f0 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -45,6 +45,7 @@ typedef struct { uint32_t uDebugFlag = 131; // all the messages short tsAsyncLog = 1; +static pid_t logPid = 0; static SLogBuff *logHandle = NULL; static int taosLogFileNum = 1; static int taosLogMaxLines = 0; @@ -82,6 +83,11 @@ int taosStartLog() { } int taosInitLog(char *logName, int numOfLogLines, int maxFiles) { + +#ifdef LINUX + logPid = (pid_t)syscall(SYS_gettid); +#endif + logHandle = taosLogBuffNew(TSDB_DEFAULT_LOG_BUF_SIZE); if (logHandle == NULL) return -1; @@ -306,8 +312,8 @@ char *tprefix(char *prefix) { sprintf(prefix, "%02d/%02d %02d:%02d:%02d.%06d 0x%lld ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); #else - sprintf(prefix, "%02d/%02d %02d:%02d:%02d.%06d 0x%lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, - ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); + sprintf(prefix, "%02d/%02d %02d:%02d:%02d.%06d %d 0x%lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, logPid, pthread_self()); #endif return prefix; } @@ -333,8 +339,8 @@ void tprintf(const char *const flags, int dflag, const char *const format, ...) 
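The tlog.c hunks here cache the kernel thread id once at init time via syscall(SYS_gettid) and print it in every log prefix next to pthread_self(), which makes log lines easy to correlate with tools such as top -H. A hedged sketch of building such a prefix; the exact field layout of the real taosd prefix may differ:

```c
// Linux-only sketch: gettid() had no glibc wrapper on older systems,
// so the id is fetched once via syscall(), as the patch does.
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

static pid_t logPid = 0;

static void logInit(void) { logPid = (pid_t)syscall(SYS_gettid); }

// Build "MM/DD hh:mm:ss.uuuuuu <pid> 0x<thread> " into prefix.
static char *logPrefix(char *prefix) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  struct tm tmv;
  time_t secs = tv.tv_sec;
  localtime_r(&secs, &tmv);
  sprintf(prefix, "%02d/%02d %02d:%02d:%02d.%06d %d 0x%lx ",
          tmv.tm_mon + 1, tmv.tm_mday, tmv.tm_hour, tmv.tm_min, tmv.tm_sec,
          (int)tv.tv_usec, (int)logPid, (unsigned long)pthread_self());
  return prefix;
}

int main(void) {
  logInit();
  char prefix[64];
  printf("%sserver started\n", logPrefix(prefix));
  return 0;
}
```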
len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%lld ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); #else - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, - ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, logPid, pthread_self()); #endif len += sprintf(buffer + len, "%s", flags); @@ -424,8 +430,8 @@ void taosPrintLongString(const char *const flags, int dflag, const char *const f len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%lld ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); #else - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, - ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, logPid, pthread_self()); #endif len += sprintf(buffer + len, "%s", flags); diff --git a/src/util/src/tnote.c b/src/util/src/tnote.c index d12cc6e613cc9ce80574ab456e4fddb0ea4d75ad..7a133590d2a450d8e8b688bc63515c0ad9e81912 100644 --- a/src/util/src/tnote.c +++ b/src/util/src/tnote.c @@ -231,8 +231,13 @@ void taosNotePrint(taosNoteInfo * pNote, const char * const format, ...) gettimeofday(&timeSecs, NULL); curTime = timeSecs.tv_sec; ptm = localtime_r(&curTime, &Tm); - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); - +#ifndef LINUX + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%lld ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, + ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); +#else + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); +#endif va_start(argpointer, format); len += vsnprintf(buffer + len, MAX_NOTE_LINE_SIZE - len, format, argpointer); va_end(argpointer); diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index 0ebee5a8f4379b168e7c6e17a68daecf7cd0b457..7ab004646e3094d1a231e30eafaf6a966f0bba8b 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -516,7 +516,7 @@ int taosCopyFds(int sfd, int dfd, int64_t len) { int retLen = taosReadMsg(sfd, temp, (int)readLen); if (readLen != retLen) { - pError("read error, readLen:%d retLen:%d len:%ld leftLen:%ld, reason:%s", readLen, retLen, len, leftLen, + pError("read error, readLen:%d retLen:%d len:%" PRId64 " leftLen:%" PRId64 ", reason:%s", readLen, retLen, len, leftLen, strerror(errno)); return -1; } @@ -524,7 +524,7 @@ int taosCopyFds(int sfd, int dfd, int64_t len) { writeLen = taosWriteMsg(dfd, temp, readLen); if (readLen != writeLen) { - pError("copy error, readLen:%d writeLen:%d len:%ld leftLen:%ld, reason:%s", readLen, writeLen, len, leftLen, + pError("copy error, readLen:%d writeLen:%d len:%" PRId64 " leftLen:%" PRId64 ", reason:%s", readLen, writeLen, len, leftLen, strerror(errno)); return -1; } diff --git a/src/util/src/ttime.c b/src/util/src/ttime.c index 05ba01979e7e4281f760bd1573de0675a8d03e66..65c5d0ea4c86f2cd05628bbc2f9e35da89d0d7c6 
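Several hunks in this patch (tcache.c, thistogram.c, and the tsocket.c one above) replace %lld/%ld with the PRId64/PRIu64 macros from <inttypes.h>, so the same format string is correct on LP64 Linux and LLP64 Windows alike. The macros expand to string literals that concatenate with their neighbors:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t  len = 9000000000LL;  // does not fit in a 32-bit long
  uint64_t expireTime = 1580889600000ULL;

  // PRId64 expands to "ld" on LP64 and "lld" on LLP64, so this one
  // source line formats correctly on both platforms.
  printf("len:%" PRId64 " expireTime:%" PRIu64 "\n", len, expireTime);
  return 0;
}
```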
100644 --- a/src/util/src/ttime.c +++ b/src/util/src/ttime.c @@ -24,6 +24,97 @@ #include "ttime.h" #include "tutil.h" +/* + * mktime64 - Converts date to seconds. + * Converts Gregorian date to seconds since 1970-01-01 00:00:00. + * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 + * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. + * + * [For the Julian calendar (which was used in Russia before 1917, + * Britain & colonies before 1752, anywhere else before 1582, + * and is still in use by some communities) leave out the + * -year/100+year/400 terms, and add 10.] + * + * This algorithm was first published by Gauss (I think). + * + * A leap second can be indicated by calling this function with sec as + * 60 (allowable under ISO 8601). The leap second is treated the same + * as the following second since they don't exist in UNIX time. + * + * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight + * tomorrow - (allowable under ISO 8601) is supported. + */ +int64_t user_mktime64(const unsigned int year0, const unsigned int mon0, + const unsigned int day, const unsigned int hour, + const unsigned int min, const unsigned int sec) +{ + unsigned int mon = mon0, year = year0; + + /* 1..12 -> 11,12,1..10 */ + if (0 >= (int) (mon -= 2)) { + mon += 12; /* Puts Feb last since it has leap day */ + year -= 1; + } + + int64_t res = (((((int64_t) (year/4 - year/100 + year/400 + 367*mon/12 + day) + + year*365 - 719499)*24 + hour)*60 + min)*60 + sec); + + return (res + timezone); +} +// ==== mktime() kernel code =================// +static int64_t m_deltaUtc = 0; +void deltaToUtcInitOnce() { + struct tm tm = {0}; + + (void)strptime("1970-01-01 00:00:00", (const char *)("%Y-%m-%d %H:%M:%S"), &tm); + m_deltaUtc = (int64_t)mktime(&tm); + //printf("====delta:%lld\n\n", seconds); + return; +} + +int64_t user_mktime(struct tm * tm) +{ +#define TAOS_MINUTE 60 +#define TAOS_HOUR (60*TAOS_MINUTE) +#define TAOS_DAY (24*TAOS_HOUR) +#define TAOS_YEAR (365*TAOS_DAY) + +static int month[12] = { + 0, + TAOS_DAY*(31), + TAOS_DAY*(31+29), + TAOS_DAY*(31+29+31), + TAOS_DAY*(31+29+31+30), + TAOS_DAY*(31+29+31+30+31), + TAOS_DAY*(31+29+31+30+31+30), + TAOS_DAY*(31+29+31+30+31+30+31), + TAOS_DAY*(31+29+31+30+31+30+31+31), + TAOS_DAY*(31+29+31+30+31+30+31+31+30), + TAOS_DAY*(31+29+31+30+31+30+31+31+30+31), + TAOS_DAY*(31+29+31+30+31+30+31+31+30+31+30) +}; + + int64_t res; + int year; + + year= tm->tm_year - 70; + res= TAOS_YEAR*year + TAOS_DAY*((year+1)/4); + res+= month[tm->tm_mon]; + + if(tm->tm_mon > 1 && ((year+2)%4)) { + res-= TAOS_DAY; + } + + res+= TAOS_DAY*(tm->tm_mday-1); + res+= TAOS_HOUR*tm->tm_hour; + res+= TAOS_MINUTE*tm->tm_min; + res+= tm->tm_sec; + + return res + m_deltaUtc; + +} + + static int64_t parseFraction(char* str, char** end, int32_t timePrec); static int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec); static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec); @@ -237,7 +328,10 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) { } /* mktime will be affected by TZ, set by using taos_options */ - int64_t seconds = mktime(&tm); + //int64_t seconds = mktime(&tm); + //int64_t seconds = (int64_t)user_mktime(&tm); + int64_t seconds = user_mktime64(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); + int64_t fraction = 0; if (*str == '.') { diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index 
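Switching parseLocaltime from mktime(3) to user_mktime64 replaces a locale- and TZ-sensitive libc call on the timestamp-parsing path with pure integer arithmetic plus a cached timezone offset. A quick self-check of the Gauss formula (with the timezone term fixed to 0, i.e. UTC, so the example is self-contained): 2020-01-01 00:00:00 lies 18262 days after the epoch, i.e. 1577836800 seconds.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Same arithmetic as user_mktime64 above, with the timezone term fixed
// to 0 (UTC) so the example is self-contained.
static int64_t mktime64_utc(unsigned int year, unsigned int mon,
                            unsigned int day, unsigned int hour,
                            unsigned int min, unsigned int sec) {
  if ((int)(mon -= 2) <= 0) {  // put Feb last so the leap day ends the year
    mon += 12;
    year -= 1;
  }
  return ((((int64_t)(year / 4 - year / 100 + year / 400 + 367 * mon / 12 + day)
            + year * 365 - 719499) * 24 + hour) * 60 + min) * 60 + sec;
}

int main(void) {
  assert(mktime64_utc(1970, 1, 1, 0, 0, 0) == 0);
  assert(mktime64_utc(2020, 1, 1, 0, 0, 0) == 18262LL * 86400);  // 1577836800
  printf("2020-01-01T00:00:00Z -> %lld\n",
         (long long)mktime64_utc(2020, 1, 1, 0, 0, 0));
  return 0;
}
```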
0fbc4dc93503db57dd1fd98e79516cfbd89a5515..a1e7a6828c87e846fd1600a33a06d8b171b4523e 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -14,11 +14,13 @@ */ #include "os.h" +#include "hashutil.h" #include "shash.h" #include "tutil.h" #include "tsqldef.h" #include "tstoken.h" #include "ttypes.h" +#include "hash.h" // All the keywords of the SQL language are stored in a hash table typedef struct SKeyword { @@ -225,11 +227,14 @@ static SKeyword keywordTable[] = { {"STABLE", TK_STABLE}, {"FILE", TK_FILE}, {"VNODES", TK_VNODES}, + {"UNION", TK_UNION}, + {"RATE", TK_RATE}, + {"IRATE", TK_IRATE}, + {"SUM_RATE", TK_SUM_RATE}, + {"AVG_RATE", TK_AVG_RATE}, + {"AVG_IRATE", TK_AVG_IRATE}, }; -/* This is the hash table */ -static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; - static const char isIdChar[] = { /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */ @@ -243,27 +248,22 @@ static const char isIdChar[] = { }; static void* KeywordHashTable = NULL; -int tSQLKeywordCode(const char* z, int n) { - int i; - static char needInit = 1; - if (needInit) { - // Initialize the keyword hash table - pthread_mutex_lock(&mutex); - - // double check - if (needInit) { - int nk = tListLen(keywordTable); - - KeywordHashTable = taosInitStrHash(nk, POINTER_BYTES, taosHashStringStep1); - for (i = 0; i < nk; i++) { - keywordTable[i].len = strlen(keywordTable[i].name); - void* ptr = &keywordTable[i]; - taosAddStrHash(KeywordHashTable, (char*)keywordTable[i].name, (void*)&ptr); - } - needInit = 0; - } - pthread_mutex_unlock(&mutex); + +static void doInitKeywordsTable() { + int numOfEntries = tListLen(keywordTable); + + KeywordHashTable = taosInitHashTable(numOfEntries, MurmurHash3_32, false); + for (int32_t i = 0; i < numOfEntries; i++) { + keywordTable[i].len = strlen(keywordTable[i].name); + void* ptr = &keywordTable[i]; + taosAddToHashTable(KeywordHashTable, keywordTable[i].name, keywordTable[i].len, (void*)&ptr, POINTER_BYTES); } +} + +static pthread_once_t keywordsHashTableInit = PTHREAD_ONCE_INIT; + +int tSQLKeywordCode(const char* z, int n) { + pthread_once(&keywordsHashTableInit, doInitKeywordsTable); char key[128] = {0}; for (int32_t j = 0; j < n; ++j) { @@ -274,7 +274,7 @@ int tSQLKeywordCode(const char* z, int n) { } } - SKeyword** pKey = (SKeyword**)taosGetStrHashData(KeywordHashTable, key); + SKeyword** pKey = (SKeyword**)taosGetDataFromHash(KeywordHashTable, key, n); if (pKey != NULL) { return (*pKey)->type; } else { diff --git a/src/util/src/ttypes.c b/src/util/src/ttypes.c index b048748d95926a3d566b7dc82aecdd6dcc936eae..ae994cb77b7cdb27f3e857115d6d1db7df9bd9b0 100644 --- a/src/util/src/ttypes.c +++ b/src/util/src/ttypes.c @@ -163,14 +163,13 @@ void tVariantCreateFromBinary(tVariant *pVar, char *pz, uint32_t len, uint32_t t void tVariantDestroy(tVariant *pVar) { if (pVar == NULL) return; - if ((pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) && pVar->nLen > 0) { - free(pVar->pz); - pVar->pz = NULL; + if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) { + tfree(pVar->pz); pVar->nLen = 0; } } -void tVariantAssign(tVariant *pDst, tVariant *pSrc) { +void tVariantAssign(tVariant *pDst, const tVariant *pSrc) { if (pSrc == NULL || pDst == NULL) return; *pDst = *pSrc; @@ -726,7 +725,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, char type) { *((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL; return 0; } else { - double value; + double value = 0; int32_t ret; ret = 
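The tokenizer change replaces hand-rolled double-checked locking (a mutex plus a needInit flag) with pthread_once, which guarantees the keyword-table initializer runs exactly once even when several threads hit the first lookup concurrently. A minimal sketch of the idiom (link with -lpthread):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_once_t tableInit = PTHREAD_ONCE_INIT;
static int *keywordTable = NULL;  // stand-in for the real hash table

static void doInitTable(void) {
  static int storage[4] = {1, 2, 3, 4};
  keywordTable = storage;
  printf("initialized once\n");
}

static int lookup(int idx) {
  // Every caller funnels through pthread_once; only the first call
  // (across all threads) executes doInitTable, the rest just proceed.
  pthread_once(&tableInit, doInitTable);
  return keywordTable[idx & 3];
}

int main(void) {
  printf("%d %d\n", lookup(0), lookup(5));  // doInitTable runs exactly once
  return 0;
}
```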
convertToDouble(pVariant->pz, pVariant->nLen, &value); if ((errno == ERANGE && value == -1) || (ret != 0)) { @@ -977,11 +976,21 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) { break; } case TSDB_DATA_TYPE_FLOAT: { + #ifdef _TD_ARM_32_ + float fv = GET_FLOAT_VAL(src); + SET_FLOAT_VAL_ALIGN(val, &fv); + #else *((float *)val) = GET_FLOAT_VAL(src); + #endif break; }; case TSDB_DATA_TYPE_DOUBLE: { + #ifdef _TD_ARM_32_ + double dv = GET_DOUBLE_VAL(src); + SET_DOUBLE_VAL_ALIGN(val, &dv); + #else *((double *)val) = GET_DOUBLE_VAL(src); + #endif break; }; case TSDB_DATA_TYPE_TIMESTAMP: diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c index 4db7adf2074f9b83e52c2f51a84fc72befcabcca..21d147d9473fb643df3bdc719aa19699619ea2d7 100644 --- a/src/util/src/tutil.c +++ b/src/util/src/tutil.c @@ -568,13 +568,13 @@ char *taosIpStr(uint32_t ipInt) { void taosCleanupTier() {} #endif -FORCE_INLINE float taos_align_get_float(char* pBuf) { +FORCE_INLINE float taos_align_get_float(const char* pBuf) { float fv = 0; *(int32_t*)(&fv) = *(int32_t*)pBuf; return fv; } -FORCE_INLINE double taos_align_get_double(char* pBuf) { +FORCE_INLINE double taos_align_get_double(const char* pBuf) { double dv = 0; *(int64_t*)(&dv) = *(int64_t*)pBuf; return dv; diff --git a/src/util/src/version.c b/src/util/src/version.c index 9d75eb897436037babfac1e27610950cd9aaf094..d1294801ee7e29cdc91e8096889e7b06baf7bb67 100644 --- a/src/util/src/version.c +++ b/src/util/src/version.c @@ -1,5 +1,7 @@ -char version[64] = "1.6.5.1"; +char version[64] = "1.6.5.4"; char compatible_version[64] = "1.6.1.0"; -char gitinfo[128] = "2ea714387009421beb35e7f03b94c6a87d22529a"; -char gitinfoOfInternal[128] = "950f54ac026bc05bcec5cff356f4964a18d635bd"; -char buildinfo[512] = "Built by ubuntu at 2019-12-21 11:14"; +char gitinfo[128] = "3264067e97300c84caa61ac909d548c9ca56de6b"; +char gitinfoOfInternal[128] = "da88f4a2474737d1f9c76adcf0ff7fd0975e7342"; +char buildinfo[512] = "Built by root at 2020-02-05 14:38"; + +void libtaos_1_6_5_4_Linux_x64() {}; diff --git a/tests/examples/c/CMakeLists.txt b/tests/examples/c/CMakeLists.txt index af0b8cd18d391ab328c669514a1596f9cd8f986f..287fca7d410b88d240642a57ec194b3d0c686975 100644 --- a/tests/examples/c/CMakeLists.txt +++ b/tests/examples/c/CMakeLists.txt @@ -1,6 +1,6 @@ PROJECT(TDengine) -IF (TD_WINDOWS) +IF (TD_WINDOWS_64) INCLUDE_DIRECTORIES(${TD_ROOT_DIR}/deps/pthread) ENDIF () diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile index ac8ff21aaffc47bbf2a36386f06996b6ad13a086..0a4b8ee9d2bd00ab3daaac0c3a93497de4fd03f8 100644 --- a/tests/examples/c/makefile +++ b/tests/examples/c/makefile @@ -3,21 +3,23 @@ ROOT=./ TARGET=exe -LFLAGS = '-Wl,-rpath,/usr/local/taos/driver' -ltaos -lpthread -lm -lrt -CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 -std=gnu99 +LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt +#LFLAGS = '-Wl,-rpath,/home/zbm/project/td/debug/build/lib/' -L/home/zbm/project/td/debug/build/lib -ltaos -lpthread -lm -lrt +CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 \ + -I/usr/local/taos/include -std=gnu99 all: $(TARGET) exe: gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)/asyncdemo $(LFLAGS) gcc $(CFLAGS) ./demo.c -o $(ROOT)/demo $(LFLAGS) + gcc $(CFLAGS) ./prepare.c -o $(ROOT)/prepare 
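The _TD_ARM_32_ branches in assignVal exist because 32-bit ARM cannot safely dereference a float/double pointer at an unaligned address; GET_FLOAT_VAL and SET_FLOAT_VAL_ALIGN presumably stage the value through an aligned temporary, much as taos_align_get_float does with its int32_t reinterpretation. A portable sketch of the same alignment-safe load using memcpy, which compilers lower to a single instruction where alignment permits:

```c
#include <stdio.h>
#include <string.h>

// Read a float/double from a possibly unaligned buffer position.
// memcpy makes no alignment assumptions, unlike *(float *)buf.
static float getFloatUnaligned(const char *buf) {
  float v;
  memcpy(&v, buf, sizeof(v));
  return v;
}

static double getDoubleUnaligned(const char *buf) {
  double v;
  memcpy(&v, buf, sizeof(v));
  return v;
}

int main(void) {
  char row[1 + sizeof(double)];  // payload starts at offset 1: unaligned
  double d = 3.25;
  memcpy(row + 1, &d, sizeof(d));
  printf("%f\n", getDoubleUnaligned(row + 1));

  float f = 1.5f;
  memcpy(row + 1, &f, sizeof(f));
  printf("%f\n", getFloatUnaligned(row + 1));
  return 0;
}
```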
$(LFLAGS) gcc $(CFLAGS) ./stream.c -o $(ROOT)/stream $(LFLAGS) - gcc $(CFLAGS) ./subscribe.c -o $(ROOT)/subscribe $(LFLAGS) + gcc $(CFLAGS) ./subscribe.c -o $(ROOT)subscribe $(LFLAGS) clean: - rm $(ROOT)asyncdemo - rm $(ROOT)demo - rm $(ROOT)stream - rm $(ROOT)subscribe - - \ No newline at end of file + rm $(ROOT)/asyncdemo + rm $(ROOT)/demo + rm $(ROOT)/prepare + rm $(ROOT)/stream + rm $(ROOT)/subscribe diff --git a/tests/examples/c/stream.c b/tests/examples/c/stream.c index 623775c7801c196f2a78ac99cd3ca5fc39fe86f5..060f5b84ff276579019d3278552e424b2a4198e9 100755 --- a/tests/examples/c/stream.c +++ b/tests/examples/c/stream.c @@ -162,12 +162,13 @@ void* insert_rows(void *sarg) } // insert data - int index = 0; + int64_t begin = (int64_t)time(NULL); + int index = 0; while (1) { if (g_thread_exit_flag) break; index++; - sprintf(command, "insert into %s values (%ld, %d)", winfo->tbl_name, 1546300800000+index*1000, index); + sprintf(command, "insert into %s values (%ld, %d)", winfo->tbl_name, (begin + index) * 1000, index); if (taos_query(taos, command)) { printf("failed to insert row [%s], reason:%s\n", command, taos_errstr(taos)); } diff --git a/tests/examples/c/subscribe.c b/tests/examples/c/subscribe.c index 219fa133e02f24cbafb5d446ff3a4aacac2f9c67..0bf93f6f2ddd81e715e7d9cf0b5abfd054635060 100644 --- a/tests/examples/c/subscribe.c +++ b/tests/examples/c/subscribe.c @@ -1,18 +1,3 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - // sample code for TDengine subscribe/consume API // to compile: gcc -o subscribe subscribe.c -ltaos @@ -20,43 +5,239 @@ #include <stdio.h> #include <stdlib.h> #include <taos.h> // include TDengine header file +#include <unistd.h> + +void print_result(TAOS_RES* res, int blockFetch) { + TAOS_ROW row = NULL; + int num_fields = taos_num_fields(res); + TAOS_FIELD* fields = taos_fetch_fields(res); + int nRows = 0; + + if (blockFetch) { + nRows = taos_fetch_block(res, &row); + for (int i = 0; i < nRows; i++) { + char temp[256]; + taos_print_row(temp, row + i, fields, num_fields); + puts(temp); + } + } else { + while ((row = taos_fetch_row(res))) { + char temp[256]; + taos_print_row(temp, row, fields, num_fields); + puts(temp); + nRows++; + } + } + + printf("%d rows consumed.\n", nRows); +} -int main(int argc, char *argv[]) -{ - TAOS_SUB *tsub; + +void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { + print_result(res, *(int*)param); +} + + +void check_row_count(int line, TAOS_RES* res, int expected) { + int actual = 0; TAOS_ROW row; - char dbname[64], table[64]; - char temp[256]; + while ((row = taos_fetch_row(res))) { + actual++; + } + if (actual != expected) { + printf("line %d: row count mismatch, expected: %d, actual: %d\n", line, expected, actual); + } else { + printf("line %d: %d rows consumed as expected\n", line, actual); + } +} - if ( argc == 1 ) { - printf("usage: %s server-ip db-name table-name \n", argv[0]); - exit(0); - } - if ( argc >= 2 ) strcpy(dbname, argv[2]); - if ( argc >= 3 ) strcpy(table, argv[3]); +void run_test(TAOS* taos) { + taos_query(taos, "drop database if exists test;"); + + usleep(100000); + taos_query(taos, "create database test tables 5;"); + usleep(100000); + taos_query(taos, "use test;"); + usleep(100000); + taos_query(taos, "create table meters(ts timestamp, a int, b binary(20)) tags(loc binary(20), area int);"); + + taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:00:00.000', 0, 'china');"); + taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:01:00.000', 0, 'china');"); + taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:02:00.000', 0, 'china');"); + taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:00:00.000', 0, 'china');"); + taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:01:00.000', 0, 'china');"); + taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:02:00.000', 0, 'china');"); + taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:03:00.000', 0, 'china');"); + taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:00:00.000', 0, 'UK');"); + taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:00.000', 0, 'UK');"); + taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:01.000', 0, 'UK');"); + taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:02.000', 0, 'UK');"); + taos_query(taos, "insert into t3 using meters tags('tianjin', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t4 using meters tags('wuhan', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t5 using meters tags('jinan', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t6 using meters tags('haikou',
0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t7 using meters tags('nanjing', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t8 using meters tags('lanzhou', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t9 using meters tags('tokyo', 0) values('2020-01-01 00:01:02.000', 0, 'japan');"); + + // super tables subscription + + TAOS_SUB* tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0); + TAOS_RES* res = taos_consume(tsub); + check_row_count(__LINE__, res, 18); + + res = taos_consume(tsub); + check_row_count(__LINE__, res, 0); + + taos_query(taos, "insert into t0 values('2020-01-01 00:03:00.000', 0, 'china');"); + taos_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0, 'china');"); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 2); + + taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0, 'UK');"); + taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0, 'UK');"); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 2); + + taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0, 'china');"); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 1); - tsub = taos_subscribe(argv[1], "root", "taosdata", dbname, table, 0, 1000); - if ( tsub == NULL ) { - printf("failed to connet to db:%s\n", dbname); + // keep progress information and restart subscription + taos_unsubscribe(tsub, 1); + taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0, 'china');"); + tsub = taos_subscribe(taos, 1, "test", "select * from meters;", NULL, NULL, 0); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 24); + + // keep progress information and continue previous subscription + taos_unsubscribe(tsub, 1); + tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 0); + + // don't keep progress information and continue previous subscription + taos_unsubscribe(tsub, 0); + tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 24); + + // single meter subscription + + taos_unsubscribe(tsub, 0); + tsub = taos_subscribe(taos, 0, "test", "select * from t0;", NULL, NULL, 0); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 5); + + res = taos_consume(tsub); + check_row_count(__LINE__, res, 0); + + taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0, 'china');"); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 1); + + taos_unsubscribe(tsub, 0); +} + + +int main(int argc, char *argv[]) { + const char* host = "127.0.0.1"; + const char* user = "root"; + const char* passwd = "taosdata"; + const char* sql = "select * from meters;"; + const char* topic = "test-multiple"; + int async = 1, restart = 0, keep = 1, test = 0, blockFetch = 0; + + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-h=", 3) == 0) { + host = argv[i] + 3; + continue; + } + if (strncmp(argv[i], "-u=", 3) == 0) { + user = argv[i] + 3; + continue; + } + if (strncmp(argv[i], "-p=", 3) == 0) { + passwd = argv[i] + 3; + continue; + } + if (strcmp(argv[i], "-sync") == 0) { + async = 0; + continue; + } + if (strcmp(argv[i], "-restart") == 0) { + restart = 1; + continue; + } + if (strcmp(argv[i], "-single") == 0) { + sql = "select * from t0;"; + topic = "test-single"; + continue; + } + if (strcmp(argv[i], 
"-nokeep") == 0) { + keep = 0; + continue; + } + if (strncmp(argv[i], "-sql=", 5) == 0) { + sql = argv[i] + 5; + topic = "test-custom"; + continue; + } + if (strcmp(argv[i], "-test") == 0) { + test = 1; + continue; + } + if (strcmp(argv[i], "-block-fetch") == 0) { + blockFetch = 1; + continue; + } + } + + // init TAOS + taos_init(); + + TAOS* taos = taos_connect(host, user, passwd, "test", 0); + if (taos == NULL) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); exit(1); } - TAOS_FIELD *fields = taos_fetch_subfields(tsub); - int fcount = taos_subfields_count(tsub); + if (test) { + run_test(taos); + taos_close(taos); + exit(0); + } + + TAOS_SUB* tsub = NULL; + if (async) { + // create an asynchronized subscription, the callback function will be called every 1s + tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); + } else { + // create an synchronized subscription, need to call 'taos_consume' manually + tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); + } - printf("start to retrieve data\n"); - printf("please use other taos client, insert rows into %s.%s\n", dbname, table); - while ( 1 ) { - row = taos_consume(tsub); - if ( row == NULL ) break; + if (tsub == NULL) { + printf("failed to create subscription.\n"); + exit(0); + } - taos_print_row(temp, row, fields, fcount); - printf("%s\n", temp); + if (async) { + getchar(); + } else while(1) { + TAOS_RES* res = taos_consume(tsub); + if (res == NULL) { + printf("failed to consume data."); + break; + } else { + print_result(res, blockFetch); + getchar(); + } } - taos_unsubscribe(tsub); + taos_unsubscribe(tsub, keep); + taos_close(taos); return 0; } -