diff --git a/CMakeLists.txt b/CMakeLists.txt index dcad242a303f4546e93e1e86e94c9d613c3b0de8..9c446b19a548e2f2f84d147d59327bee66d56caa 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -38,21 +38,56 @@ IF (NOT DEFINED TD_CLUSTER) # Set macro definitions according to os platform SET(TD_LINUX_64 FALSE) SET(TD_LINUX_32 FALSE) + SET(TD_ARM FALSE) SET(TD_ARM_64 FALSE) SET(TD_ARM_32 FALSE) SET(TD_MIPS_64 FALSE) SET(TD_DARWIN_64 FALSE) SET(TD_WINDOWS_64 FALSE) + # if generate ARM version: + # cmake -DARMVER=arm32 .. or cmake -DARMVER=arm64 + IF (${ARMVER} MATCHES "arm32") + SET(TD_ARM TRUE) + SET(TD_ARM_32 TRUE) + ADD_DEFINITIONS(-D_TD_ARM_) + ADD_DEFINITIONS(-D_TD_ARM_32_) + ELSEIF (${ARMVER} MATCHES "arm64") + SET(TD_ARM TRUE) + SET(TD_ARM_64 TRUE) + ADD_DEFINITIONS(-D_TD_ARM_) + ADD_DEFINITIONS(-D_TD_ARM_64_) + ENDIF () + + IF (TD_ARM) + ADD_DEFINITIONS(-D_TD_ARM_) + IF (TD_ARM_32) + ADD_DEFINITIONS(-D_TD_ARM_32_) + ELSEIF (TD_ARM_64) + ADD_DEFINITIONS(-D_TD_ARM_64_) + ELSE () + EXIT () + ENDIF () + ENDIF () + IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux") IF (${CMAKE_SIZEOF_VOID_P} MATCHES 8) SET(TD_LINUX_64 TRUE) SET(TD_OS_DIR ${TD_COMMUNITY_DIR}/src/os/linux) ADD_DEFINITIONS(-D_M_X64) MESSAGE(STATUS "The current platform is Linux 64-bit") - ELSE () + ELSEIF (${CMAKE_SIZEOF_VOID_P} MATCHES 4) + IF (TD_ARM) SET(TD_LINUX_32 TRUE) - MESSAGE(FATAL_ERROR "The current platform is Linux 32-bit, not supported yet") + SET(TD_OS_DIR ${TD_COMMUNITY_DIR}/src/os/linux) + #ADD_DEFINITIONS(-D_M_IX86) + MESSAGE(STATUS "The current platform is Linux 32-bit") + ELSE () + MESSAGE(FATAL_ERROR "The current platform is Linux 32-bit, but no ARM not supported yet") + EXIT () + ENDIF () + ELSE () + MESSAGE(FATAL_ERROR "The current platform is Linux neither 32-bit nor 64-bit, not supported yet") EXIT () ENDIF () ELSEIF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") @@ -90,7 +125,9 @@ IF (NOT DEFINED TD_CLUSTER) # debug flag # # ADD_DEFINITIONS(-D_CHECK_HEADER_FILE_) - # ADD_DEFINITIONS(-D_TAOS_MEM_TEST_) + IF (${MEM_CHECK} MATCHES "true") + ADD_DEFINITIONS(-DTAOS_MEM_CHECK) + ENDIF () IF (TD_CLUSTER) ADD_DEFINITIONS(-DCLUSTER) @@ -102,13 +139,27 @@ IF (NOT DEFINED TD_CLUSTER) IF (TD_LINUX_64) SET(DEBUG_FLAGS "-O0 -DDEBUG") SET(RELEASE_FLAGS "-O0") - IF (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + IF (NOT TD_ARM) + IF (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + ELSE () + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + ENDIF () ELSE () - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + ENDIF () + ADD_DEFINITIONS(-DLINUX) + ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT) + ELSEIF (TD_LINUX_32) + IF (NOT TD_ARM) + EXIT () ENDIF () + SET(DEBUG_FLAGS "-O0 -DDEBUG") + SET(RELEASE_FLAGS "-O0") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ADD_DEFINITIONS(-DLINUX) ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT) + ADD_DEFINITIONS(-DUSE_LIBICONV) ELSEIF (TD_WINDOWS_64) SET(CMAKE_GENERATOR 
"NMake Makefiles" CACHE INTERNAL "" FORCE) SET(COMMON_FLAGS "/nologo /WX- /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-") @@ -120,7 +171,7 @@ IF (NOT DEFINED TD_CLUSTER) ADD_DEFINITIONS(-DPTW32_BUILD) ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE) ELSEIF (TD_DARWIN_64) - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-unused-variable -Wno-bitfield-constant-conversion") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") SET(DEBUG_FLAGS "-O0 -DDEBUG") SET(RELEASE_FLAGS "-O0") ADD_DEFINITIONS(-DDARWIN) @@ -169,6 +220,14 @@ IF (NOT DEFINED TD_CLUSTER) INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})") INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})") + ELSEIF (TD_LINUX_32) + IF (NOT TD_ARM) + EXIT () + ENDIF () + SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") + INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") + INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})") + INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})") ELSEIF (TD_WINDOWS_64) SET(CMAKE_INSTALL_PREFIX C:/TDengine) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) diff --git a/README.md b/README.md index c82cda6f3cafe148c317b132edcb7041137670e0..fddd7d31322911947815ba8589374666b88e5d69 100644 --- a/README.md +++ b/README.md @@ -39,12 +39,18 @@ sudo apt-get install maven ``` Build TDengine: -```cmd +``` mkdir build && cd build cmake .. && cmake --build . ``` +if compiling on an arm64 processor, you need add one parameter: + +```cmd +cmake .. -DARMVER=arm64 && cmake --build . +``` + # Quick Run To quickly start a TDengine server after building, run the command below in terminal: ```cmd diff --git a/documentation/tdenginedocs-cn/connector/index.html b/documentation/tdenginedocs-cn/connector/index.html index fca343e977fa48fc9b1a6fc4bb55e23c0056fe7b..3167c1521f099f8acd7ae237cc37bd5867ee209a 100644 --- a/documentation/tdenginedocs-cn/connector/index.html +++ b/documentation/tdenginedocs-cn/connector/index.html @@ -114,23 +114,84 @@ public Connection getConn() throws Exception{

对于TDengine操作的报错信息,用户可使用JDBCDriver包里提供的枚举类TSDBError.java来获取error message和error code的列表。对于更多的具体操作的相关代码,请参考TDengine提供的使用示范项目JDBCDemo

Python Connector

-

Python客户端安装

-

用户可以在源代码的src/connector/python文件夹下找到python2和python3的安装包。用户可以通过pip命令安装:

-

pip install src/connector/python/python2/

-

-

pip install src/connector/python/python3/

-

如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。

-

Python客户端接口

-

在使用TDengine的python接口时,需导入TDengine客户端模块:

-
import taos 
-

用户可通过python的帮助信息直接查看模块的使用信息,或者参考code/examples/python中的示例程序。以下为部分常用类和方法:

+

安装准备

+
  • 已安装TDengine, 如果客户端在Windows上,需要安装Windows 版本的TDengine客户端
  • +
  • 已安装python 2.7 or >= 3.4
  • +
  • 已安装pip
  • +

    安装

    +

    Linux

    +

    用户可以在源代码的src/connector/python文件夹下找到python2和python3的安装包, 然后通过pip命令安装

    +
    pip install src/connector/python/linux/python2/
    +

    或者

    +
    pip install src/connector/python/linux/python3/
    +

    Windows

    +

在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\windows\system32" 目录下, 然后进入Windows cmd 命令行界面

    +
    cd C:\TDengine\connector\python\windows
    +
    pip install python2\
    +

    或者

    +
    cd C:\TDengine\connector\python\windows
    +
    pip install python3\
    +

    * 如果机器上没有pip命令,用户可将src/connector/python/windows/python3或src/connector/python/windows/python2下的taos文件夹拷贝到应用程序的目录使用。

    +

    使用

    +

    代码示例

    +
  • 导入TDengine客户端模块:
  • +
    import taos 
    +
  • 获取连接
  • +
    
    +conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
    +c1 = conn.cursor()
    +
    +

    * host 是TDengine 服务端所在的IP地址, config 为客户端配置文件所在目录

    +
  • 写入数据
  • +
    
    +import datetime
    + 
    +# 创建数据库
    +c1.execute('create database db')
    +c1.execute('use db')
    +# 建表
    +c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
    +# 插入数据
    +start_time = datetime.datetime(2019, 11, 1)
    +affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
    +# 批量插入数据
    +time_interval = datetime.timedelta(seconds=60)
    +sqlcmd = ['insert into tb values']
    +for irow in range(1,11):
    +  start_time += time_interval
    +  sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
    +affected_rows = c1.execute(' '.join(sqlcmd))
    +
    +
  • 查询数据
  • +
    +c1.execute('select * from tb')
    +# 拉取查询结果
    +data = c1.fetchall()
    +# 返回的结果是一个列表,每一行构成列表的一个元素
    +numOfRows = c1.rowcount
     +numOfCols = len(c1.description)
    +for irow in range(numOfRows):
    +  print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2])
    +  
    +# 直接使用cursor 循环拉取查询结果
    +c1.execute('select * from tb')
    +for data in c1:
    +  print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2])
    +
    +
  • 关闭连接
  • +
    +c1.close()
    +conn.close()
    +
    +

    帮助信息

    +

    用户可通过python的帮助信息直接查看模块的使用信息,或者参考code/examples/python中的示例程序。以下为部分常用类和方法:

    RESTful Connector

    为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。
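As a rough illustration of that model, the sketch below sends a SQL statement in an HTTP POST body from Python. The endpoint path /rest/sql, the port 6020, and the Basic-auth credentials are assumptions made here for illustration only; refer to the RESTful Connector documentation for the actual URL format and authentication scheme.

```python
# Hedged sketch only: endpoint path, port and credentials below are assumed,
# not taken from this document. Check the RESTful Connector docs before use.
import requests

resp = requests.post(
    "http://127.0.0.1:6020/rest/sql",        # assumed RESTful endpoint
    data="select * from db.tb limit 10;",    # the SQL statement travels in the POST body
    auth=("root", "taosdata"),               # assumed HTTP Basic auth with DB credentials
)
print(resp.json())
```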

    diff --git a/documentation/tdenginedocs-cn/getting-started/index.html b/documentation/tdenginedocs-cn/getting-started/index.html index d7e8fe311c7ad9a78605163903fa0f9378c70544..d7d5d8540c6c46bbf5210677339c2ee202a7ec86 100644 --- a/documentation/tdenginedocs-cn/getting-started/index.html +++ b/documentation/tdenginedocs-cn/getting-started/index.html @@ -28,7 +28,7 @@

    在TDengine终端中,用户可以通过SQL命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的SQL语句需要以分号结束来运行。示例:

    create database db;
     use db;
    -create table t (ts timestamp, cdata int);
    +create table t (ts timestamp, speed int);
     insert into t values ('2019-07-15 00:00:00', 10);
     insert into t values ('2019-07-15 01:00:00', 20);
     select * from t;
    @@ -63,7 +63,7 @@ Query OK, 2 row(s) in set (0.001700s)

    主要功能

    TDengine的核心功能是时序数据库。除此之外,为减少研发的复杂度、系统维护的难度,TDengine还提供缓存、消息队列、订阅、流式计算等功能。更详细的功能如下:

    TDengine是专为物联网、车联网、工业互联网、运维监测等场景优化设计的时序数据处理引擎。与其他方案相比,它的插入查询速度都快10倍以上。单核一秒钟就能插入100万数据点,读出1000万数据点。由于采用列式存储和优化的压缩算法,存储空间不及普通数据库的1/10.

    深入了解TDengine

    -

    请继续阅读文档来深入了解TDengine。

    回去 \ No newline at end of file +

    请继续阅读文档来深入了解TDengine。

    回去 diff --git a/documentation/tdenginedocs-cn/super-table/index.html b/documentation/tdenginedocs-cn/super-table/index.html index 5ee9587b2f0d8f76ed3fe973a7a7ea8c3498b2a6..828a69bb0ceaaefcc526e95042319d565e841d2d 100644 --- a/documentation/tdenginedocs-cn/super-table/index.html +++ b/documentation/tdenginedocs-cn/super-table/index.html @@ -24,7 +24,7 @@ tags (location binary(20), type int)

    说明:

    1. TAGS列总长度不能超过512 bytes;
    -2. TAGS列的数据类型不能是timestamp和nchar类型;
    +2. TAGS列的数据类型不能是timestamp类型;
    3. TAGS列名不能与其他列名相同;
    4. TAGS列名不能为预留关键字.
  • 显示已创建的超级表

    @@ -40,7 +40,7 @@ tags (location binary(20), type int)

    统计属于某个STable并满足查询条件的子表的数量

  • 写数据时自动建子表

    -

    在某些特殊场景中,用户在写数据时并不确定某个设备的表是否存在,此时可使用自动建表语法来实现写入数据时里用超级表定义的表结构自动创建不存在的子表,若该表已存在则不会建立新表。注意:自动建表语句只能自动建立子表而不能建立超级表,这就要求超级表已经被事先定义好。自动建表语法跟insert/import语法非常相似,唯一区别是语句中增加了超级表和标签信息。具体语法如下:

    +

    在某些特殊场景中,用户在写数据时并不确定某个设备的表是否存在,此时可使用自动建表语法来实现写入数据时用超级表定义的表结构自动创建不存在的子表,若该表已存在则不会建立新表。注意:自动建表语句只能自动建立子表而不能建立超级表,这就要求超级表已经被事先定义好。自动建表语法跟insert/import语法非常相似,唯一区别是语句中增加了超级表和标签信息。具体语法如下:

    INSERT INTO <tb_name> USING <stb_name> TAGS (<tag1_value>, ...) VALUES (field_value, ...) (field_value, ...) ...;

    向表tb_name中插入一条或多条记录,如果tb_name这张表不存在,则会用超级表stb_name定义的表结构以及用户指定的标签值(即tag1_value…)来创建名为tb_name的新表,并将用户指定的值写入表中。如果tb_name已经存在,则建表过程会被忽略,系统也不会检查tb_name的标签是否与用户指定的标签值一致,也即不会更新已存在表的标签。

    INSERT INTO <tb1_name> USING <stb1_name> TAGS (<tag1_value1>, ...) VALUES (<field1_value1>, ...) (<field1_value2>, ...) ... <tb_name2> USING <stb_name2> TAGS(<tag1_value2>, ...) VALUES (<field1_value1>, ...) ...;
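As a concrete, hypothetical instance of the syntax above, the snippet below assumes the thermometer super table described in this document (tags location and type, fields ts and degree) and the Python connector cursor c1 from the earlier examples; the table name therm_bj_007 and the tag values are made up for illustration.

```python
# Hypothetical example: therm_bj_007 does not exist yet, so it is created
# automatically from the thermometer STable with tags ('beijing', 1),
# and the row is written into it by the same statement.
c1.execute("INSERT INTO therm_bj_007 USING thermometer TAGS ('beijing', 1) "
           "VALUES ('2018-01-01 00:00:00.000', 22)")
```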
    @@ -105,6 +105,6 @@ GROUP BY location, type

    查询仅位于北京以外地区的温度传感器最近24小时(24h)采样值的数量count(*)、平均温度avg(degree)、最高温度max(degree)和最低温度min(degree),将采集结果按照10分钟为周期进行聚合,并将结果按所处地域(location)和传感器类型(type)再次进行聚合。

    SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
     FROM thermometer
    -WHERE name<>'beijing' and ts>=now-1d
    +WHERE location<>'beijing' and ts>=now-1d
     INTERVAL(10M)
     GROUP BY location, type
    回去 diff --git a/documentation/tdenginedocs-cn/taos-sql/index.html b/documentation/tdenginedocs-cn/taos-sql/index.html index ec3e42d0901cf4730f54cc33d7a60890a64c2136..207bfe03fd41fb91322c34b754e07fd77711881e 100644 --- a/documentation/tdenginedocs-cn/taos-sql/index.html +++ b/documentation/tdenginedocs-cn/taos-sql/index.html @@ -359,9 +359,9 @@ SELECT function_list FROM tb_name SELECT function_list FROM stb_name [WHERE where_condition] - [GROUP BY tags] + [FILL ({ VALUE | PREV | NULL | LINEAR})] INTERVAL (interval) - [FILL ({ VALUE | PREV | NULL | LINEAR})] + [GROUP BY tags]

    All the error codes and error messages can be found in TSDBError.java . For a more detailed coding example, please refer to the demo project JDBCDemo in TDengine's code examples.

    Python Connector

    -

    Install TDengine Python client

    -

    Users can find python client packages in our source code directory src/connector/python. There are two directories corresponding two python versions. Please choose the correct package to install. Users can use pip command to install:

    -
    pip install src/connector/python/python2/
    +

    Pre-requirement

    +
  • TDengine installed, TDengine-client installed if on Windows
  • +
  • python 2.7 or >= 3.4
  • +
  • pip installed
  • +

    Installation

    +

    Linux

    +

    Users can find python client packages in our source code directory src/connector/python. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use pip command to install:

    +
    pip install src/connector/python/linux/python2/

    or

    -
    pip install src/connector/python/python3/
    -

    If pip command is not installed on the system, users can choose to install pip or just copy the taos directory in the python client directory to the application directory to use.

    -

    Python client interfaces

    -

    To use TDengine Python client, import TDengine module at first:

    +
    pip install src/connector/python/linux/python3/
    +

    Windows

    +

    Assuming the Windows TDengine client has been installed, copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then open the Windows cmd command-line interface

    +
    cd C:\TDengine\connector\python\windows
    +
    pip install python2\
    +

    or

    +
    cd C:\TDengine\connector\python\windows
    +
    pip install python3\
    +

    * If the pip command is not installed on the system, users can either install pip or simply copy the taos directory from the python client directory into the application directory and use it from there.

    +

    Usage

    +

    Examples

    +
  • first, import the TDengine module:
  • import taos 
    +
  • get the connection
  • +
    
    +conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
    +c1 = conn.cursor()
    +
    +

    * host is the IP address of the TDengine server, and config is the directory where the TDengine client configuration file is located

    +
  • insert records into the database
  • +
    
    +import datetime
    + 
    +# create a database
    +c1.execute('create database db')
    +c1.execute('use db')
    +# create a table
    +c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
    +# insert a record
    +start_time = datetime.datetime(2019, 11, 1)
    +affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
    +# insert multiple records in a batch
    +time_interval = datetime.timedelta(seconds=60)
    +sqlcmd = ['insert into tb values']
    +for irow in range(1,11):
    +  start_time += time_interval
    +  sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
    +affected_rows = c1.execute(' '.join(sqlcmd))
    +
    +
  • query the database
  • +
    +c1.execute('select * from tb')
    +# fetch all returned results
    +data = c1.fetchall()
    +# data is a list of returned rows with each row being a tuple
    +numOfRows = c1.rowcount
     +numOfCols = len(c1.description)
    +for irow in range(numOfRows):
    +  print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2])
    +  
    +# use the cursor as an iterator to retrieve all returned results
    +c1.execute('select * from tb')
    +for data in c1:
    +  print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2])
    +
    +
  • close the connection
  • +
    +c1.close()
    +conn.close()
    +
    +

    Help information

    Users can get module information from Python help interface or refer to our [python code example](). We list the main classes and methods below:
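For example, the standard Python help() built-in can be used to browse the module interactively (a minimal illustration, assuming the taos package installed above):

```python
import taos

help(taos)          # overview of the module
help(taos.connect)  # signature and docstring of the connect() entry point
```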

    TDengine is specially designed and optimized for time-series data processing in IoT, connected cars, Industrial IoT, IT infrastructure and application monitoring, and other scenarios. Compared with other solutions, it is 10x faster on insert/query speed. With a single-core machine, over 20K requests can be processed, millions of data points can be ingested, and over 10 million data points can be retrieved in a second. Via column-based storage and tuned compression algorithms for different data types, less than 1/10 of the storage space is required.

    Explore More on TDengine

    -

    Please read through the whole documentation to learn more about TDengine.

    Back \ No newline at end of file +

    Please read through the whole documentation to learn more about TDengine.

    Back diff --git a/documentation/tdenginedocs-en/super-table/index.html b/documentation/tdenginedocs-en/super-table/index.html index 3ebb0775b5e3faaa90281fb19cd027d9599da900..21e7669a19118fd0bfd9b15693ae38bc66edbb0b 100644 --- a/documentation/tdenginedocs-en/super-table/index.html +++ b/documentation/tdenginedocs-en/super-table/index.html @@ -6,9 +6,9 @@

    Like a table, you can create, show, delete and describe STables. Most query operations on tables can be applied to STables too, including the aggregation and selector functions. For queries on a STable, if no tag filter is specified, the operations are applied to all the tables created via this STable; if there is a tag filter, the operations are applied only to the subset of tables that satisfy the tag filter conditions. It is very convenient to use tags to put devices into different groups for aggregation.

    Create a STable

    Similar to creating a standard table, the syntax is:

    -
    CREATE TABLE <stable_name> (<field_name> TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
    +
    CREATE TABLE <stable_name> (<field_name> TIMESTAMP, field_name1 field_type, ...) TAGS(tag_name tag_type, ...)

    New keyword "tags" is introduced, where tag_name is the tag name, and tag_type is the associated data type.

    -

    Note:

    +

    Note:

    1. The bytes of all tags together shall be less than 512
    2. Tag's data type can not be time stamp or nchar
    3. @@ -30,12 +30,12 @@ tags (location binary(20), type int) create table t4 using thermometer tags ('shanghai', 20); create table t5 using thermometer tags ('new york', 10);

      Aggregate Tables via STable

      -

      You can group a set of tables together by specifying the tags filter condition, then apply the aggregation operations. The result set can be grouped and ordered based on tag value. Syntax is:

      -
      SELECT function<field_name>,… 
      +

      You can group a set of tables together by specifying the tags filter condition, then apply the aggregation operations. The result set can be grouped and ordered based on tag value. Syntax is:

      +
      SELECT function<field_name>, ... 
        FROM <stable_name> 
      - WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
      + WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] ...)
        INTERVAL (<time range>)
      - GROUP BY <tag_name>, <tag_name>…
      + GROUP BY <tag_name>, <tag_name> ... 
        ORDER BY <tag_name> <asc|desc>
        SLIMIT <group_limit>
        SOFFSET <group_offset>
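As an example of this syntax, the query below (borrowed from the thermometer example used elsewhere in this documentation, and issued here through the Python connector cursor so the sample stays in the same language as the other snippets) aggregates the last 24 hours of readings from sensors outside beijing in 10-minute windows:

```python
# Assumes the thermometer STable (field degree, tags location/type) and a
# cursor c1 obtained as in the Python connector examples above.
c1.execute("SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree) "
           "FROM thermometer "
           "WHERE location<>'beijing' AND ts >= now-1d "
           "INTERVAL(10M) "
           "GROUP BY location, type")
rows = c1.fetchall()
```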
      @@ -75,9 +75,9 @@ INTERVAL(10M)
      DROP TABLE <stable_name>

      To delete a STable, all the tables created via this STable shall be deleted first, otherwise, it will fail.

      List the Associated Tables of a STable

      -
      SELECT TBNAME,[TAG_NAME,…] FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] …)
      +
      SELECT TBNAME,[TAG_NAME, ...] FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] ...)

      It will list all the tables which satisfy the tag filter conditions. The tables are all created from this specific STable. TBNAME is a newly introduced keyword; it is the name of the table associated with the STable.

      -
      SELECT COUNT(TBNAME) FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] …)
      +
      SELECT COUNT(TBNAME) FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] ...)

      The above SQL statement will return the number of tables created from this STable that satisfy the filter condition.
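A hypothetical pair of queries following the two templates above, again against the thermometer STable and run through the Python connector cursor used in the earlier examples:

```python
# List the sub-tables (and their location tag) whose type tag is 1.
c1.execute("SELECT TBNAME, location FROM thermometer WHERE type=1")
tables = c1.fetchall()

# Count how many sub-tables match the same filter.
c1.execute("SELECT COUNT(TBNAME) FROM thermometer WHERE type=1")
count = c1.fetchall()[0][0]
```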

      Management of Tags

      You can add, delete and change the tags for a STable, and you can change the tag value of a table. The SQL commands are listed below.

      diff --git a/documentation/webdocs/markdowndocs/Connector.md b/documentation/webdocs/markdowndocs/Connector.md index ef2383f6bddaa07b6668c3682369ee29ba231fe4..46a2b04daa323ab672b60b90614a9665205184c8 100644 --- a/documentation/webdocs/markdowndocs/Connector.md +++ b/documentation/webdocs/markdowndocs/Connector.md @@ -8,7 +8,7 @@ C/C++ APIs are similar to the MySQL APIs. Applications should include TDengine h ```C #include ``` -Make sure TDengine library _libtaos.so_ is installed and use _-ltaos_ option to link the library when compiling. The return values of all APIs are _-1_ or _NULL_ for failure. +Make sure TDengine library _libtaos.so_ is installed and use _-ltaos_ option to link the library when compiling. In most cases, if the return value of an API is integer, it return _0_ for success and other values as an error code for failure; if the return value is pointer, then _NULL_ is used for failure. ### C/C++ sync API @@ -78,6 +78,51 @@ The 12 APIs are the most important APIs frequently used. Users can check _taos.h **Note**: The connection to a TDengine server is not multi-thread safe. So a connection can only be used by one thread. +### C/C++ parameter binding API + +TDengine also provides parameter binding APIs, like MySQL, only question mark `?` can be used to represent a parameter in these APIs. + +- `TAOS_STMT* taos_stmt_init(TAOS *taos)` + + Create a TAOS_STMT to represent the prepared statement for other APIs. + +- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)` + + Parse SQL statement _sql_ and bind result to _stmt_ , if _length_ larger than 0, its value is used to determine the length of _sql_, the API auto detects the actual length of _sql_ otherwise. + +- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)` + + Bind values to parameters. _bind_ points to an array, the element count and sequence of the array must be identical as the parameters of the SQL statement. The usage of _TAOS_BIND_ is same as _MYSQL_BIND_ in MySQL, its definition is as below: + + ```c + typedef struct TAOS_BIND { + int buffer_type; + void * buffer; + unsigned long buffer_length; // not used in TDengine + unsigned long *length; + int * is_null; + int is_unsigned; // not used in TDengine + int * error; // not used in TDengine + } TAOS_BIND; + ``` + +- `int taos_stmt_add_batch(TAOS_STMT *stmt)` + + Add bound parameters to batch, client can call `taos_stmt_bind_param` again after calling this API. Note this API only support _insert_ / _import_ statements, it returns an error in other cases. + +- `int taos_stmt_execute(TAOS_STMT *stmt)` + + Execute the prepared statement. This API can only be called once for a statement at present. + +- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)` + + Acquire the result set of an executed statement. The usage of the result is same as `taos_use_result`, `taos_free_result` must be called after one you are done with the result set to release resources. + +- `int taos_stmt_close(TAOS_STMT *stmt)` + + Close the statement, release all resources. + + ### C/C++ async API In addition to sync APIs, TDengine also provides async APIs, which are more efficient. Async APIs are returned right away without waiting for a response from the server, allowing the application to continute with other tasks without blocking. So async APIs are more efficient, especially useful when in a poor network. @@ -153,56 +198,104 @@ For the time being, TDengine supports subscription on one table. 
It is implement ## Java Connector -### JDBC Interface +To Java delevopers, TDengine provides `taos-jdbcdriver` according to the JDBC(3.0) API. Users can find and download it through [Sonatype Repository][1]. + +Since the native language of TDengine is C, the necessary TDengine library should be checked before using the taos-jdbcdriver: + +* libtaos.so (Linux) + After TDengine is installed successfully, the library `libtaos.so` will be automatically copied to the `/usr/lib/`, which is the system's default search path. + +* taos.dll (Windows) + After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path. + +> Note: Please make sure that TDengine Windows client has been installed if developing on Windows. + +Since TDengine is time-series database, there are still some differences compared with traditional databases in using TDengine JDBC driver: +* TDengine doesn't allow to delete/modify a single record, and thus JDBC driver also has no such method. +* No support for transaction +* No support for union between tables +* No support for nested query,`There is at most one open ResultSet for each Connection. Thus, TSDB JDBC Driver will close current ResultSet if it is not closed and a new query begins`. + +## Version list of TAOS-JDBCDriver and required TDengine and JDK + +| taos-jdbcdriver | TDengine | JDK | +| --- | --- | --- | +| 1.0.3 | 1.6.1.x or higher | 1.8.x | +| 1.0.2 | 1.6.1.x or higher | 1.8.x | +| 1.0.1 | 1.6.1.x or higher | 1.8.x | + +## DataType in TDengine and Java + +The datatypes in TDengine include timestamp, number, string and boolean, which are converted as follows in Java: + +| TDengine | Java | +| --- | --- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT, TINYINT |java.lang.Short | +| BOOL | java.lang.Boolean | +| BINARY, NCHAR | java.lang.String | + +## How to get TAOS-JDBC Driver + +### maven repository -TDengine provides a JDBC driver `taos-jdbcdriver-x.x.x.jar` for Enterprise Java developers. TDengine's JDBC Driver is implemented as a subset of the standard JDBC 3.0 Specification and supports the most common Java development frameworks. The driver have been published to dependency repositories such as Sonatype Maven Repository, and users could refer to the following `pom.xml` configuration file. +taos-jdbcdriver has been published to [Sonatype Repository][1]: +* [sonatype][8] +* [mvnrepository][9] +* [maven.aliyun][10] + +Using the following pom.xml for maven projects ```xml - - - oss-sonatype - oss-sonatype - https://oss.sonatype.org/content/groups/public - - - com.taosdata.jdbc taos-jdbcdriver - 1.0.1 + 1.0.3 ``` -Please note the JDBC driver itself relies on a native library written in C. On a Linux OS, the driver relies on a `libtaos.so` native library, where .so stands for "Shared Object". After the successful installation of TDengine on Linux, `libtaos.so` should be automatically copied to `/usr/local/lib/taos` and added to the system's default search path. On a Windows OS, the driver relies on a `taos.dll` native library, where .dll stands for "Dynamic Link Library". 
After the successful installation of the TDengine client on Windows, the `taos-jdbcdriver.jar` file can be found in `C:/TDengine/driver/JDBC`; the `taos.dll` file can be found in `C:/TDengine/driver/C` and should have been automatically copied to the system's searching path `C:/Windows/System32`. +### JAR file from the source code -Developers can refer to the Oracle's official JDBC API documentation for detailed usage on classes and methods. However, there are some differences of connection configurations and supported methods in the driver implementation between TDengine and traditional relational databases. +After downloading the [TDengine][3] source code, execute `mvn clean package` in the directory `src/connector/jdbc` and then the corresponding jar file is generated. -For database connections, TDengine's JDBC driver has the following configurable parameters in the JDBC URL. The standard format of a TDengine JDBC URL is: +## Usage -`jdbc:TSDB://{host_ip}:{port}/{database_name}?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +### get the connection -where `{}` marks the required parameters and `[]` marks the optional. The usage of each parameter is pretty straightforward: +```java +Class.forName("com.taosdata.jdbc.TSDBDriver"); +String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` +> `6030` is the default port and `log` is the default database for system monitor. -* user - login user name for TDengine; by default, it's `root` -* password - login password; by default, it's `taosdata` -* charset - the client-side charset; by default, it's the operation system's charset -* cfgdir - the directory of TDengine client configuration file; by default it's `/etc/taos` on Linux and `C:\TDengine/cfg` on Windows -* locale - the language environment of TDengine client; by default, it's the operation system's locale -* timezone - the timezone of the TDengine client; by default, it's the operation system's timezone +A normal JDBC URL looks as follows: +`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` -All parameters can be configured at the time when creating a connection using the java.sql.DriverManager class, for example: +values in `{}` are necessary while values in `[]` are optional。Each option in the above URL denotes: -```java -import java.sql.Connection; -import java.sql.DriverManager; -import java.util.Properties; -import com.taosdata.jdbc.TSDBDriver; +* user:user name for login, defaultly root。 +* password:password for login,defaultly taosdata。 +* charset:charset for client,defaultly system charset +* cfgdir:log directory for client, defaultly _/etc/taos/_ on Linux and _C:/TDengine/cfg_ on Windows。 +* locale:language for client,defaultly system locale。 +* timezone:timezone for client,defaultly system timezone。 + +The options above can be configures (`ordered by priority`): +1. JDBC URL + As explained above. +2. 
java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps) +```java public Connection getConn() throws Exception{ - Class.forName("com.taosdata.jdbc.TSDBDriver"); - String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata"; + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata"; Properties connProps = new Properties(); connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); @@ -215,42 +308,294 @@ public Connection getConn() throws Exception{ } ``` -Except `cfgdir`, all the parameters listed above can also be configured in the configuration file. The properties specified when calling DriverManager.getConnection() has the highest priority among all configuration methods. The JDBC URL has the second-highest priority, and the configuration file has the lowest priority. The explicitly configured parameters in a method with higher priorities always overwrite that same parameter configured in methods with lower priorities. For example, if `charset` is explicitly configured as "UTF-8" in the JDBC URL and "GKB" in the `taos.cfg` file, then "UTF-8" will be used. +3. Configuration file (taos.cfg) + + Default configuration file is _/var/lib/taos/taos.cfg_ On Linux and _C:\TDengine\cfg\taos.cfg_ on Windows +```properties +# client default username +# defaultUser root + +# client default password +# defaultPass taosdata + +# default system charset +# charset UTF-8 + +# system locale +# locale en_US.UTF-8 +``` +> More options can refer to [client configuration][13] + +### Create databases and tables + +```java +Statement stmt = conn.createStatement(); + +// create database +stmt.executeUpdate("create database if not exists db"); + +// use database +stmt.executeUpdate("use db"); + +// create table +stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); +``` +> Note: if no step like `use db`, the name of database must be added as prefix like _db.tb_ when operating on tables + +### Insert data + +```java +// insert data +int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); + +System.out.println("insert " + affectedRows + " rows."); +``` +> _now_ is the server time. +> _now+1s_ is 1 second later than current server time. The time unit includes: _a_(millisecond), _s_(second), _m_(minute), _h_(hour), _d_(day), _w_(week), _n_(month), _y_(year). + +### Query database + +```java +// query data +ResultSet resultSet = stmt.executeQuery("select * from tb"); + +Timestamp ts = null; +int temperature = 0; +float humidity = 0; +while(resultSet.next()){ + + ts = resultSet.getTimestamp(1); + temperature = resultSet.getInt(2); + humidity = resultSet.getFloat("humidity"); + + System.out.printf("%s, %d, %s\n", ts, temperature, humidity); +} +``` +> query is consistent with relational database. The subscript start with 1 when retrieving return results. It is recommended to use the column name to retrieve results. 
+ +### Close all + +```java +resultSet.close(); +stmt.close(); +conn.close(); +``` +> `please make sure the connection is closed to avoid the error like connection leakage` + +## Using connection pool + +**HikariCP** + +* dependence in pom.xml: +```xml + + com.zaxxer + HikariCP + 3.4.1 + +``` + +* Examples: +```java + public static void main(String[] args) throws SQLException { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + config.setPassword("taosdata"); + + config.setMinimumIdle(3); //minimum number of idle connection + config.setMaximumPoolSize(10); //maximum number of connection in the pool + config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool + config.setIdleTimeout(60000); // max idle time for recycle idle connection + config.setConnectionTestQuery("describe log.dn"); //validation query + config.setValidationTimeout(3000); //validation query timeout + + HikariDataSource ds = new HikariDataSource(config); //create datasource + + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... + + connection.close(); // put back to conneciton pool +} +``` +> The close() method will not close the connection from HikariDataSource.getConnection(). Instead, the connection is put back to the connection pool. +> More instructions can refer to [User Guide][5] + +**Druid** + +* dependency in pom.xml: + +```xml + + com.alibaba + druid + 1.1.20 + +``` + +* Examples: +```java +public static void main(String[] args) throws Exception { + Properties properties = new Properties(); + properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver"); + properties.put("url","jdbc:TAOS://127.0.0.1:6030/log"); + properties.put("username","root"); + properties.put("password","taosdata"); + + properties.put("maxActive","10"); //maximum number of connection in the pool + properties.put("initialSize","3");//initial number of connection + properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool + properties.put("minIdle","3");//minimum number of connection in the pool + + properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection + + properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle + properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle + + properties.put("validationQuery","describe log.dn"); //validation query + properties.put("testWhileIdle","true"); // test connection while idle + properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true + properties.put("testOnReturn","false"); // don't need while testWhileIdle is true + + //create druid datasource + DataSource ds = DruidDataSourceFactory.createDataSource(properties); + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... + + connection.close(); // put back to conneciton pool +} +``` +> More instructions can refer to [User Guide][6] + +**Notice** +* TDengine `v1.6.4.1` provides a function `select server_status()` to check heartbeat. It is highly recommended to use this function for `Validation Query`. 
+ +As follows,`1` will be returned if `select server_status()` is successfully executed。 +```shell +taos> select server_status(); +server_status()| +================ +1 | +Query OK, 1 row(s) in set (0.000141s) +``` + +## Integrated with framework + +* Please refer to [SpringJdbcTemplate][11] if using taos-jdbcdriver in Spring JdbcTemplate +* Please refer to [springbootdemo][12] if using taos-jdbcdriver in Spring JdbcTemplate -Although the JDBC driver is implemented following the JDBC standard as much as possible, there are major differences between TDengine and traditional databases in terms of data models that lead to the differences in the driver implementation. Here is a list of head-ups for developers who have plenty of experience on traditional databases but little on TDengine: +## FAQ + +* java.lang.UnsatisfiedLinkError: no taos in java.library.path + + **Cause**:The application program cannot find Library function _taos_ + + **Answer**:Copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\` on Windows and make a soft link through ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` on Linux. + +* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform + + **Cause**:Currently TDengine only support 64bit JDK + + **Answer**:re-install 64bit JDK. -* TDengine does NOT support updating or deleting a specific record, which leads to some unsupported methods in the JDBC driver -* TDengine currently does not support `join` or `union` operations, and thus, is lack of support for associated methods in the JDBC driver -* TDengine supports batch insertions which are controlled at the level of SQL statement writing instead of API calls -* TDengine doesn't support nested queries and neither does the JDBC driver. Thus for each established connection to TDengine, there should be only one open result set associated with it +* For other questions, please refer to [Issues][7] -All the error codes and error messages can be found in `TSDBError.java` . For a more detailed coding example, please refer to the demo project `JDBCDemo` in TDengine's code examples. ## Python Connector -### Install TDengine Python client +### Pre-requirement +* TDengine installed, TDengine-client installed if on Windows [(Windows TDengine client installation)](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口) +* python 2.7 or >= 3.4 +* pip installed + +### Installation +#### Linux -Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding two python versions. Please choose the correct package to install. Users can use _pip_ command to install: +Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding to two python versions. Please choose the correct package to install. 
Users can use _pip_ command to install: ```cmd -pip install src/connector/python/[linux|Windows]/python2/ +pip install src/connector/python/linux/python3/ ``` or ``` -pip install src/connector/python/[linux|Windows]/python3/ +pip install src/connector/python/linux/python2/ ``` +#### Windows +Assumed the Windows TDengine client has been installed , copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then enter the _cmd_ Windows command interface +``` +cd C:\TDengine\connector\python\windows +pip install python3\ +``` +or +``` +cd C:\TDengine\connector\python\windows +pip install python2\ +``` +*If _pip_ command is not installed on the system, users can choose to install pip or just copy the _taos_ directory in the python client directory to the application directory to use. -If _pip_ command is not installed on the system, users can choose to install pip or just copy the _taos_ directory in the python client directory to the application directory to use. - -### Python client interfaces - -To use TDengine Python client, import TDengine module at first: +### Usage +#### Examples +* import TDengine module ```python import taos ``` +* get the connection +```python +conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos") +c1 = conn.cursor() +``` +*host is the IP of TDengine server, and config is the directory where exists the TDengine client configure file +* insert records into the database +```python +import datetime + +# create a database +c1.execute('create database db') +c1.execute('use db') +# create a table +c1.execute('create table tb (ts timestamp, temperature int, humidity float)') +# insert a record +start_time = datetime.datetime(2019, 11, 1) +affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time) +# insert multiple records in a batch +time_interval = datetime.timedelta(seconds=60) +sqlcmd = ['insert into tb values'] +for irow in range(1,11): + start_time += time_interval + sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2)) +affected_rows = c1.execute(' '.join(sqlcmd)) +``` +* query the database +```python +c1.execute('select * from tb') +# fetch all returned results +data = c1.fetchall() +# data is a list of returned rows with each row being a tuple +numOfRows = c1.rowcount +numOfCols = len(c1.description) +for irow in range(numOfRows): + print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]) + +# use the cursor as an iterator to retrieve all returned results +c1.execute('select * from tb') +for data in c1: + print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]) +``` +* close the connection +```python +c1.close() +conn.close() +``` +#### Help information Users can get module information from Python help interface or refer to our [python code example](). 
We list the main classes and methods below: @@ -524,3 +869,17 @@ An example of using the NodeJS connector to create a table with weather data and An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) +[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[3]: https://github.com/taosdata/TDengine +[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/ +[5]: https://github.com/brettwooldridge/HikariCP +[6]: https://github.com/alibaba/druid +[7]: https://github.com/taosdata/TDengine/issues +[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[10]: https://maven.aliyun.com/mvn/search +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo +[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE + diff --git a/documentation/webdocs/markdowndocs/Super Table-ch.md b/documentation/webdocs/markdowndocs/Super Table-ch.md index 9267e00a70454d15431e1aa8d7ae8a52d5e4965a..e75a8d46c38e3501b2c17b4a05b68c0e8fa4a707 100644 --- a/documentation/webdocs/markdowndocs/Super Table-ch.md +++ b/documentation/webdocs/markdowndocs/Super Table-ch.md @@ -54,7 +54,7 @@ STable从属于库,一个STable只属于一个库,但一个库可以有一 说明: 1. TAGS列总长度不能超过512 bytes; - 2. TAGS列的数据类型不能是timestamp和nchar类型; + 2. TAGS列的数据类型不能是timestamp; 3. TAGS列名不能与其他列名相同; 4. TAGS列名不能为预留关键字. 
@@ -169,7 +169,7 @@ SELECT function,… 以温度传感器采集时序数据作为例,示范STable的使用。 在这个例子中,对每个温度计都会建立一张表,表名为温度计的ID,温度计读数的时刻记为ts,采集的值记为degree。通过tags给每个采集器打上不同的标签,其中记录温度计的地区和类型,以方便我们后面的查询。所有温度计的采集量都一样,因此我们用STable来定义表结构。 -###定义STable表结构并使用它创建子表 +###1:定义STable表结构并使用它创建子表 创建STable语句如下: @@ -189,7 +189,7 @@ CREATE TABLE therm4 USING thermometer TAGS ('shanghai', 3); 其中therm1,therm2,therm3,therm4是超级表thermometer四个具体的子表,也即普通的Table。以therm1为例,它表示采集器therm1的数据,表结构完全由thermometer定义,标签location=”beijing”, type=1表示therm1的地区是北京,类型是第1类的温度计。 -###写入数据 +###2:写入数据 注意,写入数据时不能直接对STable操作,而是要对每张子表进行操作。我们分别向四张表therm1,therm2, therm3, therm4写入一条数据,写入语句如下: @@ -200,7 +200,7 @@ INSERT INTO therm3 VALUES ('2018-01-01 00:00:00.000', 24); INSERT INTO therm4 VALUES ('2018-01-01 00:00:00.000', 23); ``` -### 按标签聚合查询 +###3:按标签聚合查询 查询位于北京(beijing)和天津(tianjing)两个地区的温度传感器采样值的数量count(*)、平均温度avg(degree)、最高温度max(degree)、最低温度min(degree),并将结果按所处地域(location)和传感器类型(type)进行聚合。 @@ -211,14 +211,14 @@ WHERE location='beijing' or location='tianjin' GROUP BY location, type ``` -### 按时间周期聚合查询 +###4:按时间周期聚合查询 查询仅位于北京以外地区的温度传感器最近24小时(24h)采样值的数量count(*)、平均温度avg(degree)、最高温度max(degree)和最低温度min(degree),将采集结果按照10分钟为周期进行聚合,并将结果按所处地域(location)和传感器类型(type)再次进行聚合。 ```mysql SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree) FROM thermometer -WHERE name<>'beijing' and ts>=now-1d +WHERE location<>'beijing' and ts>=now-1d INTERVAL(10M) GROUP BY location, type ``` diff --git a/documentation/webdocs/markdowndocs/Super Table.md b/documentation/webdocs/markdowndocs/Super Table.md index 79a1650924373d4fbaa483115694aee1e753d203..609dd11bd278da3398330fa33f857fac65ffb3d5 100644 --- a/documentation/webdocs/markdowndocs/Super Table.md +++ b/documentation/webdocs/markdowndocs/Super Table.md @@ -23,7 +23,7 @@ New keyword "tags" is introduced, where tag_name is the tag name, and tag_type i Note: 1. The bytes of all tags together shall be less than 512 -2. Tag's data type can not be time stamp or nchar +2. Tag's data type can not be time stamp 3. Tag name shall be different from the field name 4. Tag name shall not be the same as system keywords 5. 
Maximum number of tags is 6 @@ -102,7 +102,7 @@ List the number of records, average, maximum, and minimum temperature every 10 m ```mysql SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree) FROM thermometer -WHERE name='beijing' and type=10 and ts>=now-1d +WHERE location='beijing' and type=10 and ts>=now-1d INTERVAL(10M) ``` diff --git a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md index a6a1b4872afb82499352592599e7919646575ff4..347ac4f21f00202a3848cf4a48694dcbd64cb274 100644 --- a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md @@ -18,6 +18,7 @@ TDengine提供类似SQL语法,用户可以在TDengine Shell中使用SQL语句 - 插入记录时,如果时间戳为0,插入数据时使用服务器当前时间 - Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数 - 时间可以加减,比如 now-2h,表明查询时刻向前推2个小时(最近2小时)。数字后面的时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。比如select * from t1 where ts > now-2w and ts <= now-1w, 表示查询两周前整整一周的数据 +- TDengine暂不支持时间窗口按照自然年和自然月切分。Where条件中的时间窗口单位的换算关系如下:interval(1y) 等效于 interval(365d), interval(1n) 等效于 interval(30d), interval(1w) 等效于 interval(7d) TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMicrosecond就可支持微秒。 @@ -424,9 +425,9 @@ SELECT function_list FROM tb_name SELECT function_list FROM stb_name [WHERE where_condition] - [GROUP BY tags] INTERVAL (interval) [FILL ({ VALUE | PREV | NULL | LINEAR})] + [GROUP BY tags] ``` - 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。 diff --git a/documentation/webdocs/markdowndocs/TAOS SQL.md b/documentation/webdocs/markdowndocs/TAOS SQL.md index 2431514fa5e1f02738eaaed2e3440c315afeda12..870529417fbb4dd9dd1e73bb253962e9293e94f4 100644 --- a/documentation/webdocs/markdowndocs/TAOS SQL.md +++ b/documentation/webdocs/markdowndocs/TAOS SQL.md @@ -474,9 +474,9 @@ SELECT function_list FROM tb_name SELECT function_list FROM stb_name [WHERE where_condition] - [GROUP BY tags] INTERVAL (interval) [FILL ({ VALUE | PREV | NULL | LINEAR})] + [GROUP BY tags] ``` The downsampling time window is defined by `interval`, which is at least 10 milliseconds. The query returns a new series of downsampled data that has a series of fixed timestamps with an increment of `interval`. diff --git a/documentation/webdocs/markdowndocs/administrator-ch.md b/documentation/webdocs/markdowndocs/administrator-ch.md index ed822fb8c95b752b9844217915e0af34a3817643..b59c0435a880dfd34bbe7903b5c3f3c81197c1fa 100644 --- a/documentation/webdocs/markdowndocs/administrator-ch.md +++ b/documentation/webdocs/markdowndocs/administrator-ch.md @@ -2,15 +2,15 @@ ## 文件目录结构 -安装TDengine后,默认会在操作系统中生成下列目录或文件: +安装TDengine的过程中,安装程序将在操作系统中创建以下目录或文件: | 目录/文件 | 说明 | | ---------------------- | :------------------------------------------------| -| /etc/taos/taos.cfg | TDengine默认[配置文件] | -| /usr/local/taos/driver | TDengine动态链接库目录 | -| /var/lib/taos | TDengine默认数据文件目录,可通过[配置文件]修改位置. | -| /var/log/taos | TDengine默认日志文件目录,可通过[配置文件]修改位置 | -| /usr/local/taos/bin | TDengine可执行文件目录 | +| /etc/taos/taos.cfg | 默认[配置文件] | +| /usr/local/taos/driver | 动态链接库目录 | +| /var/lib/taos | 默认数据文件目录,可通过[配置文件]修改位置. 
| +| /var/log/taos | 默认日志文件目录,可通过[配置文件]修改位置 | +| /usr/local/taos/bin | 可执行文件目录 | ### 可执行文件 @@ -19,33 +19,89 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 - _taosd_:TDengine服务端可执行文件 - _taos_: TDengine Shell可执行文件 - _taosdump_:数据导出工具 -- *rmtaos*: 一个卸载TDengine的脚本, 请谨慎执行 +- *rmtaos*: 卸载TDengine的脚本, 该脚本会删除全部的程序和数据文件。请务必谨慎执行,如非必须不建议使用。 您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录 ## 服务端配置 -TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修改配置参数,以满足不同场景的需求。配置文件的缺省位置在/etc/taos目录,可以通过taosd命令行执行参数-c指定配置文件目录。比如taosd -c /home/user来指定配置文件位于/home/user这个目录。 +TDengine系统后台服务程序是`taosd`,其启动时候读取的配置文件缺省目录是`/etc/taos`。可以通过命令行执行参数-c指定配置文件目录,比如 +``` +taosd -c /home/user +``` +指定`taosd`启动的时候读取`/home/user`目录下的配置文件taos.cfg。 下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节。**注意:配置修改后,需要重启*taosd*服务才能生效。** -- internalIp: 对外提供服务的IP地址,默认取第一个IP地址 -- mgmtShellPort:管理节点与客户端通信使用的TCP/UDP端口号(默认值是6030)。此端口号在内向后连续的5个端口都会被UDP通信占用,即UDP占用[6030-6034],同时TCP通信也会使用端口[6030]。 -- vnodeShellPort:数据节点与客户端通信使用的TCP/UDP端口号(默认值是6035)。此端口号在内向后连续的5个端口都会被UDP通信占用,即UDP占用[6035-6039],同时TCP通信也会使用端口[6035] -- httpPort:数据节点对外提供RESTful服务使用TCP,端口号[6020] -- dataDir: 数据文件目录,缺省是/var/lib/taos -- maxUsers:用户的最大数量 -- maxDbs:数据库的最大数量 -- maxTables:数据表的最大数量 -- enableMonitor: 系统监测标志位,0:关闭,1:打开 -- logDir: 日志文件目录,缺省是/var/log/taos -- numOfLogLines:日志文件的最大行数 -- debugFlag: 系统debug日志开关,131:仅错误和报警信息,135:所有 +**internalIp** +- 默认值:操作配置的IP地址列表中的第一个IP地址 + +对外提供服务的IP地址。 + +**mgmtShellPort** +- 默认值: _6030_ + +数据库服务中管理节点与客户端通信使用的TCP/UDP端口号。 +> 端口范围 _6030_ - _6034_ 均用于UDP通讯。此外,还使用端口 _6030_ 用于TCP通讯。 + +**vnodeShellPort** +- 默认值: _6035_ + +数据节点与客户端通信使用的TCP/UDP端口号。 +> 端口范围 _6035_ - _6039_ 的5个端口用于UDP通信。此外,还使用端口 _6035_ 用于TCP通讯。 + +**httpPort** +- 默认值: _6020_ + +RESTful服务使用的端口号,所有的HTTP请求(TCP)都需要向该接口发起查询/写入请求。 + +**dataDir** +- 默认值:/var/lib/taos + +数据文件目录,所有的数据文件都将写入该目录。 + +**logDir** +- 默认值:/var/log/taos + +日志文件目录,客户端和服务器的运行日志将写入该目录。 + +**maxUsers** +- 默认值:10,000 + +系统允许创建用户数量的上限 + +**maxDbs** +- 默认值:1,000 + +系统允许的创建数据库的上限 + +**maxTables** +- 默认值:650,000 + +系统允许创建数据表的上限。 +>系统能够创建的表受到多种因素的限制,单纯地增大该参数并不能直接增加系统能够创建的表数量。例如,由于每个表创建均需要消耗一定量的缓存空间,系统可用内存一定的情况下,创建表的总数的上限是一个固定的值。 + +**monitor** +- 默认值:1(激活状态) + +服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括CPU、内存、硬盘、网络带宽、HTTP请求量的监控记录,记录信息存储在`LOG`库中。0表示关闭监控服务,1表示激活监控服务。 + +**numOfLogLines** +- 默认值:10,000,000 + +单个日志文件允许的最大行数(10,000,000行)。 + +**debugFlag** +- 默认值:131(仅输出错误和警告信息) + +系统(服务端和客户端)运行日志开关: +- 131 仅输出错误和警告信息 +- 135 输入错误(ERROR)、警告(WARN)、信息(Info) 不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数: -- days:一个数据文件覆盖的时间长度,单位为天 -- keep:数据库中数据保留的天数 +- days:数据文件存储数据的时间跨度,单位为天 +- keep:数据保留的天数 - rows: 文件块中记录条数 - comp: 文件压缩标志位,0:关闭,1:一阶段压缩,2:两阶段压缩 - ctime:数据从写入内存到写入硬盘的最长时间间隔,单位为秒 @@ -66,19 +122,139 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修 ## 客户端配置 -TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。 +TDengine系统的前台交互客户端应用程序为taos(Windows平台上为taos.exe)。与服务端程序一样,也可以通过设置taos.cfg来配置`taos`启动和运行的配置项。启动的时候如果不指定taos加载配置文件路径,默认读取`/etc/taos/`路径下的`taos.cfg`文件。指定配置文件来启动`taos`的命令如下: + +``` +taos -c /home/cfg/ +``` +**注意:启动设置的是配置文件所在目录,而不是配置文件本身** + +如果`/home/cfg/`目录下没有配置文件,程序会继续启动并打印如下告警信息: +```plaintext +Welcome to the TDengine shell from linux, client version:1.6.4.0 +option file:/home/cfg/taos.cfg not found, all options are set to system default +``` +更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。 + +客户端配置参数说明 + 
+**masterIP** +- 默认值:127.0.0.1 + +客户端连接的TDengine服务器IP地址,如果不设置默认连接127.0.0.1的节点。以下两个命令等效: +``` +taos +taos -h 127.0.0.1 +``` +其中的IP地址是从配置文件中读取的masterIP的值。 + +**locale** +- 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + +TDengine为存储中文、日文、韩文等非ASCII编码的宽字符,提供一种专门的字段类型`nchar`。写入`nchar`字段的数据将统一采用`UCS4-LE`格式进行编码并发送到服务器。需要注意的是,**编码正确性**是客户端来保证。因此,如果用户想要正常使用`nchar`字段来存储诸如中文、日文、韩文等非ASCII字符,需要正确设置客户端的编码格式。 + +客户端的输入的字符均采用操作系统当前默认的编码格式,在Linux系统上多为`UTF-8`,部分中文系统编码则可能是`GB18030`或`GBK`等。在docker环境中默认的编码是`POSIX`。在中文版Windows系统中,编码则是`CP936`。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证`nchar`中的数据正确转换为`UCS4-LE`编码格式。 + +在 Linux 中 locale 的命名规则为: +`<语言>_<地区>.<字符集编码>` +如:`zh_CN.UTF-8`,zh代表中文,CN代表大陆地区,UTF-8表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux系统与Mac OSX系统可以通过设置locale来确定系统的字符编码,由于Windows使用的locale中不是POSIX标准的locale格式,因此在Windows下需要采用另一个配置参数`charset`来指定字符编码。在Linux系统中也可以使用charset来指定字符编码。 -客户端配置参数列表及解释 +**charset** +- 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 -- masterIP:客户端默认发起请求的服务器的IP地址 -- charset:指明客户端所使用的字符集,默认值为UTF-8。TDengine存储nchar类型数据时使用的是unicode存储,因此客户端需要告知服务自己所使用的字符集,也即客户端所在系统的字符集。 -- locale:设置系统语言环境。Linux上客户端与服务端共享 -- defaultUser:默认登录用户,默认值root -- defaultPass:默认登录密码,默认值taosdata +如果配置文件中不设置`charset`,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,**则中断启动过程**。 -TCP/UDP端口,以及日志的配置参数,与server的配置参数完全一样。 +在Linux系统中,locale信息包含了字符编码信息,因此正确设置了Linux系统locale以后可以不用再单独设置charset。例如: +``` +locale zh_CN.UTF-8 +``` +在Windows系统中,无法从locale获取系统当前编码。如果无法从配置文件中读取字符串编码信息,`taos`默认设置为字符编码为`CP936`。其等效在配置文件中添加如下配置: +``` +charset CP936 +``` +如果需要调整字符编码,请查阅当前操作系统使用的编码,并在配置文件中正确设置。 + +在Linux系统中,如果用户同时设置了locale和字符集编码charset,并且locale和charset的不一致,后设置的值将覆盖前面设置的值。 +``` +locale zh_CN.UTF-8 +charset GBK +``` +则`charset`的有效值是`GBK`。 +``` +charset GBK +locale zh_CN.UTF-8 +``` +`charset`的有效值是`UTF-8`。 + +**sockettype** +- 默认值:UDP + +客户端连接服务端的套接字的方式,可以使用`UDP`和`TCP`两种配置。 +在客户端和服务端之间的通讯需要经过恶劣的网络环境下(如公共网络、互联网)、客户端与数据库服务端连接不稳定(由于MTU的问题导致UDP丢包)的情况下,可以将连接的套接字类型调整为`TCP` + +>注意:客户端套接字的类型需要和服务端的套接字类型相同,否则无法连接数据库。 + +**compressMsgSize** +- 默认值:-1(不压缩) + +客户端与服务器之间进行消息通讯过程中,对通讯的消息进行压缩的阈值,默认值为-1(不压缩)。如果要压缩消息,建议设置为64330字节,即大于64330字节的消息体才进行压缩。在配置文件中增加如下配置项即可: +``` +compressMsgSize 64330 +``` +如果配置项设置为0,`compressMsgSize 0`表示对所有的消息均进行压缩。 + +**timezone** +- 默认值:从系统中动态获取当前的时区设置 + +客户端运行系统所在的时区。为应对多时区的数据写入和查询问题,TDengine采用Unix时间戳([Unix Timestamp](https://en.wikipedia.org/wiki/Unix_time))来记录和存储时间戳。Unix时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的Unix时间戳,需要设置正确的时区。 + +在Linux系统中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如: +``` +timezone UTC-8 +timezone GMT-8 +timezone Asia/Shanghai +``` +均是合法的设置东八区时区的格式。 + + +时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词`now`的解析)产生影响。例如: +``` +SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08'; +``` +在东八区,SQL语句等效于 +``` +SELECT count(*) FROM table_name WHERE TS<1554955268000; +``` +在UTC时区,SQL语句等效于 +``` +SELECT count(*) FROM table_name WHERE TS<1554984068000; +``` +为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,`2013-04-12T15:52:01.123+08:00`或者ISO-8601格式时间戳字符串`2013-04-12T15:52:01.123+0800`。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。 + +**defaultUser** +- 默认值:root + +登录用户名,客户端登录的时候,如果不指定用户名,则自动使用该用户名登录。默认情况下,以下的两个命令等效 +``` +taos +taos -u root +``` +用户名为从配置中读取的`defaultUser`配置项。如果更改`defaultUser abc`,则以下两个命令等效: +``` +taos +taos -u abc +``` + +**defaultPass** +- 默认值:taosdata + +登录用户名,客户端登录的时候,如果不指定密码,则自动使用该密码登录。默认情况下,以下的两个命令等效 +``` +taos +taos -ptaosdata +``` -启动taos时,你也可以从命令行指定IP地址、端口号,用户名和密码,否则就从taos.cfg读取。 
+TCP/UDP端口,以及日志的配置参数,与server的配置参数完全一样。使用命令`taos -?` 可查看`taos`允许的可选项。 ## 用户管理 @@ -191,6 +367,6 @@ KILL STREAM ## 系统监控 -TDengine启动后,会自动创建一个监测数据库SYS,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在SYS库里。系统管理员可以从CLI直接查看这个数据库,也可以在WEB通过图形化界面查看这些监测信息。 +TDengine启动后,会自动创建一个监测数据库`LOG`,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在`LOG`库里。系统管理员可以通过客户端程序查看记录库中的运行负载信息,(在企业版中)还可以通过浏览器查看数据的图标可视化结果。 -这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。 \ No newline at end of file +这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项`monitor`将其关闭或打开。 diff --git a/documentation/webdocs/markdowndocs/connector-ch.md b/documentation/webdocs/markdowndocs/connector-ch.md index 7f2f6f5cf6eb109991d80aee6916d5f85afbd8fd..e91c9d667a95563c28f57c130b5cb9ecaaffe8ae 100644 --- a/documentation/webdocs/markdowndocs/connector-ch.md +++ b/documentation/webdocs/markdowndocs/connector-ch.md @@ -4,13 +4,13 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、JAVA、 ## C/C++ Connector -C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine头文件 _taos.h_(安装后,位于_/usr/local/taos/include_): +C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine头文件 _taos.h_(安装后,位于 _/usr/local/taos/include_): ```C #include ``` -在编译时需要链接TDengine动态库_libtaos.so_(安装后,位于/usr/local/taos/driver,gcc编译时,请加上 -ltaos)。 所有API都以返回_-1_或_NULL_均表示失败。 +在编译时需要链接TDengine动态库 _libtaos.so_ (安装后,位于 _/usr/local/taos/driver_,gcc编译时,请加上 -ltaos)。 如未特别说明,当API的返回值是整数时,_0_ 代表成功,其它是代表失败原因的错误码,当返回值是指针时, _NULL_ 表示失败。 ### C/C++同步API @@ -79,6 +79,51 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine **注意**:对于单个数据库连接,在同一时刻只能有一个线程使用该链接调用API,否则会有未定义的行为出现并可能导致客户端crash。客户端应用可以通过建立多个连接进行多线程的数据写入或查询处理。 +### C/C++ 参数绑定接口 + +除了直接调用 `taos_query` 进行查询,TDengine也提供了支持参数绑定的Prepare API,与 MySQL 一样,这些API目前也仅支持用问号`?`来代表待绑定的参数,具体如下: + +- `TAOS_STMT* taos_stmt_init(TAOS *taos)` + + 创建一个 TAOS_STMT 对象用于后续调用。 + +- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)` + + 解析一条sql语句,将解析结果和参数信息绑定到stmt上,如果参数length大于0,将使用此此参数作为sql语句的长度,如等于0,将自动判断sql语句的长度。 + +- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)` + + 进行参数绑定,bind指向一个数组,需保证此数组的元素数量和顺序与sql语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL中的 MYSQL_BIND 一致,具体定义如下: + + ```c + typedef struct TAOS_BIND { + int buffer_type; + void * buffer; + unsigned long buffer_length; // 未实际使用 + unsigned long *length; + int * is_null; + int is_unsigned; // 未实际使用 + int * error; // 未实际使用 + } TAOS_BIND; + ``` + +- `int taos_stmt_add_batch(TAOS_STMT *stmt)` + + 将当前绑定的参数加入批处理中,调用此函数后,可以再次调用`taos_stmt_bind_param`绑定新的参数。需要注意,此函数仅支持 insert/import 语句,如果是select等其他SQL语句,将返回错误。 + +- `int taos_stmt_execute(TAOS_STMT *stmt)` + + 执行准备好的语句。目前,一条语句只能执行一次。 + +- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)` + + 获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result`以释放资源。 + +- `int taos_stmt_close(TAOS_STMT *stmt)` + + 执行完毕,释放所有资源。 + + ### C/C++异步API 同步API之外,TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下,异步API处理数据插入的速度比同步API快2~4倍。异步API采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步API在网络延迟严重的情况下,优点尤为突出。 @@ -143,58 +188,107 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 ## Java Connector -### JDBC接口 +TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。 -如果用户使用Java开发企业级应用,可选用 TDengine 提供的 JDBC Driver 来调用服务。TDengine 提供的 JDBC Driver 是标准 JDBC 规范的子集,遵循 JDBC 标准 (3.0)API 规范,支持现有的各种 Java 开发框架。目前 TDengine 的 JDBC driver 已经发布到 Sonatype Maven Repository。因此用户开发时,需要在 pom.xml 文件中进行如下配置: +由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 -```xml +* libtaos.so 
+ 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 + +* taos.dll + 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 + +> 注意:在 windows 环境开发时需要安装 TDengine 对应的 windows 版本客户端,由于目前没有提供 Linux 环境单独的客户端,需要安装 TDengine 才能使用。 - - - oss-sonatype - oss-sonatype - https://oss.sonatype.org/content/groups/public - - - +TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点: + +* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。 +* 由于不支持删除和修改,所以也不支持事务操作。 +* 目前不支持表间的 union 操作。 +* 目前不支持嵌套查询(nested query),`对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet`。 + + +## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 + +| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | +| --- | --- | --- | +| 1.0.3 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | + +## TDengine DataType 和 Java DataType + +TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: + +| TDengine DataType | Java DataType | +| --- | --- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT, TINYINT |java.lang.Short | +| BOOL | java.lang.Boolean | +| BINARY, NCHAR | java.lang.String | + +## 如何获取 TAOS-JDBCDriver + +### maven 仓库 + +目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。 +* [sonatype][8] +* [mvnrepository][9] +* [maven.aliyun][10] + +maven 项目中使用如下 pom.xml 配置即可: + +```xml com.taosdata.jdbc taos-jdbcdriver - 1.0.1 + 1.0.3 - ``` -TDengine 的驱动程序包的在不同操作系统上依赖不同的本地函数库(均由C语言编写)。Linux系统上,依赖一个名为`libtaos.so` 的本地库,.so即"Shared Object"缩写。成功安装TDengine后,`libtaos.so` 文件会被自动拷贝至`/usr/local/lib/taos`目录下,该目录也包含在Linux上自动扫描路径上。Windows系统上,JDBC驱动程序依赖于一个名为`taos.dll` 的本地库,.dll是动态链接库"Dynamic Link Library"的缩写。Windows上成功安装客户端后,JDBC驱动程序包默认位于`C:/TDengine/driver/JDBC/`目录下;其依赖的动态链接库`taos.dll`文件位于`C:/TDengine/driver/C`目录下,`taos.dll` 会被自动拷贝至系统默认搜索路径`C:/Windows/System32`下。 +### 源码编译打包 -TDengine的JDBC Driver遵循标准JDBC规范,开发人员可以参考Oracle官方的JDBC相关文档来找到具体的接口和方法的定义与用法。TDengine的JDBC驱动在连接配置和支持的方法上与传统数据库驱动稍有不同。 +下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。 -TDengine的JDBC URL规范格式为: -`jdbc:TSDB://{host_ip}:{port}/{database_name}?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +## 使用说明 -其中,`{}`中的内容必须,`[]`中为可选。配置参数说明如下: +### 获取连接 -- user:登陆TDengine所用用户名;默认值root -- password:用户登陆密码;默认值taosdata -- charset:客户端使用的字符集;默认值为系统字符集 -- cfgdir:客户端配置文件目录路径;Linux OS上默认值`/etc/taos` ,Windows OS上默认值 `C:/TDengine/cfg` -- locale:客户端语言环境;默认值系统当前locale -- timezone:客户端使用的时区;默认值为系统当前时区 +如下所示配置即可获取 TDengine Connection: +```java +Class.forName("com.taosdata.jdbc.TSDBDriver"); +String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` +> 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 -以上所有参数均可在调用java.sql.DriverManager类创建连接时指定,示例如下: +TDengine 的 JDBC URL 规范格式为: +`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` -```java -import java.sql.Connection; -import java.sql.DriverManager; -import java.util.Properties; -import com.taosdata.jdbc.TSDBDriver; +其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: + +* user:登录 TDengine 用户名,默认值 root。 +* password:用户登录密码,默认值 taosdata。 +* 
charset:客户端使用的字符集,默认值为系统字符集。 +* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 +* locale:客户端语言环境,默认值系统当前 locale。 +* timezone:客户端使用的时区,默认值为系统当前时区。 +以上参数可以在 3 处配置,`优先级由高到低`分别如下: +1. JDBC URL 参数 + 如上所述,可以在 JDBC URL 的参数中指定。 +2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps) +```java public Connection getConn() throws Exception{ - Class.forName("com.taosdata.jdbc.TSDBDriver"); - String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata"; + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata"; Properties connProps = new Properties(); connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); @@ -207,38 +301,297 @@ public Connection getConn() throws Exception{ } ``` -这些配置参数中除了cfgdir外,均可在客户端配置文件taos.cfg中进行配置。调用java.sql.DriverManager时声明的配置参数优先级最高,JDBC URL的优先级次之,配置文件的优先级最低。例如charset同时在配置文件taos.cfg中配置,也在JDBC URL中配置,则使用JDBC URL中的配置值。 +3. 客户端配置文件 taos.cfg + + linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。 +```properties +# client default username +# defaultUser root + +# client default password +# defaultPass taosdata + +# default system charset +# charset UTF-8 + +# system locale +# locale en_US.UTF-8 +``` +> 更多详细配置请参考[客户端配置][13] + +### 创建数据库和表 -此外,尽管TDengine的JDBC驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致TDengine的Java API并不能与标准完全相同。对于有大量关系型数据库开发经验而初次接触TDengine的开发者来说,有以下一些值的注意的地方: +```java +Statement stmt = conn.createStatement(); + +// create database +stmt.executeUpdate("create database if not exists db"); + +// use database +stmt.executeUpdate("use db"); + +// create table +stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); +``` +> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。 -* TDengine不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法 -* 目前TDengine不支持表间的join或union操作,因此也缺乏对该部分API的支持 -* TDengine支持批量写入,但是支持停留在SQL语句级别,而不是API级别,也就是说用户需要通过写特殊的SQL语句来实现批量 -* 目前TDengine不支持嵌套查询(nested query),对每个Connection的实例,至多只能有一个打开的ResultSet实例;如果在ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver则会自动关闭上一个ResultSet +### 插入数据 + +```java +// insert data +int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); + +System.out.println("insert " + affectedRows + " rows."); +``` +> now 为系统内部函数,默认为服务器当前时间。 +> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。 -对于TDengine操作的报错信息,用户可使用JDBCDriver包里提供的枚举类TSDBError.java来获取error message和error code的列表。对于更多的具体操作的相关代码,请参考TDengine提供的使用示范项目`JDBCDemo`。 +### 查询数据 + +```java +// query data +ResultSet resultSet = stmt.executeQuery("select * from tb"); + +Timestamp ts = null; +int temperature = 0; +float humidity = 0; +while(resultSet.next()){ + + ts = resultSet.getTimestamp(1); + temperature = resultSet.getInt(2); + humidity = resultSet.getFloat("humidity"); + + System.out.printf("%s, %d, %s\n", ts, temperature, humidity); +} +``` +> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 + + +### 关闭资源 + +```java +resultSet.close(); +stmt.close(); +conn.close(); +``` +> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 +## 与连接池使用 + +**HikariCP** + +* 引入相应 HikariCP maven 依赖: +```xml + + com.zaxxer + HikariCP + 3.4.1 + +``` + +* 使用示例如下: +```java + public static void main(String[] args) throws SQLException { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + 
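+      // username/password are the TDengine login credentials (defaultUser/defaultPass in taos.cfg, default root/taosdata)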
config.setPassword("taosdata"); + + config.setMinimumIdle(3); //minimum number of idle connection + config.setMaximumPoolSize(10); //maximum number of connection in the pool + config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool + config.setIdleTimeout(60000); // max idle time for recycle idle connection + config.setConnectionTestQuery("describe log.dn"); //validation query + config.setValidationTimeout(3000); //validation query timeout + + HikariDataSource ds = new HikariDataSource(config); //create datasource + + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... + + connection.close(); // put back to conneciton pool +} +``` +> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 +> 更多 HikariCP 使用问题请查看[官方说明][5] + +**Druid** + +* 引入相应 Druid maven 依赖: + +```xml + + com.alibaba + druid + 1.1.20 + +``` + +* 使用示例如下: +```java +public static void main(String[] args) throws Exception { + Properties properties = new Properties(); + properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver"); + properties.put("url","jdbc:TAOS://127.0.0.1:6030/log"); + properties.put("username","root"); + properties.put("password","taosdata"); + + properties.put("maxActive","10"); //maximum number of connection in the pool + properties.put("initialSize","3");//initial number of connection + properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool + properties.put("minIdle","3");//minimum number of connection in the pool + + properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection + + properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle + properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle + + properties.put("validationQuery","describe log.dn"); //validation query + properties.put("testWhileIdle","true"); // test connection while idle + properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true + properties.put("testOnReturn","false"); // don't need while testWhileIdle is true + + //create druid datasource + DataSource ds = DruidDataSourceFactory.createDataSource(properties); + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... 
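+    // e.g. (assuming the monitor database "log" and its table "dn" exist, as used by the validationQuery above):
+    // ResultSet rs = statement.executeQuery("select * from log.dn");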
+
+    connection.close(); // put back to connection pool
+}
+```
+> 更多 druid 使用问题请查看[官方说明][6]
+
+**注意事项**
+* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。
+
+如下所示,`select server_status()` 执行成功会返回 `1`。
+```shell
+taos> select server_status();
+server_status()|
+================
+1 |
+Query OK, 1 row(s) in set (0.000141s)
+```
+
+## 与框架使用
+
+* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11]
+* Springboot + Mybatis 中使用,可参考 [springbootdemo][12]
+
+## 常见问题
+
+* java.lang.UnsatisfiedLinkError: no taos in java.library.path
+
+  **原因**:程序没有找到依赖的本地函数库 taos。
+
+  **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
+
+* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
+
+  **原因**:目前 TDengine 只支持 64 位 JDK。
+
+  **解决方法**:重新安装 64 位 JDK。
+
+* 其它问题请参考 [Issues][7]
 
 ## Python Connector
+### 安装准备
+* 已安装TDengine, 如果客户端在Windows上,需要安装Windows 版本的TDengine客户端 [(Windows TDengine 客户端安装)](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口)
+* 已安装python 2.7 或 3.4 及以上版本
+* 已安装pip
+
 ### Python客户端安装
+#### Linux
 
 用户可以在源代码的src/connector/python文件夹下找到python2和python3的安装包。用户可以通过pip命令安装:
 
- `pip install src/connector/python/python2/`
+ `pip install src/connector/python/linux/python2/`
 
 或
 
- `pip install src/connector/python/python3/`
+ `pip install src/connector/python/linux/python3/`
 
-如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。
+#### Windows
+在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\windows\system32" 目录下, 然后进入Windows cmd 命令行界面
+```cmd
+cd C:\TDengine\connector\python\windows
+pip install python2\
+```
+或
+```cmd
+cd C:\TDengine\connector\python\windows
+pip install python3\
+```
-### Python客户端接口
+*如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。
+对于windows 客户端,安装TDengine windows 客户端后,将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。
-在使用TDengine的python接口时,需导入TDengine客户端模块:
+### 使用
-```
+#### 代码示例
+
+* 导入TDengine客户端模块
+
+```python
 import taos
 ```
+* 获取连接
+```python
+conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
+c1 = conn.cursor()
+```
+*host 是TDengine 服务端所在IP, config 为客户端配置文件所在目录
+
+* 写入数据
+```python
+import datetime
+
+# 创建数据库
+c1.execute('create database db')
+c1.execute('use db')
+# 建表
+c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
+# 插入数据
+start_time = datetime.datetime(2019, 11, 1)
+affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
+# 批量插入数据
+time_interval = datetime.timedelta(seconds=60)
+sqlcmd = ['insert into tb values']
+for irow in range(1,11):
+    start_time += time_interval
+    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
+affected_rows = c1.execute(' '.join(sqlcmd))
+```
+
+* 查询数据
+```python
+c1.execute('select * from tb')
+# 拉取查询结果
+data = c1.fetchall()
+# 返回的结果是一个列表,每一行构成列表的一个元素
+numOfRows = c1.rowcount
+numOfCols = len(c1.description)
+for irow in range(numOfRows):
+    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))
+
+# 直接使用cursor 循环拉取查询结果
+c1.execute('select * from tb')
+for data in c1:
+    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
+```
+* 关闭连接
+```python
+c1.close()
+conn.close()
+```
+#### 帮助信息
用户可通过python的帮助信息直接查看模块的使用信息,或者参考code/examples/python中的示例程序。以下为部分常用类和方法: @@ -260,21 +613,34 @@ import taos ### HTTP请求格式 -​ `http://:/rest/sql` +``` +http://:/rest/sql +``` + +参数说明: + +- IP: 集群中的任一台主机 +- PORT: 配置文件中httpPort配置项,缺省为6020 + +例如:http://192.168.0.1:6020/rest/sql 是指向IP地址为192.168.0.1的URL. -​ 参数说明: +HTTP请求的Header里需带有身份认证信息,TDengine支持Basic认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。 -​ IP: 集群中的任一台主机 +- 自定义身份认证信息如下所示(稍后介绍) -​ PORT: 配置文件中httpPort配置项,缺省为6020 +``` +Authorization: Taosd +``` -如:http://192.168.0.1:6020/rest/sql 是指向IP地址为192.168.0.1的URL. +- Basic身份认证信息如下所示 -HTTP请求的Header里需带有身份认证信息,TDengine单机版仅支持Basic认证机制。 +``` +Authorization: Basic +``` HTTP请求的BODY里就是一个完整的SQL语句,SQL语句中的数据表应提供数据库前缀,例如\.\。如果表名不带数据库前缀,系统会返回错误。因为HTTP模块只是一个简单的转发,没有当前DB的概念。 -使用curl来发起一个HTTP Request, 语法如下: +使用curl通过自定义身份认证方式来发起一个HTTP Request, 语法如下: ``` curl -H 'Authorization: Basic ' -d '' :/rest/sql @@ -286,11 +652,12 @@ curl -H 'Authorization: Basic ' -d '' :/rest/sql curl -u username:password -d '' :/rest/sql ``` -其中,`TOKEN`为`{username}:{password}`经过Base64编码之后的字符串,例如`root:taosdata`编码后为`cm9vdDp0YW9zZGF0YQ==` +其中,`TOKEN`为`{username}:{password}`经过Base64编码之后的字符串, 例如`root:taosdata`编码后为`cm9vdDp0YW9zZGF0YQ==` ### HTTP返回格式 -返回值为JSON格式,如下: +返回值为JSON格式,如下: + ``` { "status": "succ", @@ -305,26 +672,60 @@ curl -u username:password -d '' :/rest/sql 说明: -- 第一行”status”告知操作结果是成功还是失败; -- 第二行”head”是表的定义,如果不返回结果集,仅有一列“affected_rows”; -- 第三行是具体返回的数据,一排一排的呈现。如果不返回结果集,仅[[affected_rows]] -- 第四行”rows”表明总共多少行数据 +- status: 告知操作结果是成功还是失败 +- head: 表的定义,如果不返回结果集,仅有一列“affected_rows” +- data: 具体返回的数据,一排一排的呈现,如果不返回结果集,仅[[affected_rows]] +- rows: 表明总共多少行数据 -### 使用示例 +### 自定义授权码 + +HTTP请求中需要带有授权码``, 用于身份识别。授权码通常由管理员提供, 可简单的通过发送`HTTP GET`请求来获取授权码, 操作如下: -- 在demo库里查询表t1的所有记录, curl如下: +``` +curl http://:6020/rest/login// +``` - `curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sql` +其中, `ip`是TDengine数据库的IP地址, `username`为数据库用户名, `password`为数据库密码, 返回值为`JSON`格式, 各字段含义如下: - 返回值: +- status:请求结果的标志位 + +- code:返回值代码 + +- desc: 授权码 + +获取授权码示例: + +``` +curl http://192.168.0.1:6020/rest/login/root/taosdata +``` + +返回值: + +``` +{ + "status": "succ", + "code": 0, + "desc": +"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" +} +``` + +### 使用示例 + +- 在demo库里查询表t1的所有记录: + +``` +curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sql` +``` +返回值: ``` { "status": "succ", "head": ["column1","column2","column3"], "data": [ - ["2017-12-12 23:44:25.730", 1, 2.3], - ["2017-12-12 22:44:25.728", 4, 5.6] + ["2017-12-12 22:44:25.728",4,5.60000], + ["2017-12-12 23:44:25.730",1,2.30000] ], "rows": 2 } @@ -332,9 +733,11 @@ curl -u username:password -d '' :/rest/sql - 创建库demo: - `curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql` +``` +curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql` +``` - 返回值: +返回值: ``` { "status": "succ", @@ -344,17 +747,391 @@ curl -u username:password -d '' :/rest/sql } ``` +### 其他用法 + +#### 结果集采用Unix时间戳 + +HTTP请求URL采用`sqlt`时,返回结果集的时间戳将采用Unix时间戳格式表示,例如 + +``` +curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sqlt +``` + +返回值: + +``` +{ + "status": "succ", + "head": ["column1","column2","column3"], + "data": [ + [1513089865728,4,5.60000], + [1513093465730,1,2.30000] + ], + "rows": 2 +} +``` + +#### 结果集采用UTC时间字符串 + +HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间字符串表示,例如 +``` + curl -H 'Authorization: 
Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sqlutc +``` + +返回值: + +``` +{ + "status": "succ", + "head": ["column1","column2","column3"], + "data": [ + ["2017-12-12T22:44:25.728+0800",4,5.60000], + ["2017-12-12T23:44:25.730+0800",1,2.30000] + ], + "rows": 2 +} +``` + +### 重要配置项 + +下面仅列出一些与RESTFul接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效 + +- httpIp: 对外提供RESTFul服务的IP地址,默认绑定到0.0.0.0 +- httpPort: 对外提供RESTFul服务的端口号,默认绑定到6020 +- httpMaxThreads: 启动的线程数量,默认为2 +- httpCacheSessions: 缓存连接的数量,并发请求数目需小于此数值的10倍,默认值为100 +- restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240 +- httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式 +- httpDebugFlag: 日志开关,131:仅错误和报警信息,135:所有,默认131 + + ## Go Connector -TDengine提供了GO驱动程序“taosSql”包。taosSql驱动包是基于GO的“database/sql/driver”接口的实现。用户可在安装后的/usr/local/taos/connector/go目录获得GO的客户端驱动程序。用户需将驱动包/usr/local/taos/connector/go/src/taosSql目录拷贝到应用程序工程的src目录下。然后在应用程序中导入驱动包,就可以使用“database/sql”中定义的接口访问TDengine: +#### 安装TDengine + +Go的连接器使用到了 libtaos.so 和taos.h,因此,在使用Go连接器之前,需要在程序运行的机器上安装TDengine以获得相关的驱动文件。 + +#### Go语言引入package +TDengine提供了GO驱动程序“taosSql”包。taosSql驱动包是基于GO的“database/sql/driver”接口的实现。用户可以通过`go get`命令来获取驱动包。 +```sh +go get github.com/taosdata/TDengine/src/connector/go/src/taosSql +``` +然后在应用程序中导入驱动包,就可以使用“database/sql”中定义的接口访问TDengine: ```Go import ( "database/sql" - _ "taosSql" + _ "github.com/taosdata/TDengine/src/connector/go/src/taosSql" ) ``` taosSql驱动包内采用cgo模式,调用了TDengine的C/C++同步接口,与TDengine进行交互,因此,在数据库操作执行完成之前,客户端应用将处于阻塞状态。单个数据库连接,在同一时刻只能有一个线程调用API。客户应用可以建立多个连接,进行多线程的数据写入或查询处理。 -更多使用的细节,请参考下载目录中的示例源码。 +#### Go语言使用参考 +在Go程序中使用TDengine写入方法大致可以分为以下几步 +1. 打开TDengine数据库链接 + +首先需要调用sql包中的Open方法,打开数据库,并获得db对象 +```go + db, err := sql.Open(taosDriverName, dbuser+":"+dbpassword+"@/tcp("+daemonUrl+")/"+dbname) + if err != nil { + log.Fatalf("Open database error: %s\n", err) + } + defer db.Close() +``` +其中参数为 +- taosDataname: 涛思数据库的名称,其值为字符串"taosSql" +- dbuser和dbpassword: 链接TDengine的用户名和密码,缺省为root和taosdata,类型为字符串 +- daemonUrl: 为TDengine的地址,其形式为`ip address:port`形式,port填写缺省值0即可。例如:"116.118.24.71:0" +- dbname:TDengine中的database名称,通过`create database`创建的数据库。如果为空则在后续的写入和查询操作必须通过”数据库名.超级表名或表名“的方式指定数据库名 + +2. 创建数据库 + +打开TDengine数据库连接后,首选需要创建数据库。基本用法和直接在TDengine客户端shell下一样,通过create database + 数据库名的方法来创建。 +```go + db, err := sql.Open(taosDriverName, dbuser+":"+dbpassword+"@/tcp("+daemonUrl+")/") + if err != nil { + log.Fatalf("Open database error: %s\n", err) + } + defer db.Close() + + //准备创建数据库语句 + sqlcmd := fmt.Sprintf("create database if not exists %s", dbname) + + //执行语句并检查错误 + _, err = db.Exec(sqlcmd) + if err != nil { + log.Fatalf("Create database error: %s\n", err) + } +``` + +3. 创建表、写入和查询数据 + +在创建好了数据库后,就可以开始创建表和写入查询数据了。这些操作的基本思路都是首先组装SQL语句,然后调用db.Exec执行,并检查错误信息和执行相应的处理。可以参考上面的样例代码 + +## Node.js Connector + +TDengine 同时也提供了node.js 的连接器。用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。[具体安装步骤如下](https://github.com/taosdata/tdengine/tree/master/src/connector/nodejs): + +首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器. + +```cmd +npm install td-connector +``` +我们建议用户使用npm 安装node.js连接器。如果您没有安装npm, 可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下 + +To interact with TDengine, we make use of the [node-gyp](https://github.com/nodejs/node-gyp) library. 
To install, you will need to install the following depending on platform (the following instructions are quoted from node-gyp)我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件: + +### Unix + +- `python` (建议`v2.7` , `v3.x.x` 目前还不支持) +- `make` +- c语言编译器比如[GCC](https://gcc.gnu.org) + +### macOS + +- `python` (建议`v2.7` , `v3.x.x` 目前还不支持) + +- Xcode + + - 然后通过Xcode安装 + + ``` + Command Line Tools + ``` + + 在 + ``` + Xcode -> Preferences -> Locations + ``` + + 目录下可以找到这个工具。或者在终端里执行 + + ``` + xcode-select --install + ``` + + + - 该步执行后 `gcc` 和 `make`就被安装上了 + +### Windows + +#### 安装方法1 + +使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具 + +#### 安装方法2 + +手动安装以下工具: + +- 安装Visual Studio相关:[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) +- 安装 [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7` +- 进入`cmd`命令行界面, `npm config set msvs_version 2017` + +如果以上步骤不能成功执行, 可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) + +如果在Windows 10 ARM 上使用ARM64 Node.js, 还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64". + +### 使用方法 + +(http://docs.taosdata.com/node) +以下是node.js 连接器的一些基本使用方法,详细的使用方法可参考[该文档](http://docs.taosdata.com/node) + +#### 连接 + +使用node.js连接器时,必须先require```td-connector```,然后使用 ```taos.connect``` 函数。```taos.connect``` 函数必须提供的参数是```host```,其它参数在没有提供的情况下会使用如下的默认值。最后需要初始化```cursor``` 来和TDengine服务端通信 + +```javascript +const taos = require('td-connector'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}) +var cursor = conn.cursor(); // Initializing a new cursor +``` + +关闭连接可执行 + +```javascript +conn.close(); +``` + +#### 查询 + +可通过 ```cursor.query``` 函数来查询数据库。 + +```javascript +var query = cursor.query('show databases;') +``` + +查询的结果可以通过 ```query.execute()``` 函数获取并打印出来 + +```javascript +var promise = query.execute(); +promise.then(function(result) { + result.pretty(); +}); +``` +格式化查询语句还可以使用```query```的```bind```方法。如下面的示例:```query```会自动将提供的数值填入查询语句的```?```里。 + +```javascript +var query = cursor.query('select * from meterinfo.meters where ts <= ? 
and areaid = ?;').bind(new Date(), 5); +query.execute().then(function(result) { + result.pretty(); +}) +``` +如果在```query```语句里提供第二个参数并设为```true```也可以立即获取查询结果。如下: + + +```javascript +var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true) +promise.then(function(result) { + result.pretty(); +}) +``` +#### 异步函数 +异步查询数据库的操作和上面类似,只需要在`cursor.execute`, `TaosQuery.execute`等函数后面加上`_a`。 +```javascript +var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a() +var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a(); +promise1.then(function(result) { + result.pretty(); +}) +promise2.then(function(result) { + result.pretty(); +}) +``` + + +### 示例 +[这里](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例 + +[这里](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`. + +## CSharp Connector + +在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。 + +#### 安装TDengine客户端 + +C#连接器需要使用`libtaos.so`和`taos.h`。因此,在使用C#连接器之前,需在程序运行的Windows环境安装TDengine的Windows客户端,以便获得相关驱动文件。 + +安装完成后,在文件夹`C:/TDengine/examples/C#`中,将会看到两个文件 + +- TDengineDriver.cs 调用taos.dll文件的Native C方法 +- TDengineTest.cs 参考程序示例 + +在文件夹`C:\Windows\System32`,将会看到`taos.dll`文件 + +#### 使用方法 + +- 将C#接口文件TDengineDriver.cs加入到应用程序所在.NET项目中 +- 参考TDengineTest.cs来定义数据库连接参数,及执行数据插入、查询等操作的方法 +- 因为C#接口需要用到`taos.dll`文件,用户可以将`taos.dll`文件加入.NET解决方案中 + +#### 注意事项 + +- `taos.dll`文件使用x64平台编译,所以.NET项目在生成.exe文件时,“解决方案”/“项目”的“平台”请均选择“x64”。 +- 此.NET接口目前已经在Visual Studio 2013/2015/2017中验证过,其它VS版本尚待验证。 + +#### 第三方驱动 + +Maikebing.Data.Taos是一个基于TDengine的RESTful Connector构建的ADO.Net提供器,该开发包由热心贡献者`麦壳饼@@maikebing`提供,具体请参考 + +``` +https://gitee.com/maikebing/Maikebing.EntityFrameworkCore.Taos +``` + +## Windows客户端及程序接口 + +### 客户端安装 + +在Windows操作系统下,TDengine提供64位的Windows客户端,客户端安装程序为.exe文件,运行该文件即可安装,安装路径为C:\TDengine。Windows的客户端可运行在主流的64位Windows平台之上,客户端目录结构如下: + +``` +├── cfg +├───└── taos.cfg +├── connector +├───├── go +├───├── grafana +├───├── jdbc +├───└── python +├── driver +├───├── taos.dll +├───├── taos.exp +├───└── taos.lib +├── examples +├───├── bash +├───├── c +├───├── C# +├───├── go +├───├── JDBC +├───├── lua +├───├── matlab +├───├── nodejs +├───├── python +├───├── R +├───└── rust +├── include +├───└── taos.h +└── taos.exe +``` + +其中,最常用的文件列出如下: + ++ Client可执行文件: C:/TDengine/taos.exe ++ 配置文件: C:/TDengine/cfg/taos.cfg ++ C驱动程序目录: C:/TDengine/driver ++ C驱动程序头文件: C:/TDengine/include ++ JDBC驱动程序目录: C:/TDengine/connector/jdbc ++ GO驱动程序目录:C:/TDengine/connector/go ++ Python驱动程序目录:C:/TDengine/connector/python ++ C#驱动程序及示例代码: C:/TDengine/examples/C# ++ 日志目录(第一次运行程序时生成):C:/TDengine/log + +### 注意事项 + +#### Shell工具注意事项 + +在开始菜单中搜索cmd程序,通过命令行方式执行taos.exe即可打开TDengine的Client程序,如下所示,其中ServerIP为TDengine所在Linux服务器的IP地址 + +``` +taos -h +``` + +在cmd中对taos的使用与Linux平台没有差别,但需要注意以下几点: + ++ 确保Windows防火墙或者其他杀毒软件处于关闭状态,TDengine的服务端与客户端通信的端口请参考`服务端配置`章节 ++ 确认客户端连接时指定了正确的服务器IP地址 ++ ping服务器IP,如果没有反应,请检查你的网络 + +#### C++接口注意事项 + +TDengine在Window系统上提供的API与Linux系统是相同的, 应用程序使用时,需要包含TDengine头文件taos.h,连接时需要链接TDengine库taos.lib,运行时将taos.dll放到可执行文件目录下。 + +#### JDBC接口注意事项 + +在Windows系统上,应用程序可以使用JDBC接口来操纵数据库,使用JDBC接口的注意事项如下: + ++ 将JDBC驱动程序(JDBCDriver-1.0.0-dist.jar)放置到当前的CLASS_PATH中; + ++ 将Windows开发包(taos.dll)放置到system32目录下。 + +#### python接口注意事项 +在Windows系统上,应用程序可以通过导入taos这个模块来操纵数据库,使用python接口的注意事项如下: + ++ 确定在Windows上安装了TDengine客户端 + ++ 
将Windows开发包(taos.dll)放置到system32目录下。 + + +[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[3]: https://github.com/taosdata/TDengine +[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/ +[5]: https://github.com/brettwooldridge/HikariCP +[6]: https://github.com/alibaba/druid +[7]: https://github.com/taosdata/TDengine/issues +[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[10]: https://maven.aliyun.com/mvn/search +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo +[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 1c9674338138e56c95dd0b395417a366f06fdad8..fa289c277cf273396375690c8a496fd4eacd77fe 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -58,6 +58,12 @@ # The server and client should have the same socket type. Otherwise, connect will fail. # sockettype udp +# The compressed rpc message, option: +# -1 (no compression) +# 0 (all message compressed), +# > 0 (rpc message body which larger than this value will be compressed) +# compressMsgSize -1 + # RPC re-try timer, millisecond # rpcTimer 300 @@ -94,6 +100,9 @@ # default system charset # charset UTF-8 +# system time zone +# timezone Asia/Shanghai (CST, +0800) + # enable/disable commit log # clog 1 diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst index b313f90e90eeb6c4c29e70a6e276aa5e49b3e786..352060556c9f53db19e3b6b74a1f94306762dfa4 100644 --- a/packaging/deb/DEBIAN/preinst +++ b/packaging/deb/DEBIAN/preinst @@ -9,13 +9,13 @@ fi if pidof taosd &> /dev/null; then if pidof systemd &> /dev/null; then ${csudo} systemctl stop taosd || : - elif $(which insserv &> /dev/null); then - ${csudo} service taosd stop || : - elif $(which update-rc.d &> /dev/null); then + elif $(which service &> /dev/null); then ${csudo} service taosd stop || : else pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - ${csudo} kill -9 ${pid} || : + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi fi echo "Stop taosd service success!" 
sleep 1 diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm index 1af6a0494331774a12fd297979dc98914490ce97..d24502a1cb8e69ddaf3989a89e51cc07dfb55f00 100644 --- a/packaging/deb/DEBIAN/prerm +++ b/packaging/deb/DEBIAN/prerm @@ -35,6 +35,8 @@ else ${csudo} rm -f ${data_link_dir} || : pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - ${csudo} kill -9 ${pid} || : + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi fi diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index d54953113adf0b872bf538f17b97034f96483c37..5c2df734faf3195d783be9f337e3bd5f58cb1f64 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -7,6 +7,7 @@ compile_dir=$1 output_dir=$2 tdengine_ver=$3 +armver=$4 script_dir="$(dirname $(readlink -f $0))" top_dir="$(readlink -m ${script_dir}/../..)" @@ -24,8 +25,7 @@ fi mkdir -p ${pkg_dir} cd ${pkg_dir} -versioninfo=$(${script_dir}/../tools/get_version.sh ${script_dir}/../../src/util/src/version.c) -libfile="libtaos.so.${versioninfo}" +libfile="libtaos.so.${tdengine_ver}" # create install dir install_home_path="/usr/local/taos" @@ -49,6 +49,7 @@ cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_pat cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include +cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples cp -r ${top_dir}/src/connector/grafana ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector @@ -63,7 +64,16 @@ debver="Version: "$tdengine_ver sed -i "2c$debver" ${pkg_dir}/DEBIAN/control #get taos version, then set deb name -debname="tdengine-"${tdengine_ver}".deb" +if [ -z "$armver" ]; then + debname="TDengine-"${tdengine_ver}".deb" +elif [ "$armver" == "arm64" ]; then + debname="TDengine-"${tdengine_ver}"-arm64.deb" +elif [ "$armver" == "arm32" ]; then + debname="TDengine-"${tdengine_ver}"-arm32.deb" +else + echo "input parameter error!!!" + return +fi # make deb package dpkg -b ${pkg_dir} $debname diff --git a/packaging/release.sh b/packaging/release.sh index 2091bed220ed11977dc30f5fd272e2369c2903ba..70f7af652f172b096ff734d65b2049d4d4e77ea2 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -3,7 +3,9 @@ # Generate the deb package for ubunt, or rpm package for centos, or tar.gz package for other linux os set -e -#set -x +# set -x + +armver=$1 curr_dir=$(pwd) script_dir="$(dirname $(readlink -f $0))" @@ -107,24 +109,32 @@ build_time=$(date +"%F %R") echo "char version[64] = \"${version}\";" > ${versioninfo} echo "char compatible_version[64] = \"${compatible_version}\";" >> ${versioninfo} echo "char gitinfo[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo} +echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo} echo "char buildinfo[512] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo} # 2. 
cmake executable file -#default use debug mode -compile_mode="debug" -if [[ $1 == "Release" ]] || [[ $1 == "release" ]]; then - compile_mode="Release" -fi -compile_dir="${top_dir}/${compile_mode}" +compile_dir="${top_dir}/debug" if [ -d ${compile_dir} ]; then - ${csudo} rm -rf ${compile_dir} + ${csudo} rm -rf ${compile_dir} fi ${csudo} mkdir -p ${compile_dir} cd ${compile_dir} -${csudo} cmake -DCMAKE_BUILD_TYPE=${compile_mode} ${top_dir} -${csudo} make + +# arm only support lite ver +if [ -z "$armver" ]; then + cmake ../ +elif [ "$armver" == "arm64" ]; then + cmake ../ -DARMVER=arm64 +elif [ "$armver" == "arm32" ]; then + cmake ../ -DARMVER=arm32 +else + echo "input parameter error!!!" + return +fi + +make cd ${curr_dir} @@ -140,7 +150,7 @@ if [ -d ${output_dir} ]; then fi ${csudo} mkdir -p ${output_dir} cd ${script_dir}/deb -${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} +${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${armver} echo "do rpm package for the centos system" output_dir="${top_dir}/rpms" @@ -149,11 +159,12 @@ if [ -d ${output_dir} ]; then fi ${csudo} mkdir -p ${output_dir} cd ${script_dir}/rpm -${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} +${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${armver} echo "do tar.gz package for all systems" cd ${script_dir}/tools -${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" +${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${armver} +${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${armver} # 4. Clean up temporary compile directories #${csudo} rm -rf ${compile_dir} diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh index 3b0f1d098ee580f4651832300dad58ec18e07e7f..aef01875cbb15963f9289983c8e42649707b10bf 100755 --- a/packaging/rpm/makerpm.sh +++ b/packaging/rpm/makerpm.sh @@ -2,10 +2,14 @@ # # Generate rpm package for centos +#set -e +#set -x + #curr_dir=$(pwd) compile_dir=$1 output_dir=$2 tdengine_ver=$3 +armver=$4 script_dir="$(dirname $(readlink -f $0))" top_dir="$(readlink -m ${script_dir}/../..)" @@ -24,8 +28,25 @@ if command -v sudo > /dev/null; then csudo="sudo" fi +function cp_rpm_package() { +local cur_dir +cd $1 +cur_dir=$(pwd) + +for dirlist in $(ls ${cur_dir}); do + if test -d ${dirlist}; then + cd ${dirlist} + cp_rpm_package ${cur_dir}/${dirlist} + cd .. + fi + if test -e ${dirlist}; then + cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm + fi +done +} + if [ -d ${pkg_dir} ]; then - ${csudo} rm -rf ${pkg_dir} + ${csudo} rm -rf ${pkg_dir} fi ${csudo} mkdir -p ${pkg_dir} cd ${pkg_dir} @@ -35,7 +56,14 @@ ${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file} # copy rpm package to output_dir, then clean temp dir -#echo "rmpbuild end, cur_dir: $(pwd) " -${csudo} cp -rf RPMS/* ${output_dir} +#${csudo} cp -rf RPMS/* ${output_dir} +cp_rpm_package ${pkg_dir}/RPMS + +if [ "$armver" == "arm64" ]; then + mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/TDengine-${tdengine_ver}-arm64.rpm +elif [ "$armver" == "arm32" ]; then + mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/TDengine-${tdengine_ver}-arm32.rpm +fi + cd .. 
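+# clean up the temporary rpmbuild directory after the package has been copied to output_dir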
${csudo} rm -rf ${pkg_dir} diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 6e9e5e6f8776607abb3ccc11296d0cf14cd90ba3..ef02fb90fca722571271207cd21b22f0080f73bb 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -2,7 +2,7 @@ %define cfg_install_dir /etc/taos %define __strip /bin/true -Name: tdengine +Name: TDengine Version: %{_version} Release: 3%{?dist} Summary: tdengine from taosdata @@ -39,8 +39,7 @@ echo topdir: %{_topdir} echo version: %{_version} echo buildroot: %{buildroot} -versioninfo=$(%{_compiledir}/../packaging/tools/get_version.sh ../../src/util/src/version.c) -libfile="libtaos.so.${versioninfo}" +libfile="libtaos.so.%{_version}" # create install path, and cp file mkdir -p %{buildroot}%{homepath}/bin @@ -62,6 +61,7 @@ cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include +cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include cp -r %{_compiledir}/../src/connector/grafana %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector @@ -79,18 +79,17 @@ fi if pidof taosd &> /dev/null; then if pidof systemd &> /dev/null; then ${csudo} systemctl stop taosd || : - elif $(which insserv &> /dev/null); then - ${csudo} service taosd stop || : - elif $(which update-rc.d &> /dev/null); then + elif $(which service &> /dev/null); then ${csudo} service taosd stop || : else pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - ${csudo} kill -9 ${pid} || : + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi fi echo "Stop taosd service success!" sleep 1 fi - # if taos.cfg already softlink, remove it if [ -f %{cfg_install_dir}/taos.cfg ]; then ${csudo} rm -f %{homepath}/cfg/taos.cfg || : @@ -138,13 +137,16 @@ if [ $1 -eq 0 ];then ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : ${csudo} rm -f ${lib_link_dir}/libtaos.* || : ${csudo} rm -f ${log_link_dir} || : ${csudo} rm -f ${data_link_dir} || : pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - ${csudo} kill -9 ${pid} || : + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi fi fi diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 6b5af3f9ef68705e55b6200adb6dd6b3f806aecb..e1bcce401d419ce7a97c4110161777cfeea4f0e5 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# This file is used to install TAOS time-series database on linux systems. The operating system +# This file is used to install database on linux systems. 
The operating system # is required to use systemd to manage services at boot set -e @@ -41,19 +41,58 @@ if command -v sudo > /dev/null; then csudo="sudo" fi +initd_mod=0 service_mod=2 if pidof systemd &> /dev/null; then service_mod=0 -elif $(which update-rc.d &> /dev/null); then +elif $(which service &> /dev/null); then service_mod=1 - service_config_dir="/etc/init.d" + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi else service_mod=2 fi + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then + echo "this is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then + echo "this is debian system" + os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then + echo "this is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then + echo "this is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then + echo "this is fedora system" + os_type=2 +else + echo "this is other linux system" + os_type=0 +fi + function kill_taosd() { pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - ${csudo} kill -9 pid || : + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi } function install_main_path() { @@ -81,7 +120,7 @@ function install_bin() { #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : + [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : @@ -89,7 +128,7 @@ function install_bin() { function install_lib() { # Remove links - ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* @@ -98,24 +137,26 @@ function install_lib() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function install_config() { #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! 
-f ${cfg_install_dir}/taos.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg } + function install_log() { ${csudo} rm -rf ${log_dir} || : ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} @@ -138,14 +179,26 @@ function install_examples() { } function clean_service_on_sysvinit() { - restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof taosd &> /dev/null; then ${csudo} service taosd stop || : fi - ${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --del taosd || : + elif ((${initd_mod}==2)); then + ${csudo} insserv -r taosd || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d -f taosd remove || : + fi + ${csudo} rm -f ${service_config_dir}/taosd || : - ${csudo} update-rc.d -f taosd remove || : - ${csudo} init q || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi } function install_service_on_sysvinit() { @@ -154,14 +207,27 @@ function install_service_on_sysvinit() { sleep 1 # Install taosd service - ${csudo} cp -f ${script_dir}/init.d/taosd ${install_main_dir}/init.d - ${csudo} cp ${script_dir}/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd - restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - - ${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" - # TODO: for centos, change here - ${csudo} update-rc.d taosd defaults - # chkconfig mysqld on + + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/taosd.deb ${install_main_dir}/init.d/taosd + ${csudo} cp ${script_dir}/init.d/taosd.deb ${service_config_dir}/taosd && ${csudo} chmod a+x ${service_config_dir}/taosd + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/taosd.rpm ${install_main_dir}/init.d/taosd + ${csudo} cp ${script_dir}/init.d/taosd.rpm ${service_config_dir}/taosd && ${csudo} chmod a+x ${service_config_dir}/taosd + fi + + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add taosd || : + ${csudo} chkconfig --level 2345 taosd on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv taosd || : + ${csudo} insserv -d taosd || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d taosd defaults || : + fi } function clean_service_on_systemd() { @@ -211,7 +277,7 @@ function install_service() { elif ((${service_mod}==1)); then install_service_on_sysvinit else - # must manual start taosd + # must manual stop taosd kill_taosd fi } @@ -247,9 +313,9 @@ vercomp () { function is_version_compatible() { - curr_version=$(${bin_dir}/taosd -V | cut -d ' ' -f 1) + curr_version=$(${bin_dir}/taosd -V | head -1 | cut -d ' ' -f 3) - min_compatible_version=$(${script_dir}/bin/taosd -V | cut -d ' ' -f 2) + min_compatible_version=$(${script_dir}/bin/taosd -V | head -1 | cut 
-d ' ' -f 5) vercomp $curr_version $min_compatible_version case $? in @@ -265,7 +331,7 @@ function update_TDengine() { echo "File taos.tar.gz does not exist" exit 1 fi - tar -zxf taos.tar.gz + tar -zxf taos.tar.gz # Check if version compatible if ! is_version_compatible; then @@ -273,7 +339,7 @@ function update_TDengine() { return 1 fi - echo -e "${GREEN}Start to update TDEngine...${NC}" + echo -e "${GREEN}Start to update TDengine...${NC}" # Stop the service if running if pidof taosd &> /dev/null; then if ((${service_mod}==0)); then @@ -305,8 +371,7 @@ function update_TDengine() { if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} update-rc.d taosd default ${RED} for the first time${NC}" - echo -e " : ${csudo} service taosd start ${RED} after${NC}" + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" else echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" fi @@ -315,7 +380,7 @@ function update_TDengine() { echo echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" else - install_bin $1 + install_bin install_config echo @@ -331,9 +396,9 @@ function install_TDengine() { echo "File taos.tar.gz does not exist" exit 1 fi - tar -zxf taos.tar.gz + tar -zxf taos.tar.gz - echo -e "${GREEN}Start to install TDEngine...${NC}" + echo -e "${GREEN}Start to install TDengine...${NC}" install_main_path @@ -361,10 +426,9 @@ function install_TDengine() { if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} update-rc.d taosd default ${RED} for the first time${NC}" - echo -e " : ${csudo} service taosd start ${RED} after${NC}" + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" else - echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" + echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" fi echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 39437c145e0cf7881aa61344435b55663191a9ee..7560ebca4140688dbbee461e58ddcfef9a4b2391 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -1,4 +1,201 @@ #!/bin/bash +# +# This file is used to install TDengine client on linux systems. 
The operating system +# is required to use systemd to manage services at boot +set -e +#set -x + +# -----------------------Variables definition--------------------- script_dir=$(dirname $(readlink -m "$0")) -${script_dir}/install.sh client +# Dynamic directory +data_dir="/var/lib/taos" +log_dir="/var/log/taos" + +log_link_dir="/usr/local/taos/log" + +cfg_install_dir="/etc/taos" + +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/taos" + +# old bin dir +bin_dir="/usr/local/taos/bin" + + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +function kill_client() { + pid=$(ps -ef | grep "taos" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver + ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/taos || : + ${csudo} rm -f ${bin_link_dir}/taosdump || : + ${csudo} rm -f ${bin_link_dir}/rmtaos || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : + [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : +} + +function clean_lib() { + sudo rm -f /usr/lib/libtaos.so || : + sudo rm -rf ${lib_dir} || : +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_config() { + #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : + + if [ ! 
-f ${cfg_install_dir}/taos.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org + ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function update_TDengine() { + # Start to update + if [ ! -e taos.tar.gz ]; then + echo "File taos.tar.gz does not exist" + exit 1 + fi + tar -zxf taos.tar.gz + + echo -e "${GREEN}Start to update TDengine client...${NC}" + # Stop the client shell if running + if pidof taos &> /dev/null; then + kill_client + sleep 1 + fi + + install_main_path + + install_log + install_header + install_lib + install_connector + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mTDengine client is updated successfully!${NC}" + + rm -rf $(tar -tf taos.tar.gz) +} + +function install_TDengine() { + # Start to install + if [ ! -e taos.tar.gz ]; then + echo "File taos.tar.gz does not exist" + exit 1 + fi + tar -zxf taos.tar.gz + + echo -e "${GREEN}Start to install TDengine client...${NC}" + + install_main_path + install_log + install_header + install_lib + install_connector + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}" + + rm -rf $(tar -tf taos.tar.gz) +} + + +## ==============================Main program starts from here============================ +# Install or updata client and client +# if server is already install, don't install client + if [ -e ${bin_dir}/taosd ]; then + echo -e "\033[44;32;1mThere are already installed TDengine server, so don't need install client!${NC}" + exit 0 + fi + + if [ -x ${bin_dir}/taos ]; then + update_flag=1 + update_TDengine + else + install_TDengine + fi diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 4660ba5b2aafb197f086fc7b4804c910116f49be..b461d5c46da502651823562ae5a7af011a7f8a93 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -47,21 +47,54 @@ initd_mod=0 service_mod=2 if pidof systemd &> /dev/null; then service_mod=0 -elif $(which insserv &> /dev/null); then +elif $(which service &> /dev/null); then service_mod=1 - initd_mod=1 - service_config_dir="/etc/init.d" -elif $(which update-rc.d &> /dev/null); then - service_mod=1 - initd_mod=2 - service_config_dir="/etc/init.d" + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi else service_mod=2 fi + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi 
"ubuntu" ; then + echo "this is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then + echo "this is debian system" + os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then + echo "this is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then + echo "this is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then + echo "this is fedora system" + os_type=2 +else + echo "this is other linux system" + os_type=0 +fi + function kill_taosd() { pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - ${csudo} kill -9 ${pid} || : + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi } function install_main_path() { @@ -109,9 +142,10 @@ function install_lib() { function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h || : - ${csudo} cp -f ${source_dir}/src/inc/taos.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function install_config() { @@ -152,20 +186,26 @@ function install_examples() { } function clean_service_on_sysvinit() { - restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof taosd &> /dev/null; then ${csudo} service taosd stop || : fi - ${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - ${csudo} rm -f ${service_config_dir}/taosd || : if ((${initd_mod}==1)); then - ${csudo} grep -q -F "taos" /etc/inittab && ${csudo} insserv -r taosd || : + ${csudo} chkconfig --del taosd || : elif ((${initd_mod}==2)); then - ${csudo} grep -q -F "taos" /etc/inittab && ${csudo} update-rc.d -f taosd remove || : + ${csudo} insserv -r taosd || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d -f taosd remove || : + fi + + ${csudo} rm -f ${service_config_dir}/taosd || : + + if $(which init &> /dev/null); then + ${csudo} init q || : fi -# ${csudo} update-rc.d -f taosd remove || : - ${csudo} init q || : } function install_service_on_sysvinit() { @@ -174,19 +214,26 @@ function install_service_on_sysvinit() { sleep 1 # Install taosd service + if ((${os_type}==1)); then ${csudo} cp -f ${script_dir}/../deb/init.d/taosd ${install_main_dir}/init.d ${csudo} cp ${script_dir}/../deb/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd - restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - - ${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" - # TODO: for centos, change here + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/../rpm/init.d/taosd ${install_main_dir}/init.d + ${csudo} cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd + fi + + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" + if ((${initd_mod}==1)); then - ${csudo} insserv taosd || : + ${csudo} chkconfig --add taosd || : + ${csudo} 
chkconfig --level 2345 taosd on || : elif ((${initd_mod}==2)); then + ${csudo} insserv taosd || : + ${csudo} insserv -d taosd || : + elif ((${initd_mod}==3)); then ${csudo} update-rc.d taosd defaults || : fi -# ${csudo} update-rc.d taosd defaults - # chkconfig mysqld on } function clean_service_on_systemd() { @@ -236,7 +283,7 @@ function install_service() { elif ((${service_mod}==1)); then install_service_on_sysvinit else - # must manual start taosd + # must manual stop taosd kill_taosd fi } diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh new file mode 100755 index 0000000000000000000000000000000000000000..f8d21e202b8649c03d40d33d1c43c6338b12f790 --- /dev/null +++ b/packaging/tools/makeclient.sh @@ -0,0 +1,78 @@ +#!/bin/bash +# +# Generate tar.gz package for linux client +set -e +set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +armver=$4 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -m ${script_dir}/../..)" + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' +install_dir="${release_dir}/TDengine-client-${version}" + +# Directories and files. +bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh" +lib_files="${build_dir}/lib/libtaos.so.${version}" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +cfg_dir="${top_dir}/packaging/cfg" +install_files="${script_dir}/install_client.sh" + +# make directories. +mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* + +cd ${install_dir} +tar -zcv -f taos.tar.gz * --remove-files || : + +cd ${curr_dir} +cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install* + +# Copy example code +mkdir -p ${install_dir}/examples +cp -r ${top_dir}/tests/examples/c ${install_dir}/examples +cp -r ${top_dir}/tests/examples/JDBC ${install_dir}/examples +cp -r ${top_dir}/tests/examples/matlab ${install_dir}/examples +cp -r ${top_dir}/tests/examples/python ${install_dir}/examples +cp -r ${top_dir}/tests/examples/R ${install_dir}/examples +cp -r ${top_dir}/tests/examples/go ${install_dir}/examples + +# Copy driver +mkdir -p ${install_dir}/driver +cp ${lib_files} ${install_dir}/driver + +# Copy connector +connector_dir="${code_dir}/connector" +mkdir -p ${install_dir}/connector +cp ${build_dir}/lib/*.jar ${install_dir}/connector +cp -r ${connector_dir}/grafana ${install_dir}/connector/ +cp -r ${connector_dir}/python ${install_dir}/connector/ +cp -r ${connector_dir}/go ${install_dir}/connector + +# Copy release note +# cp ${script_dir}/release_note ${install_dir} + +# exit 1 + +cd ${release_dir} +if [ -z "$armver" ]; then + tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files +elif [ "$armver" == "arm64" ]; then + tar -zcv -f "$(basename ${install_dir})-arm64.tar.gz" $(basename ${install_dir}) --remove-files +elif [ "$armver" == "arm32" ]; then + tar -zcv -f "$(basename ${install_dir})-arm32.tar.gz" $(basename ${install_dir}) --remove-files +fi + +cd ${curr_dir} diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 1a24ae8a1f4481abc69e27d6e654bf80cbb3225c..714b74dbe6952a5ef9387e9709551fb67cf441bf 100755 --- a/packaging/tools/makepkg.sh +++ 
b/packaging/tools/makepkg.sh @@ -6,6 +6,7 @@ curr_dir=$(pwd) compile_dir=$1 version=$2 build_time=$3 +armver=$4 script_dir="$(dirname $(readlink -f $0))" top_dir="$(readlink -m ${script_dir}/../..)" @@ -15,16 +16,15 @@ build_dir="${compile_dir}/build" code_dir="${top_dir}/src" release_dir="${top_dir}/release" -package_name='linux' -install_dir="${release_dir}/taos-${version}-${package_name}-$(echo ${build_time}| tr ': ' -)" +#package_name='linux' +install_dir="${release_dir}/TDengine-${version}" # Directories and files. bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdump ${script_dir}/remove.sh" -versioninfo=$(${script_dir}/get_version.sh ${code_dir}/util/src/version.c) -lib_files="${build_dir}/lib/libtaos.so.${versioninfo}" -header_files="${code_dir}/inc/taos.h" -cfg_files="${top_dir}/packaging/cfg/*.cfg" -install_files="${script_dir}/install.sh ${script_dir}/install_client.sh" +lib_files="${build_dir}/lib/libtaos.so.${version}" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +cfg_dir="${top_dir}/packaging/cfg" +install_files="${script_dir}/install.sh" # Init file #init_dir=${script_dir}/deb @@ -33,29 +33,32 @@ install_files="${script_dir}/install.sh ${script_dir}/install_client.sh" #fi #init_files=${init_dir}/taosd # temp use rpm's taosd. TODO: later modify according to os type -init_files=${script_dir}/../rpm/taosd +init_file_deb=${script_dir}/../deb/taosd +init_file_rpm=${script_dir}/../rpm/taosd # make directories. mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_files} ${install_dir}/cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* -mkdir -p ${install_dir}/init.d && cp ${init_files} ${install_dir}/init.d +mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb +mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm cd ${install_dir} -tar -zcv -f taos.tar.gz * --remove-files || : +tar -zcv -f taos.tar.gz * --remove-files || : cd ${curr_dir} cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install* # Copy example code mkdir -p ${install_dir}/examples -cp -r ${top_dir}/tests/examples/c ${install_dir}/examples -cp -r ${top_dir}/tests/examples/JDBC ${install_dir}/examples -cp -r ${top_dir}/tests/examples/matlab ${install_dir}/examples -cp -r ${top_dir}/tests/examples/python ${install_dir}/examples -cp -r ${top_dir}/tests/examples/R ${install_dir}/examples -cp -r ${top_dir}/tests/examples/go ${install_dir}/examples +examples_dir="${top_dir}/tests/examples" +cp -r ${examples_dir}/c ${install_dir}/examples +cp -r ${examples_dir}/JDBC ${install_dir}/examples +cp -r ${examples_dir}/matlab ${install_dir}/examples +cp -r ${examples_dir}/python ${install_dir}/examples +cp -r ${examples_dir}/R ${install_dir}/examples +cp -r ${examples_dir}/go ${install_dir}/examples # Copy driver mkdir -p ${install_dir}/driver @@ -64,18 +67,23 @@ cp ${lib_files} ${install_dir}/driver # Copy connector connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector +cp ${build_dir}/lib/*.jar ${install_dir}/connector cp -r ${connector_dir}/grafana ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/go ${install_dir}/connector -cp ${build_dir}/lib/*.jar ${install_dir}/connector - # Copy 
release note -cp ${script_dir}/release_note ${install_dir} +# cp ${script_dir}/release_note ${install_dir} # exit 1 cd ${release_dir} -tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files +if [ -z "$armver" ]; then + tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files +elif [ "$armver" == "arm64" ]; then + tar -zcv -f "$(basename ${install_dir})-arm64.tar.gz" $(basename ${install_dir}) --remove-files +elif [ "$armver" == "arm32" ]; then + tar -zcv -f "$(basename ${install_dir})-arm32.tar.gz" $(basename ${install_dir}) --remove-files +fi cd ${curr_dir} diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 9e1878365737d614369d536932ba0b00918d2a4f..a62f7a5eeb44730f4d68d5f018eeea49b807da3a 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -42,14 +42,18 @@ initd_mod=0 service_mod=2 if pidof systemd &> /dev/null; then service_mod=0 -elif $(which insserv &> /dev/null); then +elif $(which service &> /dev/null); then service_mod=1 - initd_mod=1 - service_config_dir="/etc/init.d" -elif $(which update-rc.d &> /dev/null); then - service_mod=1 - initd_mod=2 - service_config_dir="/etc/init.d" + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi else service_mod=2 fi @@ -57,12 +61,15 @@ fi function kill_taosd() { # ${csudo} pkill -f taosd || : pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - ${csudo} kill -9 ${pid} || : + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi } function install_include() { - ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h|| : ${csudo} ln -s ${inc_dir}/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h } function install_lib() { @@ -102,20 +109,26 @@ function install_config() { } function clean_service_on_sysvinit() { - restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof taosd &> /dev/null; then ${csudo} service taosd stop || : fi - ${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - ${csudo} rm -f ${service_config_dir}/taosd || : - + if ((${initd_mod}==1)); then - ${csudo} grep -q -F "taos" /etc/inittab && ${csudo} insserv -r taosd || : + ${csudo} chkconfig --del taosd || : elif ((${initd_mod}==2)); then - ${csudo} grep -q -F "taos" /etc/inittab && ${csudo} update-rc.d -f taosd remove || : + ${csudo} insserv -r taosd || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d -f taosd remove || : + fi + + ${csudo} rm -f ${service_config_dir}/taosd || : + + if $(which init &> /dev/null); then + ${csudo} init q || : fi -# ${csudo} update-rc.d -f taosd remove || : - ${csudo} init q || : } function install_service_on_sysvinit() { @@ -126,18 +139,24 @@ function install_service_on_sysvinit() { # Install taosd service ${csudo} cp %{init_d_dir}/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd - restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - - ${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" - # TODO: for centos, change here - ${csudo} update-rc.d taosd 
defaults - # chkconfig mysqld on + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add taosd || : + ${csudo} chkconfig --level 2345 taosd on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv taosd || : + ${csudo} insserv -d taosd || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d taosd defaults || : + fi } function clean_service_on_systemd() { taosd_service_config="${service_config_dir}/taosd.service" - # taosd service already is stoped before install + # taosd service already is stoped before install in preinst script #if systemctl is-active --quiet taosd; then # echo "TDengine is running, stopping it..." # ${csudo} systemctl stop taosd &> /dev/null || echo &> /dev/null diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh index 4566db1b5afd185c415238f6501277da3638fcdc..0533410802c09e0aee2e60766d5c621dd53c4272 100755 --- a/packaging/tools/preun.sh +++ b/packaging/tools/preun.sh @@ -26,22 +26,27 @@ initd_mod=0 service_mod=2 if pidof systemd &> /dev/null; then service_mod=0 -elif $(which insserv &> /dev/null); then +elif $(which service &> /dev/null); then service_mod=1 - initd_mod=1 - service_config_dir="/etc/init.d" -elif $(which update-rc.d &> /dev/null); then - service_mod=1 - initd_mod=2 - service_config_dir="/etc/init.d" + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi else service_mod=2 fi - function kill_taosd() { pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - ${csudo} kill -9 ${pid} || : + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi } function clean_service_on_systemd() { @@ -57,20 +62,27 @@ function clean_service_on_systemd() { } function clean_service_on_sysvinit() { - restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof taosd &> /dev/null; then + echo "TDengine taosd is running, stopping it..." 
${csudo} service taosd stop || : fi - ${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - ${csudo} rm -f ${service_config_dir}/taosd || : if ((${initd_mod}==1)); then - ${csudo} grep -q -F "taos" /etc/inittab && ${csudo} insserv -r taosd || : + ${csudo} chkconfig --del taosd || : elif ((${initd_mod}==2)); then - ${csudo} grep -q -F "taos" /etc/inittab && ${csudo} update-rc.d -f taosd remove || : + ${csudo} insserv -r taosd || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d -f taosd remove || : + fi + + ${csudo} rm -f ${service_config_dir}/taosd || : + + if $(which init &> /dev/null); then + ${csudo} init q || : fi -# ${csudo} update-rc.d -f taosd remove || : - ${csudo} init q || : } function clean_service() { @@ -79,7 +91,7 @@ function clean_service() { elif ((${service_mod}==1)); then clean_service_on_sysvinit else - # must manual start taosd + # must manual stop taosd kill_taosd fi } @@ -94,6 +106,7 @@ ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : +${csudo} rm -f ${inc_link_dir}/taoserror.h || : ${csudo} rm -f ${lib_link_dir}/libtaos.* || : ${csudo} rm -f ${log_link_dir} || : diff --git a/packaging/tools/release_note b/packaging/tools/release_note index 3a3cd81ca91bc274e8375d12d3e7de7890b016f6..4578a4523c50f3b0764ce05add5920cf0be72172 100644 --- a/packaging/tools/release_note +++ b/packaging/tools/release_note @@ -1,3 +1,14 @@ +taos-1.6.4.0 (Release on 2019-12-01) +Bug fixed: + 1.Look for possible causes of file corruption and fix them + 2.Encapsulate memory allocation functions to reduce the possibility of crashes + 3.Increase Arm64 compilation options + 4.Remove most of the warnings in the code + 5.Provide a variety of connector usage documents + 6.Network connection can be selected in udp and tcp + 7.Allow the maximum number of Tags to be 32 + 8.Bugs reported by the user + taos-1.5.2.6 (Release on 2019-05-13) Bug fixed: - Nchar strings sometimes were wrongly truncated on Window diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 4ada19762c55034751d64de0dd063100424b3503..3c9fd6bf7ff7b3098d900535a52a3e81f1368a1e 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Script to stop the service and uninstall tdengine, but retain the config, data and log files. +# Script to stop the service and uninstall TDengine, but retain the config, data and log files. 
RED='\033[0;31m' GREEN='\033[1;32m' @@ -27,21 +27,27 @@ initd_mod=0 service_mod=2 if pidof systemd &> /dev/null; then service_mod=0 -elif $(which insserv &> /dev/null); then +elif $(which service &> /dev/null); then service_mod=1 - initd_mod=1 - service_config_dir="/etc/init.d" -elif $(which update-rc.d &> /dev/null); then - service_mod=1 - initd_mod=2 - service_config_dir="/etc/init.d" + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi else service_mod=2 fi function kill_taosd() { pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - ${csudo} kill -9 ${pid} || : + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi } function clean_bin() { @@ -61,6 +67,7 @@ function clean_lib() { function clean_header() { # Remove link ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } function clean_config() { @@ -86,20 +93,27 @@ function clean_service_on_systemd() { } function clean_service_on_sysvinit() { - restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof taosd &> /dev/null; then + echo "TDengine taosd is running, stopping it..." ${csudo} service taosd stop || : fi - ${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - ${csudo} rm -f ${service_config_dir}/taosd || : if ((${initd_mod}==1)); then - ${csudo} grep -q -F "taos" /etc/inittab && ${csudo} insserv -r taosd || : + ${csudo} chkconfig --del taosd || : elif ((${initd_mod}==2)); then - ${csudo} grep -q -F "taos" /etc/inittab && ${csudo} update-rc.d -f taosd remove || : + ${csudo} insserv -r taosd || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d -f taosd remove || : + fi + + ${csudo} rm -f ${service_config_dir}/taosd || : + + if $(which init &> /dev/null); then + ${csudo} init q || : fi -# ${csudo} update-rc.d -f taosd remove || : - ${csudo} init q || : } function clean_service() { @@ -108,7 +122,7 @@ function clean_service() { elif ((${service_mod}==1)); then clean_service_on_sysvinit else - # must manual start taosd + # must manual stop taosd kill_taosd fi } @@ -139,4 +153,4 @@ elif echo $osinfo | grep -qwi "centos" ; then ${csudo} rpm -e --noscripts tdengine || : fi -echo -e "${GREEN}TDEngine is removed successfully!${NC}" +echo -e "${GREEN}TDengine is removed successfully!${NC}" diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh new file mode 100755 index 0000000000000000000000000000000000000000..206de34c1f74325dd41773358bbe2c57690ca153 --- /dev/null +++ b/packaging/tools/remove_client.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# +# Script to stop the client and uninstall database, but retain the config and log files. 
+set -e
+# set -x
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+# install main path
+install_main_dir="/usr/local/taos"
+
+log_link_dir="/usr/local/taos/log"
+cfg_link_dir="/usr/local/taos/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+inc_link_dir="/usr/include"
+
+csudo=""
+if command -v sudo > /dev/null; then
+    csudo="sudo"
+fi
+
+function kill_client() {
+    pid=$(ps -ef | grep "taos" | grep -v "grep" | awk '{print $2}')
+    if [ -n "$pid" ]; then
+        ${csudo} kill -9 $pid || :
+    fi
+}
+
+function clean_bin() {
+    # Remove links
+    ${csudo} rm -f ${bin_link_dir}/taos     || :
+    ${csudo} rm -f ${bin_link_dir}/taosdump || :
+    ${csudo} rm -f ${bin_link_dir}/rmtaos   || :
+}
+
+function clean_lib() {
+    # Remove link
+    ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+}
+
+function clean_header() {
+    # Remove links
+    ${csudo} rm -f ${inc_link_dir}/taos.h      || :
+    ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+    # Remove link
+    ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+    # Remove link
+    ${csudo} rm -rf ${log_link_dir} || :
+}
+
+# Stop client.
+kill_client
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}TDengine client is removed successfully!${NC}"
diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt
index 3bb2f748680103da37f04d35b515188bcee09ea3..d3baf84d6754ad2edcf1ac193cd46020760553ea 100644
--- a/src/client/CMakeLists.txt
+++ b/src/client/CMakeLists.txt
@@ -7,7 +7,7 @@
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
 INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc)
 AUX_SOURCE_DIRECTORY(./src SRC)
-IF (TD_LINUX_64)
+IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
   # set the static lib name
diff --git a/src/client/inc/tscCache.h b/src/client/inc/tscCache.h
index 096a6618f6e9fdef2a39886b9c55c2f960bc3c44..4c6acec096c01db64b09c4f0d18f404b8825f7b6 100644
--- a/src/client/inc/tscCache.h
+++ b/src/client/inc/tscCache.h
@@ -24,9 +24,9 @@
 void *taosOpenConnCache(int maxSessions, void (*cleanFp)(void *), void *tmrCtrl,
 void taosCloseConnCache(void *handle);
-void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, short port, char *user);
+void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, uint16_t port, char *user);
-void *taosGetConnFromCache(void *handle, uint32_t ip, short port, char *user);
+void *taosGetConnFromCache(void *handle, uint32_t ip, uint16_t port, char *user);
 #ifdef __cplusplus
 }
diff --git a/src/inc/tsql.h b/src/client/inc/tscSQLParser.h
similarity index 95%
rename from src/inc/tsql.h
rename to src/client/inc/tscSQLParser.h
index 0bcd8ffa68591b3fcc8635d417a5411643ff4663..34faad525b6b5e8472403cb616c833cc57b78b0f 100644
--- a/src/inc/tsql.h
+++ b/src/client/inc/tscSQLParser.h
@@ -23,23 +23,10 @@
 extern "C" {
 #include "taos.h"
 #include "tsqldef.h"
 #include "ttypes.h"
-
-#define TK_SPACE 200
-#define TK_COMMENT 201
-#define TK_ILLEGAL 202
-#define TK_HEX 203 // hex number 0x123
-#define TK_OCT 204 // oct number
-#define TK_BIN 205 // bin format data 0b111
-#define TK_FILE 206
-
-#define TSQL_SO_ASC 1
-#define TSQL_SO_DESC 0
+#include "taosmsg.h"
 #define MAX_TOKEN_LEN 30
-#define TSQL_TBNAME "TBNAME"
-#define TSQL_TBNAME_L "tbname"
-
 // token type
 enum {
   TSQL_NODE_TYPE_EXPR = 0x1,
@@ -118,6 +105,7 @@ enum TSQL_TYPE {
   SHOW_MODULES =
0x6c, SHOW_CONNECTIONS = 0x6d, SHOW_GRANTS = 0x6e, + SHOW_VNODES = 0x6f, // create dnode CREATE_DNODE = 0x80, @@ -277,8 +265,7 @@ typedef struct tSQLExpr { uint32_t nSQLOptr; // TK_FUNCTION: sql function, TK_LE: less than(binary expr) // the full sql string of function(col, param), which is actually the raw - // field name, - // since the function name is kept in nSQLOptr already + // field name, since the function name is kept in nSQLOptr already SSQLToken operand; struct tSQLExprList *pParam; // function parameters @@ -345,8 +332,6 @@ tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SSQLToken void tSQLExprListDestroy(tSQLExprList *pList); -int32_t tSQLSyntaxNodeToString(tSQLExpr *pNode, char *dst); - SQuerySQL *tSetQuerySQLElems(SSQLToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere, tVariantList *pGroupby, tVariantList *pSortOrder, SSQLToken *pInterval, SSQLToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit); @@ -378,6 +363,7 @@ tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken); void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBInfo *pDB, SSQLToken *pIgExists); void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo); +void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo); // prefix show db.tables; void setDBName(SSQLToken *pCpxName, SSQLToken *pDB); diff --git a/src/client/inc/tscSecondaryMerge.h b/src/client/inc/tscSecondaryMerge.h index 4c95994dfab1ea1c5a259ed7f13ddd91b91c6e1e..0c6472f6b367857edbdc92a08e0bc8a263572ee1 100644 --- a/src/client/inc/tscSecondaryMerge.h +++ b/src/client/inc/tscSecondaryMerge.h @@ -94,7 +94,7 @@ typedef struct SRetrieveSupport { tOrderDescriptor *pOrderDescriptor; tColModel * pFinalColModel; // colModel for final result SSubqueryState * pState; - int32_t vnodeIdx; // index of current vnode in vnode list + int32_t subqueryIndex; // index of current vnode in vnode list SSqlObj * pParentSqlObj; tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to uint32_t numOfRetry; // record the number of retry times diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 0474697c3f6e56e8d7e22088992ae04b70892066..5d828d7cf0aaedd7abc20b134b24219212e1e8cb 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -23,17 +23,17 @@ extern "C" { /* * @date 2018/09/30 */ -#include -#include +#include "os.h" #include "textbuffer.h" +#include "tscSecondaryMerge.h" #include "tsclient.h" #include "tsdb.h" -#include "tscSecondaryMerge.h" -#define UTIL_METER_IS_METRIC(cmd) (((cmd)->pMeterMeta != NULL) && ((cmd)->pMeterMeta->meterType == TSDB_METER_METRIC)) -#define UTIL_METER_IS_NOMRAL_METER(cmd) (!(UTIL_METER_IS_METRIC(cmd))) -#define UTIL_METER_IS_CREATE_FROM_METRIC(cmd) \ - (((cmd)->pMeterMeta != NULL) && ((cmd)->pMeterMeta->meterType == TSDB_METER_MTABLE)) +#define UTIL_METER_IS_METRIC(metaInfo) \ + (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC)) +#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_METRIC(metaInfo))) +#define UTIL_METER_IS_CREATE_FROM_METRIC(metaInfo) \ + (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_MTABLE)) #define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0) @@ -52,7 +52,6 @@ typedef struct SParsedDataColInfo { typedef struct SJoinSubquerySupporter { SSubqueryState* pState; SSqlObj* pObj; // parent SqlObj - bool hasMore; // has data 
from vnode to fetch int32_t subqueryIndex; // index of sub query int64_t interval; // interval time SLimitVal limit; // limit info @@ -62,28 +61,25 @@ typedef struct SJoinSubquerySupporter { SFieldInfo fieldsInfo; STagCond tagCond; SSqlGroupbyExpr groupbyExpr; - - struct STSBuf* pTSBuf; - - FILE* f; - char path[PATH_MAX]; + struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array + FILE* f; // temporary file in order to create TSBuf + char path[PATH_MAX]; // temporary file path } SJoinSubquerySupporter; -void tscDestroyDataBlock(STableDataBlocks* pDataBlock); -STableDataBlocks* tscCreateDataBlock(int32_t size); -void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks); -SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, uint32_t offset); - -SDataBlockList* tscCreateBlockArrayList(); -void* tscDestroyBlockArrayList(SDataBlockList* pList); -int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock); -void tscFreeUnusedDataBlocks(SDataBlockList* pList); -int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList); +void tscDestroyDataBlock(STableDataBlocks* pDataBlock); +STableDataBlocks* tscCreateDataBlock(size_t initialBufSize, int32_t rowSize, int32_t startOffset, const char* name); +void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks); +SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, + uint32_t offset); + +SDataBlockList* tscCreateBlockArrayList(); +void* tscDestroyBlockArrayList(SDataBlockList* pList); +int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock); +void tscFreeUnusedDataBlocks(SDataBlockList* pList); +int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList); STableDataBlocks* tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size, - int32_t startOffset, int32_t rowSize, char* tableId); -STableDataBlocks* tscCreateDataBlockEx(size_t size, int32_t rowSize, int32_t startOffset, char* name); - -SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx); + int32_t startOffset, int32_t rowSize, const char* tableId); +SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx); SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx); /** @@ -97,6 +93,8 @@ SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx); bool tscIsPointInterpQuery(SSqlCmd* pCmd); bool tscIsTWAQuery(SSqlCmd* pCmd); bool tscProjectionQueryOnMetric(SSqlCmd* pCmd); +bool tscProjectionQueryOnTable(SSqlCmd* pCmd); + bool tscIsTwoStageMergeMetricQuery(SSqlCmd* pCmd); bool tscQueryOnMetric(SSqlCmd* pCmd); bool tscQueryMetricTags(SSqlCmd* pCmd); @@ -106,15 +104,9 @@ void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t SSchema* pColSchema, int16_t isTag); void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex); -void SStringFree(SString* str); -void SStringCopy(SString* pDest, const SString* pSrc); -SString SStringCreate(const char* str); - -int32_t SStringAlloc(SString* pStr, int32_t size); -int32_t SStringEnsureRemain(SString* pStr, int32_t size); int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex); -void tscClearInterpInfo(SSqlCmd* pCmd); +void tscClearInterpInfo(SSqlCmd* pCmd); bool tscIsInsertOrImportData(char* sqlstr); @@ -125,7 +117,7 @@ int tscAllocPayload(SSqlCmd* 
pCmd, int size); void tscFieldInfoSetValFromSchema(SFieldInfo* pFieldInfo, int32_t index, SSchema* pSchema); void tscFieldInfoSetValFromField(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* pField); -void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, char* name, int16_t bytes); +void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, const char* name, int16_t bytes); void tscFieldInfoUpdateVisible(SFieldInfo* pFieldInfo, int32_t index, bool visible); void tscFieldInfoCalOffset(SSqlCmd* pCmd); @@ -134,27 +126,29 @@ void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst); TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index); -int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index); -int32_t tscGetResRowLength(SSqlCmd* pCmd); -void tscClearFieldInfo(SFieldInfo* pFieldInfo); +int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index); +int32_t tscGetResRowLength(SSqlCmd* pCmd); +void tscClearFieldInfo(SFieldInfo* pFieldInfo); void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex); SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, /*int16_t colId,*/ int16_t interSize); + int16_t size, int16_t interSize); +SSqlExpr* tscSqlExprInsertEmpty(SSqlCmd* pCmd, int32_t index, int16_t functionId); + SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, int16_t size); SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index); -void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid); +void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid); SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* colIndex); -void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src); -void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src); +void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src); +void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src); -void tscColumnBaseInfoCopy(SColumnBaseInfo* dst, const SColumnBaseInfo* src, int16_t tableIndex); +void tscColumnBaseInfoCopy(SColumnBaseInfo* dst, const SColumnBaseInfo* src, int16_t tableIndex); SColumnBase* tscColumnBaseInfoGet(SColumnBaseInfo* pColumnBaseInfo, int32_t index); -void tscColumnBaseInfoUpdateTableIndex(SColumnBaseInfo* pColList, int16_t tableIndex); +void tscColumnBaseInfoUpdateTableIndex(SColumnBaseInfo* pColList, int16_t tableIndex); void tscColumnBaseInfoReserve(SColumnBaseInfo* pColumnBaseInfo, int32_t size); void tscColumnBaseInfoDestroy(SColumnBaseInfo* pColumnBaseInfo); @@ -167,11 +161,10 @@ bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId); // get starter position of metric query condition (query on tags) in SSqlCmd.payload SCond* tsGetMetricQueryCondPos(STagCond* pCond, uint64_t tableIndex); -void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str); +void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str); void tscTagCondCopy(STagCond* dest, const STagCond* src); void tscTagCondRelease(STagCond* pCond); -void tscTagCondSetQueryCondType(STagCond* pCond, int16_t type); void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd); @@ -180,19 +173,19 @@ bool tscShouldFreeHeatBeat(SSqlObj* pHb); void tscCleanSqlCmd(SSqlCmd* pCmd); bool 
tscShouldFreeAsyncSqlObj(SSqlObj* pSql); -void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache); +void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache); SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index); SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* index); -void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache); +void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache); SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta, int16_t numOfTags, int16_t* tags); SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SSqlCmd* pCmd); void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* keyStr, uint64_t uid); -int tscGetMetricMeta(SSqlObj* pSql); -int tscGetMeterMeta(SSqlObj* pSql, char* meterId, int32_t tableIndex); -int tscGetMeterMetaEx(SSqlObj* pSql, char* meterId, bool createIfNotExists); +int tscGetMetricMeta(SSqlObj* pSql); +int tscGetMeterMeta(SSqlObj* pSql, char* meterId, int32_t tableIndex); +int tscGetMeterMetaEx(SSqlObj* pSql, char* meterId, bool createIfNotExists); void tscResetForNextRetrieve(SSqlRes* pRes); @@ -216,18 +209,20 @@ void tscDoQuery(SSqlObj* pSql); * @param pPrevSql * @return */ -SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param, - SSqlObj* pPrevSql); -void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex); +SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql); +void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex); void doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex); -int16_t tscGetJoinTagColIndexByUid(SSqlCmd* pCmd, uint64_t uid); +int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid); -TAOS* taos_connect_a(char* ip, char* user, char* pass, char* db, int port, void (*fp)(void*, TAOS_RES*, int), +TAOS* taos_connect_a(char* ip, char* user, char* pass, char* db, uint16_t port, void (*fp)(void*, TAOS_RES*, int), void* param, void** taos); void sortRemoveDuplicates(STableDataBlocks* dataBuf); + +void tscPrintSelectClause(SSqlCmd* pCmd); + #ifdef __cplusplus } #endif diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 0d2c7dace3ad0d96daf13ea7bc6417f00d85df7b..6adf2f1be161bc73de1891175a56c9b34f26072f 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -34,8 +34,8 @@ extern "C" { #include "tglobalcfg.h" #include "tlog.h" #include "tscCache.h" +#include "tscSQLParser.h" #include "tsdb.h" -#include "tsql.h" #include "tsqlfunction.h" #include "tutil.h" @@ -92,7 +92,12 @@ enum _sql_cmd { */ TSDB_SQL_RETRIEVE_EMPTY_RESULT, - TSDB_SQL_RESET_CACHE, + TSDB_SQL_RESET_CACHE, // 40 + TSDB_SQL_SERV_STATUS, + TSDB_SQL_CURRENT_DB, + TSDB_SQL_SERV_VERSION, + TSDB_SQL_CLI_VERSION, + TSDB_SQL_CURRENT_USER, TSDB_SQL_CFG_LOCAL, TSDB_SQL_MAX @@ -102,22 +107,25 @@ enum _sql_cmd { struct SSqlInfo; typedef struct SSqlGroupbyExpr { - int16_t tableIndex; - + int16_t tableIndex; int16_t numOfGroupCols; SColIndexEx columnInfo[TSDB_MAX_TAGS]; // group by columns information - - int16_t orderIndex; // order by column index - int16_t orderType; // order by type: asc/desc + int16_t orderIndex; // order by column index + int16_t orderType; // order by type: asc/desc } SSqlGroupbyExpr; typedef struct SMeterMetaInfo { - SMeterMeta * pMeterMeta; // metermeta - SMetricMeta *pMetricMeta; // 
metricmeta - - char name[TSDB_METER_ID_LEN + 1]; - int16_t numOfTags; // total required tags in query, including groupby tags - int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection + SMeterMeta * pMeterMeta; // metermeta + SMetricMeta *pMetricMeta; // metricmeta + + /* + * 1. keep the vnode index during the multi-vnode super table projection query + * 2. keep the vnode index for multi-vnode insertion + */ + int32_t vnodeIndex; + char name[TSDB_METER_ID_LEN + 1]; // table(super table) name + int16_t numOfTags; // total required tags in query, including groupby tags + int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection } SMeterMetaInfo; /* the structure for sql function in select clause */ @@ -183,7 +191,7 @@ typedef struct SString { typedef struct SCond { uint64_t uid; - SString cond; + char * cond; } SCond; typedef struct SJoinNode { @@ -214,41 +222,46 @@ typedef struct STagCond { } STagCond; typedef struct SParamInfo { - int32_t idx; - char type; - uint8_t timePrec; - short bytes; + int32_t idx; + char type; + uint8_t timePrec; + short bytes; uint32_t offset; } SParamInfo; typedef struct STableDataBlocks { char meterId[TSDB_METER_ID_LEN]; - int8_t tsSource; - bool ordered; - - int64_t vgid; - int64_t prevTS; - - int32_t numOfMeters; + int8_t tsSource; // where does the UNIX timestamp come from, server or client + bool ordered; // if current rows are ordered or not + int64_t vgid; // virtual group id + int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending + int32_t numOfMeters; // number of tables in current submit block - int32_t rowSize; + int32_t rowSize; // row size for current table uint32_t nAllocSize; uint32_t size; + + /* + * the metermeta for current table, the metermeta will be used during submit stage, keep a ref + * to avoid it to be removed from cache + */ + SMeterMeta* pMeterMeta; + union { char *filename; char *pData; }; // for parameter ('?') binding - uint32_t numOfAllocedParams; - uint32_t numOfParams; - SParamInfo* params; + uint32_t numOfAllocedParams; + uint32_t numOfParams; + SParamInfo *params; } STableDataBlocks; typedef struct SDataBlockList { int32_t idx; - int32_t nSize; - int32_t nAlloc; + uint32_t nSize; + uint32_t nAlloc; char * userParam; /* user assigned parameters for async query */ void * udfp; /* user defined function pointer, used in async model */ STableDataBlocks **pData; @@ -257,18 +270,17 @@ typedef struct SDataBlockList { typedef struct { SOrderVal order; int command; - - // TODO refactor - int count; - int16_t isInsertFromFile; // load data from file or not + int count; // TODO refactor union { - bool existsCheck; - int8_t showType; + bool existsCheck; // check if the table exists + int8_t showType; // show command type }; - char msgType; - uint16_t type; + int8_t isInsertFromFile; // load data from file or not + bool import; // import/insert type + uint8_t msgType; + uint16_t type; // query type char intervalTimeUnit; int64_t etime, stime; int64_t nAggTimeInterval; // aggregation time interval @@ -281,20 +293,19 @@ typedef struct { * * In such cases, allocate the memory dynamically, and need to free the memory */ - uint32_t allocSize; - char * payload; - int payloadLen; - short numOfCols; + uint32_t allocSize; + char * payload; + int payloadLen; + short numOfCols; SColumnBaseInfo colList; - SFieldInfo fieldsInfo; - SSqlExprInfo exprsInfo; - SLimitVal limit; - SLimitVal slimit; - int64_t globalLimit; - STagCond tagCond; - int16_t vnodeIdx; // vnode index in pMetricMeta for metric 
query - int16_t interpoType; // interpolate type - int16_t numOfTables; + SFieldInfo fieldsInfo; + SSqlExprInfo exprsInfo; + SLimitVal limit; + SLimitVal slimit; + int64_t globalLimit; + STagCond tagCond; + int16_t interpoType; // interpolate type + int16_t numOfTables; // submit data blocks branched according to vnode SDataBlockList * pDataBlocks; @@ -342,11 +353,11 @@ typedef struct _tsc_obj { void * signature; void * pTimer; char mgmtIp[TSDB_USER_LEN]; - short mgmtPort; + uint16_t mgmtPort; char user[TSDB_USER_LEN]; char pass[TSDB_KEY_LEN]; char acctId[TSDB_DB_NAME_LEN]; - char db[TSDB_DB_NAME_LEN]; + char db[TSDB_METER_ID_LEN]; char sversion[TSDB_VERSION_LEN]; char writeAuth : 1; char superAuth : 1; @@ -362,26 +373,26 @@ typedef struct _sql_obj { STscObj *pTscObj; void (*fp)(); void (*fetchFp)(); - void * param; - uint32_t ip; - short vnode; - int64_t stime; - uint32_t queryId; - void * thandle; - void * pStream; - char * sqlstr; - char retry; - char maxRetry; - char index; - char freed : 4; - char listed : 4; - tsem_t rspSem; - tsem_t emptyRspSem; - - SSqlCmd cmd; - SSqlRes res; - - char numOfSubs; + void * param; + uint32_t ip; + short vnode; + int64_t stime; + uint32_t queryId; + void * thandle; + void * pStream; + char * sqlstr; + char retry; + char maxRetry; + uint8_t index; + char freed : 4; + char listed : 4; + tsem_t rspSem; + tsem_t emptyRspSem; + SSqlCmd cmd; + SSqlRes res; + uint8_t numOfSubs; + char* asyncTblPos; + void* pTableHashList; struct _sql_obj **pSubs; struct _sql_obj * prev, *next; } SSqlObj; @@ -425,11 +436,11 @@ int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion); void tscInitMsgs(); void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle); -int tscProcessSql(SSqlObj *pSql); +int tscProcessSql(SSqlObj *pSql); void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows); -int tscRenewMeterMeta(SSqlObj *pSql, char *meterId); +int tscRenewMeterMeta(SSqlObj *pSql, char *meterId); void tscQueueAsyncRes(SSqlObj *pSql); void tscQueueAsyncError(void(*fp), void *param); @@ -443,18 +454,12 @@ int taos_retrieve(TAOS_RES *res); * before send query message to vnode */ int32_t tscTansformSQLFunctionForMetricQuery(SSqlCmd *pCmd); -void tscRestoreSQLFunctionForMetricQuery(SSqlCmd *pCmd); - -/** - * release both metric/meter meta information - * @param pCmd SSqlCmd object that contains the metric/meter meta info - */ -void tscClearSqlMetaInfo(SSqlCmd *pCmd); +void tscRestoreSQLFunctionForMetricQuery(SSqlCmd *pCmd); void tscClearSqlMetaInfoForce(SSqlCmd *pCmd); int32_t tscCreateResPointerInfo(SSqlCmd *pCmd, SSqlRes *pRes); -void tscDestroyResPointerInfo(SSqlRes *pRes); +void tscDestroyResPointerInfo(SSqlRes *pRes); void tscFreeSqlCmdData(SSqlCmd *pCmd); @@ -474,11 +479,14 @@ void tscFreeSqlObj(SSqlObj *pObj); void tscCloseTscObj(STscObj *pObj); -void tscProcessMultiVnodesInsert(SSqlObj *pSql); -void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql); -void tscKillMetricQuery(SSqlObj *pSql); -void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen); -int32_t tscBuildResultsForEmptyRetrieval(SSqlObj *pSql); +void tscProcessMultiVnodesInsert(SSqlObj *pSql); +void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql); +void tscKillMetricQuery(SSqlObj *pSql); +void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen); +bool tscIsUpdateQuery(STscObj *pObj); +bool tscHasReachLimitation(SSqlObj* pSql); + +int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql); 
// transfer SSqlInfo to SqlCmd struct int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo); diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index 9f9632cadc3b0f8abb80d43f0eed87981f0db7c0..958252b4deca7708c99e6b762613813c2f9d330b 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -9,6 +9,22 @@ extern "C" { #endif #undef com_taosdata_jdbc_TSDBJNIConnector_INVALID_CONNECTION_POINTER_VALUE #define com_taosdata_jdbc_TSDBJNIConnector_INVALID_CONNECTION_POINTER_VALUE 0LL +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: + * Signature: (Ljava/lang/String;)V + */ +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setAllocModeImp + (JNIEnv *, jclass, jint, jstring, jboolean); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: + * Signature: ()Ljava/lang/String; + */ +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_dumpMemoryLeakImp + (JNIEnv *, jclass); + /* * Class: com_taosdata_jdbc_TSDBJNIConnector * Method: initImp diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index fb175618c0429d08e86976c5dc7f20c91918f653..2f1bcc522a1e98db303873ac8323a64c8deccf81 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -13,8 +13,6 @@ * along with this program. If not, see . */ -#include - #include "os.h" #include "com_taosdata_jdbc_TSDBJNIConnector.h" #include "taos.h" @@ -63,13 +61,13 @@ jmethodID g_rowdataSetByteArrayFp; void jniGetGlobalMethod(JNIEnv *env) { // make sure init function executed once - switch (__sync_val_compare_and_swap_32(&__init, 0, 1)) { + switch (atomic_val_compare_exchange_32(&__init, 0, 1)) { case 0: break; case 1: do { taosMsleep(0); - } while (__sync_val_load_32(&__init) == 1); + } while (atomic_load_32(&__init) == 1); case 2: return; } @@ -109,10 +107,24 @@ void jniGetGlobalMethod(JNIEnv *env) { g_rowdataSetByteArrayFp = (*env)->GetMethodID(env, g_rowdataClass, "setByteArray", "(I[B)V"); (*env)->DeleteLocalRef(env, rowdataClass); - __sync_val_restore_32(&__init, 2); + atomic_store_32(&__init, 2); jniTrace("native method register finished"); } +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setAllocModeImp(JNIEnv *env, jobject jobj, jint jMode, jstring jPath, jboolean jAutoDump) { + if (jPath != NULL) { + const char *path = (*env)->GetStringUTFChars(env, jPath, NULL); + taosSetAllocMode(jMode, path, !!jAutoDump); + (*env)->ReleaseStringUTFChars(env, jPath, path); + } else { + taosSetAllocMode(jMode, NULL, !!jAutoDump); + } +} + +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_dumpMemoryLeakImp(JNIEnv *env, jobject jobj) { + taosDumpMemoryLeak(); +} + JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp(JNIEnv *env, jobject jobj, jstring jconfigDir) { if (jconfigDir != NULL) { const char *confDir = (*env)->GetStringUTFChars(env, jconfigDir, NULL); @@ -208,10 +220,10 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, jport); if (ret == 0) { - jniError("jobj:%p, taos:%p, connect to tdengine failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret, + jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret, (char *)host, (char *)user, (char *)dbname, jport); } else { - jniTrace("jobj:%p, 
taos:%p, connect to tdengine succeed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret, + jniTrace("jobj:%p, conn:%p, connect to database succeed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret, (char *)host, (char *)user, (char *)dbname, jport); } @@ -227,12 +239,12 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J jbyteArray jsql, jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); + jniError("jobj:%p, connection is already closed", jobj); return JNI_CONNECTION_NULL; } if (jsql == NULL) { - jniError("jobj:%p, taos:%p, sql is null", jobj, tscon); + jniError("jobj:%p, conn:%p, sql is null", jobj, tscon); return JNI_SQL_NULL; } @@ -240,6 +252,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J char *dst = (char *)calloc(1, sizeof(char) * (len + 1)); if (dst == NULL) { + jniError("jobj:%p, conn:%p, can not alloc memory", jobj, tscon); return JNI_OUT_OF_MEMORY; } @@ -248,9 +261,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J //todo handle error } + jniTrace("jobj:%p, conn:%p, sql:%s", jobj, tscon, dst); + int code = taos_query(tscon, dst); if (code != 0) { - jniError("jobj:%p, taos:%p, code:%d, msg:%s, sql:%s", jobj, tscon, code, taos_errstr(tscon), dst); + jniError("jobj:%p, conn:%p, code:%d, msg:%s", jobj, tscon, code, taos_errstr(tscon)); free(dst); return JNI_TDENGINE_ERROR; } else { @@ -259,9 +274,9 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J if (pSql->cmd.command == TSDB_SQL_INSERT) { affectRows = taos_affected_rows(tscon); - jniTrace("jobj:%p, taos:%p, code:%d, affect rows:%d, sql:%s", jobj, tscon, code, affectRows, dst); + jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d", jobj, tscon, code, affectRows); } else { - jniTrace("jobj:%p, taos:%p, code:%d, sql:%s", jobj, tscon, code, dst); + jniTrace("jobj:%p, conn:%p, code:%d", jobj, tscon, code); } free(dst); @@ -291,15 +306,17 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp( return JNI_CONNECTION_NULL; } - int num_fields = taos_field_count(tscon); - if (num_fields != 0) { - jlong ret = (jlong)taos_use_result(tscon); - jniTrace("jobj:%p, taos:%p, get resultset:%p", jobj, tscon, (void *)ret); - return ret; + jlong ret = 0; + + if (tscIsUpdateQuery(tscon)) { + ret = 0; // for update query, no result pointer + jniTrace("jobj:%p, conn:%p, no resultset", jobj, tscon); + } else { + ret = (jlong) taos_use_result(tscon); + jniTrace("jobj:%p, conn:%p, get resultset:%p", jobj, tscon, (void *) ret); } - jniTrace("jobj:%p, taos:%p, no resultset", jobj, tscon); - return 0; + return ret; } JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(JNIEnv *env, jobject jobj, jlong con, @@ -311,12 +328,12 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp( } if ((void *)res == NULL) { - jniError("jobj:%p, taos:%p, resultset is null", jobj, tscon); + jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon); return JNI_RESULT_SET_NULL; } taos_free_result((void *)res); - jniTrace("jobj:%p, taos:%p, free resultset:%p", jobj, tscon, (void *)res); + jniTrace("jobj:%p, conn:%p, free resultset:%p", jobj, tscon, (void *)res); return JNI_SUCCESS; } @@ -330,7 +347,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm jint ret = taos_affected_rows(tscon); - jniTrace("jobj:%p, taos:%p, affect rows:%d", jobj, tscon, (void 
*)con, ret); + jniTrace("jobj:%p, conn:%p, affect rows:%d", jobj, tscon, (void *)con, ret); return ret; } @@ -346,7 +363,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData TAOS_RES *result = (TAOS_RES *)res; if (result == NULL) { - jniError("jobj:%p, taos:%p, resultset is null", jobj, tscon); + jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon); return JNI_RESULT_SET_NULL; } @@ -356,10 +373,10 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData // jobject arrayListObj = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp, ""); if (num_fields == 0) { - jniError("jobj:%p, taos:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); + jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); return JNI_NUM_OF_FIELDS_0; } else { - jniTrace("jobj:%p, taos:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); + jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); for (int i = 0; i < num_fields; ++i) { jobject metadataObj = (*env)->NewObject(env, g_metadataClass, g_metadataConstructFp); (*env)->SetIntField(env, metadataObj, g_metadataColtypeField, fields[i].type); @@ -402,7 +419,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn TAOS_RES *result = (TAOS_RES *)res; if (result == NULL) { - jniError("jobj:%p, taos:%p, resultset is null", jobj, tscon); + jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon); return JNI_RESULT_SET_NULL; } @@ -410,7 +427,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn int num_fields = taos_num_fields(result); if (num_fields == 0) { - jniError("jobj:%p, taos:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); + jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); return JNI_NUM_OF_FIELDS_0; } @@ -418,10 +435,10 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn if (row == NULL) { int tserrno = taos_errno(tscon); if (tserrno == 0) { - jniTrace("jobj:%p, taos:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, res, num_fields); + jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, res, num_fields); return JNI_FETCH_END; } else { - jniTrace("jobj:%p, taos:%p, interruptted query", jobj, tscon); + jniTrace("jobj:%p, conn:%p, interruptted query", jobj, tscon); return JNI_RESULT_SET_NULL; } } @@ -482,10 +499,10 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); + jniError("jobj:%p, connection is already closed", jobj); return JNI_CONNECTION_NULL; } else { - jniTrace("jobj:%p, taos:%p, close connection success", jobj, tscon); + jniTrace("jobj:%p, conn:%p, close connection success", jobj, tscon); taos_close(tscon); return JNI_SUCCESS; } @@ -640,7 +657,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab } if (jsql == NULL) { - jniError("jobj:%p, taos:%p, sql is null", jobj, tscon); + jniError("jobj:%p, conn:%p, sql is null", jobj, tscon); return JNI_SQL_NULL; } @@ -661,4 +678,4 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(JNIEnv *env, jobject jobj) { return 
(*env)->NewStringUTF(env, (const char *)tsCharset); -} \ No newline at end of file +} diff --git a/src/util/src/sql.c b/src/client/src/sql.c similarity index 53% rename from src/util/src/sql.c rename to src/client/src/sql.c index 7e90e5f9397223387c9dc7613071d2958977ee25..ffcdc4bc0e75811bf2e9460eb06e122cd8c1985a 100644 --- a/src/util/src/sql.c +++ b/src/client/src/sql.c @@ -22,6 +22,8 @@ ** The following is the concatenation of all %include directives from the ** input grammar file: */ +#pragma GCC diagnostic ignored "-Wunused-variable" + #include /************ Begin %include sections from the grammar ************************/ @@ -30,8 +32,7 @@ #include #include #include - -#include "tsql.h" +#include "tscSQLParser.h" #include "tutil.h" /**************** End of %include directives **********************************/ /* These constants specify the various numeric values for terminal symbols @@ -92,26 +93,26 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 261 +#define YYNOCODE 262 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SSQLToken typedef union { int yyinit; ParseTOKENTYPE yy0; - SQuerySQL* yy24; - tVariantList* yy56; - tSQLExprListList* yy74; - tSQLExpr* yy90; - SCreateTableSQL* yy158; - tVariant yy186; - TAOS_FIELD yy223; - SCreateAcctSQL yy279; - SLimitVal yy294; - int yy332; - int64_t yy389; - SCreateDBInfo yy398; - tFieldList* yy471; - tSQLExprList* yy498; + SQuerySQL* yy138; + SCreateAcctSQL yy155; + SLimitVal yy162; + int yy220; + tVariant yy236; + tSQLExprListList* yy237; + tSQLExpr* yy244; + SCreateDBInfo yy262; + tSQLExprList* yy284; + SCreateTableSQL* yy344; + int64_t yy369; + TAOS_FIELD yy397; + tFieldList* yy421; + tVariantList* yy480; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -121,16 +122,16 @@ typedef union { #define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 251 -#define YYNRULE 213 -#define YY_MAX_SHIFT 250 -#define YY_MIN_SHIFTREDUCE 401 -#define YY_MAX_SHIFTREDUCE 613 -#define YY_MIN_REDUCE 614 -#define YY_MAX_REDUCE 826 -#define YY_ERROR_ACTION 827 -#define YY_ACCEPT_ACTION 828 -#define YY_NO_ACTION 829 +#define YYNSTATE 252 +#define YYNRULE 216 +#define YY_MAX_SHIFT 251 +#define YY_MIN_SHIFTREDUCE 403 +#define YY_MAX_SHIFTREDUCE 618 +#define YY_MIN_REDUCE 619 +#define YY_MAX_REDUCE 834 +#define YY_ERROR_ACTION 835 +#define YY_ACCEPT_ACTION 836 +#define YY_NO_ACTION 837 /************* End control #defines *******************************************/ /* The yyzerominor constant is used to initialize instances of @@ -202,196 +203,198 @@ static const YYMINORTYPE yyzerominor = { 0 }; ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (529) +#define YY_ACTTAB_COUNT (531) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 439, 36, 35, 153, 249, 34, 33, 32, 440, 34, - /* 10 */ 33, 32, 43, 45, 49, 37, 38, 74, 78, 244, - /* 20 */ 31, 85, 77, 205, 41, 39, 42, 40, 80, 133, - /* 30 */ 101, 50, 36, 35, 527, 171, 34, 33, 32, 43, - /* 40 */ 45, 154, 37, 38, 114, 115, 224, 31, 65, 68, - /* 50 */ 205, 41, 39, 42, 40, 76, 133, 828, 250, 36, - /* 60 */ 35, 241, 241, 34, 33, 32, 43, 45, 155, 37, - /* 70 */ 38, 128, 126, 245, 31, 89, 88, 205, 41, 39, - /* 80 */ 42, 40, 202, 524, 59, 135, 36, 35, 439, 21, - /* 90 */ 34, 33, 32, 520, 159, 596, 440, 10, 57, 172, - /* 100 */ 135, 135, 227, 226, 101, 45, 439, 37, 38, 158, - /* 110 */ 596, 595, 31, 156, 440, 205, 41, 39, 42, 40, - /* 120 */ 232, 167, 564, 507, 36, 35, 166, 21, 34, 33, - /* 130 */ 32, 510, 402, 403, 404, 405, 406, 407, 408, 409, - /* 140 */ 410, 411, 412, 413, 510, 37, 38, 243, 132, 508, - /* 150 */ 31, 220, 101, 205, 41, 39, 42, 40, 551, 168, - /* 160 */ 200, 507, 36, 35, 97, 134, 34, 33, 32, 510, - /* 170 */ 21, 139, 101, 17, 219, 242, 218, 217, 216, 215, - /* 180 */ 214, 213, 212, 211, 492, 21, 481, 482, 483, 484, - /* 190 */ 485, 486, 487, 488, 489, 490, 491, 163, 577, 11, - /* 200 */ 243, 568, 228, 571, 507, 574, 550, 163, 577, 498, - /* 210 */ 21, 568, 509, 571, 193, 574, 148, 233, 7, 507, - /* 220 */ 561, 62, 111, 87, 86, 142, 60, 178, 242, 160, - /* 230 */ 161, 147, 437, 204, 186, 124, 183, 230, 229, 160, - /* 240 */ 161, 163, 577, 525, 506, 568, 570, 571, 573, 574, - /* 250 */ 41, 39, 42, 40, 495, 61, 494, 27, 36, 35, - /* 260 */ 545, 546, 34, 33, 32, 514, 28, 600, 511, 162, - /* 270 */ 512, 29, 513, 160, 161, 192, 446, 566, 188, 124, - /* 280 */ 248, 247, 422, 438, 522, 150, 124, 18, 601, 536, - /* 290 */ 537, 44, 29, 47, 15, 594, 169, 170, 578, 14, - /* 300 */ 576, 44, 14, 569, 518, 572, 519, 2, 52, 516, - /* 310 */ 576, 517, 504, 567, 503, 575, 47, 592, 22, 591, - /* 320 */ 209, 73, 72, 53, 22, 575, 9, 8, 84, 83, - /* 330 */ 590, 151, 152, 140, 501, 44, 610, 141, 560, 143, - /* 340 */ 144, 145, 146, 137, 576, 131, 138, 136, 164, 557, - /* 350 */ 556, 165, 526, 231, 110, 98, 112, 113, 448, 575, - /* 360 */ 543, 542, 210, 129, 515, 25, 223, 225, 609, 70, - /* 370 */ 189, 608, 606, 116, 466, 26, 23, 130, 435, 79, - /* 380 */ 433, 81, 431, 191, 430, 173, 125, 428, 427, 426, - /* 390 */ 424, 91, 532, 194, 198, 54, 417, 127, 51, 521, - /* 400 */ 421, 203, 419, 46, 102, 95, 201, 530, 103, 531, - /* 410 */ 544, 195, 199, 197, 30, 27, 222, 235, 75, 234, - /* 420 */ 236, 207, 238, 55, 237, 239, 240, 246, 149, 613, - /* 430 */ 63, 66, 174, 429, 175, 176, 90, 92, 177, 423, - /* 440 */ 119, 612, 118, 467, 117, 120, 121, 179, 122, 123, - /* 450 */ 1, 505, 108, 104, 105, 106, 107, 109, 24, 180, - /* 460 */ 181, 182, 611, 184, 185, 604, 12, 13, 187, 190, - /* 470 */ 96, 533, 99, 157, 58, 538, 196, 100, 19, 4, - /* 480 */ 579, 3, 16, 20, 64, 5, 206, 6, 208, 479, - /* 490 */ 478, 477, 476, 475, 474, 473, 472, 470, 47, 221, - /* 500 */ 443, 67, 445, 22, 500, 48, 499, 497, 464, 56, - /* 510 */ 462, 454, 69, 460, 456, 71, 458, 452, 450, 471, - /* 520 */ 469, 82, 441, 425, 415, 93, 614, 616, 94, + /* 0 */ 443, 74, 78, 244, 85, 77, 153, 249, 444, 836, + /* 10 */ 251, 80, 43, 45, 7, 37, 38, 62, 111, 171, + /* 20 */ 31, 443, 443, 205, 41, 39, 42, 40, 241, 444, + /* 30 */ 444, 135, 36, 35, 10, 101, 34, 33, 32, 43, + /* 40 */ 45, 600, 37, 38, 156, 524, 135, 31, 135, 133, + /* 50 */ 
205, 41, 39, 42, 40, 159, 601, 158, 601, 36, + /* 60 */ 35, 154, 514, 34, 33, 32, 404, 405, 406, 407, + /* 70 */ 408, 409, 410, 411, 412, 413, 414, 415, 250, 21, + /* 80 */ 43, 45, 172, 37, 38, 227, 226, 202, 31, 59, + /* 90 */ 21, 205, 41, 39, 42, 40, 34, 33, 32, 57, + /* 100 */ 36, 35, 550, 551, 34, 33, 32, 45, 232, 37, + /* 110 */ 38, 167, 132, 511, 31, 21, 21, 205, 41, 39, + /* 120 */ 42, 40, 168, 569, 511, 502, 36, 35, 134, 178, + /* 130 */ 34, 33, 32, 243, 37, 38, 186, 512, 183, 31, + /* 140 */ 532, 101, 205, 41, 39, 42, 40, 228, 233, 511, + /* 150 */ 511, 36, 35, 230, 229, 34, 33, 32, 17, 219, + /* 160 */ 242, 218, 217, 216, 215, 214, 213, 212, 211, 496, + /* 170 */ 139, 485, 486, 487, 488, 489, 490, 491, 492, 493, + /* 180 */ 494, 495, 163, 582, 11, 97, 573, 133, 576, 529, + /* 190 */ 579, 597, 163, 582, 166, 556, 573, 200, 576, 155, + /* 200 */ 579, 36, 35, 148, 220, 34, 33, 32, 21, 87, + /* 210 */ 86, 142, 514, 243, 160, 161, 101, 147, 204, 248, + /* 220 */ 247, 426, 514, 76, 160, 161, 163, 582, 530, 241, + /* 230 */ 573, 101, 576, 513, 579, 193, 41, 39, 42, 40, + /* 240 */ 242, 596, 510, 27, 36, 35, 49, 571, 34, 33, + /* 250 */ 32, 114, 115, 224, 65, 68, 505, 441, 160, 161, + /* 260 */ 124, 192, 518, 50, 188, 515, 499, 516, 498, 517, + /* 270 */ 555, 150, 128, 126, 245, 89, 88, 44, 450, 442, + /* 280 */ 61, 124, 124, 572, 595, 60, 581, 44, 575, 527, + /* 290 */ 578, 28, 18, 169, 170, 605, 581, 162, 606, 29, + /* 300 */ 541, 580, 29, 542, 47, 52, 599, 15, 151, 583, + /* 310 */ 14, 580, 574, 14, 577, 508, 73, 72, 507, 47, + /* 320 */ 53, 44, 22, 209, 522, 152, 523, 22, 140, 520, + /* 330 */ 581, 521, 9, 8, 2, 84, 83, 141, 143, 144, + /* 340 */ 145, 615, 146, 137, 131, 580, 138, 136, 531, 566, + /* 350 */ 98, 565, 164, 562, 561, 165, 231, 548, 547, 189, + /* 360 */ 112, 113, 519, 452, 110, 210, 129, 25, 191, 223, + /* 370 */ 225, 614, 70, 613, 611, 116, 470, 26, 23, 130, + /* 380 */ 439, 91, 79, 437, 81, 435, 434, 537, 194, 198, + /* 390 */ 173, 54, 125, 432, 431, 430, 428, 421, 525, 127, + /* 400 */ 425, 51, 423, 102, 46, 203, 103, 104, 95, 199, + /* 410 */ 201, 535, 197, 30, 536, 549, 195, 27, 222, 75, + /* 420 */ 234, 235, 236, 237, 207, 55, 238, 239, 240, 246, + /* 430 */ 149, 618, 63, 66, 175, 433, 174, 176, 177, 617, + /* 440 */ 180, 427, 119, 90, 118, 471, 117, 120, 122, 121, + /* 450 */ 123, 92, 509, 1, 24, 182, 107, 105, 106, 108, + /* 460 */ 109, 179, 181, 616, 184, 185, 12, 609, 190, 187, + /* 470 */ 13, 157, 96, 538, 99, 196, 58, 4, 19, 543, + /* 480 */ 100, 5, 584, 3, 20, 16, 206, 6, 208, 64, + /* 490 */ 483, 482, 481, 480, 479, 478, 477, 476, 474, 47, + /* 500 */ 447, 449, 67, 22, 504, 221, 503, 501, 56, 468, + /* 510 */ 466, 48, 458, 464, 69, 460, 462, 456, 454, 475, + /* 520 */ 71, 473, 82, 429, 445, 419, 417, 93, 619, 621, + /* 530 */ 94, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 1, 33, 34, 198, 199, 37, 38, 39, 9, 37, - /* 10 */ 38, 39, 13, 14, 100, 16, 17, 62, 63, 64, - /* 20 */ 21, 66, 67, 24, 25, 26, 27, 28, 73, 247, - /* 30 */ 199, 117, 33, 34, 199, 61, 37, 38, 39, 13, - /* 40 */ 14, 259, 16, 17, 62, 63, 64, 21, 66, 67, - /* 50 */ 24, 25, 26, 27, 28, 71, 247, 196, 197, 33, - /* 60 */ 34, 77, 77, 37, 38, 39, 13, 14, 259, 16, - /* 70 */ 17, 62, 63, 64, 21, 66, 67, 24, 25, 26, - /* 80 */ 27, 28, 251, 248, 253, 247, 33, 34, 1, 199, - /* 90 */ 37, 38, 39, 232, 256, 257, 9, 247, 99, 125, - /* 100 */ 247, 247, 128, 129, 199, 14, 1, 16, 17, 256, - /* 110 */ 257, 257, 21, 216, 9, 24, 25, 26, 27, 28, - /* 120 */ 199, 231, 96, 233, 33, 34, 216, 199, 37, 38, 
- /* 130 */ 39, 234, 45, 46, 47, 48, 49, 50, 51, 52, - /* 140 */ 53, 54, 55, 56, 234, 16, 17, 58, 247, 228, - /* 150 */ 21, 216, 199, 24, 25, 26, 27, 28, 253, 231, - /* 160 */ 255, 233, 33, 34, 199, 247, 37, 38, 39, 234, - /* 170 */ 199, 247, 199, 84, 85, 86, 87, 88, 89, 90, - /* 180 */ 91, 92, 93, 94, 215, 199, 217, 218, 219, 220, - /* 190 */ 221, 222, 223, 224, 225, 226, 227, 1, 2, 44, - /* 200 */ 58, 5, 231, 7, 233, 9, 253, 1, 2, 5, - /* 210 */ 199, 5, 234, 7, 249, 9, 61, 231, 95, 233, - /* 220 */ 229, 98, 99, 68, 69, 70, 253, 124, 86, 33, - /* 230 */ 34, 76, 203, 37, 131, 206, 133, 33, 34, 33, - /* 240 */ 34, 1, 2, 37, 233, 5, 5, 7, 7, 9, - /* 250 */ 25, 26, 27, 28, 217, 235, 219, 102, 33, 34, - /* 260 */ 110, 111, 37, 38, 39, 2, 246, 96, 5, 57, - /* 270 */ 7, 100, 9, 33, 34, 120, 203, 1, 123, 206, - /* 280 */ 58, 59, 60, 203, 100, 130, 206, 103, 96, 96, - /* 290 */ 96, 95, 100, 100, 100, 96, 33, 34, 96, 100, - /* 300 */ 104, 95, 100, 5, 5, 7, 7, 95, 100, 5, - /* 310 */ 104, 7, 96, 37, 96, 119, 100, 247, 100, 247, - /* 320 */ 96, 126, 127, 115, 100, 119, 126, 127, 71, 72, - /* 330 */ 247, 247, 247, 247, 230, 95, 234, 247, 229, 247, - /* 340 */ 247, 247, 247, 247, 104, 247, 247, 247, 229, 229, - /* 350 */ 229, 229, 199, 229, 236, 199, 199, 199, 199, 119, - /* 360 */ 254, 254, 199, 199, 101, 199, 199, 199, 199, 199, - /* 370 */ 122, 199, 199, 199, 199, 199, 199, 199, 199, 199, - /* 380 */ 199, 199, 199, 258, 199, 199, 199, 199, 199, 199, - /* 390 */ 199, 57, 104, 250, 250, 114, 199, 199, 116, 245, - /* 400 */ 199, 108, 199, 113, 244, 200, 112, 200, 243, 200, - /* 410 */ 200, 105, 107, 106, 118, 102, 74, 49, 83, 82, - /* 420 */ 79, 200, 53, 200, 81, 80, 78, 74, 200, 5, - /* 430 */ 204, 204, 132, 200, 5, 132, 201, 201, 65, 200, - /* 440 */ 208, 5, 212, 214, 213, 211, 209, 132, 210, 207, - /* 450 */ 205, 232, 238, 242, 241, 240, 239, 237, 202, 5, - /* 460 */ 132, 65, 5, 132, 65, 85, 95, 95, 124, 122, - /* 470 */ 121, 96, 95, 1, 100, 96, 95, 95, 100, 109, - /* 480 */ 96, 95, 95, 100, 71, 109, 97, 95, 97, 9, - /* 490 */ 5, 5, 5, 5, 1, 5, 5, 5, 100, 15, - /* 500 */ 75, 71, 65, 100, 5, 16, 5, 96, 5, 95, - /* 510 */ 5, 5, 127, 5, 5, 127, 5, 5, 5, 5, - /* 520 */ 5, 65, 75, 65, 57, 21, 0, 260, 21, + /* 0 */ 1, 64, 65, 66, 67, 68, 199, 200, 9, 197, + /* 10 */ 198, 74, 13, 14, 96, 16, 17, 99, 100, 63, + /* 20 */ 21, 1, 1, 24, 25, 26, 27, 28, 78, 9, + /* 30 */ 9, 248, 33, 34, 248, 200, 37, 38, 39, 13, + /* 40 */ 14, 258, 16, 17, 217, 233, 248, 21, 248, 248, + /* 50 */ 24, 25, 26, 27, 28, 257, 258, 257, 258, 33, + /* 60 */ 34, 260, 235, 37, 38, 39, 45, 46, 47, 48, + /* 70 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 200, + /* 80 */ 13, 14, 126, 16, 17, 129, 130, 252, 21, 254, + /* 90 */ 200, 24, 25, 26, 27, 28, 37, 38, 39, 100, + /* 100 */ 33, 34, 111, 112, 37, 38, 39, 14, 200, 16, + /* 110 */ 17, 232, 248, 234, 21, 200, 200, 24, 25, 26, + /* 120 */ 27, 28, 232, 97, 234, 5, 33, 34, 248, 125, + /* 130 */ 37, 38, 39, 60, 16, 17, 132, 229, 134, 21, + /* 140 */ 200, 200, 24, 25, 26, 27, 28, 232, 232, 234, + /* 150 */ 234, 33, 34, 33, 34, 37, 38, 39, 85, 86, + /* 160 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 216, + /* 170 */ 248, 218, 219, 220, 221, 222, 223, 224, 225, 226, + /* 180 */ 227, 228, 1, 2, 44, 200, 5, 248, 7, 249, + /* 190 */ 9, 248, 1, 2, 217, 254, 5, 256, 7, 260, + /* 200 */ 9, 33, 34, 63, 217, 37, 38, 39, 200, 69, + /* 210 */ 70, 71, 235, 60, 33, 34, 200, 77, 37, 60, + /* 220 */ 61, 62, 235, 72, 33, 34, 1, 2, 37, 78, + /* 230 */ 5, 200, 7, 235, 9, 250, 25, 26, 27, 28, + /* 240 */ 87, 248, 234, 103, 
33, 34, 101, 1, 37, 38, + /* 250 */ 39, 64, 65, 66, 67, 68, 231, 204, 33, 34, + /* 260 */ 207, 121, 2, 118, 124, 5, 218, 7, 220, 9, + /* 270 */ 254, 131, 64, 65, 66, 67, 68, 96, 204, 204, + /* 280 */ 236, 207, 207, 37, 248, 254, 105, 96, 5, 101, + /* 290 */ 7, 247, 104, 33, 34, 97, 105, 59, 97, 101, + /* 300 */ 97, 120, 101, 97, 101, 101, 97, 101, 248, 97, + /* 310 */ 101, 120, 5, 101, 7, 97, 127, 128, 97, 101, + /* 320 */ 116, 96, 101, 97, 5, 248, 7, 101, 248, 5, + /* 330 */ 105, 7, 127, 128, 96, 72, 73, 248, 248, 248, + /* 340 */ 248, 235, 248, 248, 248, 120, 248, 248, 200, 230, + /* 350 */ 200, 230, 230, 230, 230, 230, 230, 255, 255, 123, + /* 360 */ 200, 200, 102, 200, 237, 200, 200, 200, 259, 200, + /* 370 */ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, + /* 380 */ 200, 59, 200, 200, 200, 200, 200, 105, 251, 251, + /* 390 */ 200, 115, 200, 200, 200, 200, 200, 200, 246, 200, + /* 400 */ 200, 117, 200, 245, 114, 109, 244, 243, 201, 108, + /* 410 */ 113, 201, 107, 119, 201, 201, 106, 103, 75, 84, + /* 420 */ 83, 49, 80, 82, 201, 201, 53, 81, 79, 75, + /* 430 */ 201, 5, 205, 205, 5, 201, 133, 133, 58, 5, + /* 440 */ 5, 201, 209, 202, 213, 215, 214, 212, 211, 210, + /* 450 */ 208, 202, 233, 206, 203, 58, 240, 242, 241, 239, + /* 460 */ 238, 133, 133, 5, 133, 58, 96, 86, 123, 125, + /* 470 */ 96, 1, 122, 97, 96, 96, 101, 110, 101, 97, + /* 480 */ 96, 110, 97, 96, 101, 96, 98, 96, 98, 72, + /* 490 */ 9, 5, 5, 5, 5, 1, 5, 5, 5, 101, + /* 500 */ 76, 58, 72, 101, 5, 15, 5, 97, 96, 5, + /* 510 */ 5, 16, 5, 5, 128, 5, 5, 5, 5, 5, + /* 520 */ 128, 5, 58, 58, 76, 59, 58, 21, 0, 261, + /* 530 */ 21, }; -#define YY_SHIFT_USE_DFLT (-87) -#define YY_SHIFT_COUNT (250) -#define YY_SHIFT_MIN (-86) -#define YY_SHIFT_MAX (526) +#define YY_SHIFT_USE_DFLT (-83) +#define YY_SHIFT_COUNT (251) +#define YY_SHIFT_MIN (-82) +#define YY_SHIFT_MAX (528) static const short yy_shift_ofst[] = { - /* 0 */ 155, 89, 196, 240, 105, 105, 105, 105, 105, 105, - /* 10 */ -1, 87, 240, 240, 240, 263, 263, 263, 105, 105, - /* 20 */ 105, 105, 105, -16, 142, -15, -15, -87, 206, 240, - /* 30 */ 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, - /* 40 */ 240, 240, 240, 240, 240, 240, 240, 263, 263, 204, - /* 50 */ 204, 204, 204, 204, 204, 123, 204, 105, 105, 150, - /* 60 */ 150, 184, 105, 105, 105, 105, 105, 105, 105, 105, - /* 70 */ 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, - /* 80 */ 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, - /* 90 */ 105, 105, 105, 105, 105, 248, 334, 334, 334, 288, - /* 100 */ 288, 334, 281, 282, 290, 293, 294, 305, 307, 306, - /* 110 */ 296, 313, 334, 334, 342, 342, 334, 335, 337, 368, - /* 120 */ 341, 343, 369, 345, 348, 334, 353, 334, 353, -87, - /* 130 */ -87, 26, 53, 53, 53, 53, 53, 91, 129, 225, - /* 140 */ 225, 225, -45, -32, -32, -32, -32, -18, 9, -26, - /* 150 */ 103, -28, -28, 222, 171, 192, 193, 194, 199, 202, - /* 160 */ 241, 298, 276, 212, -86, 208, 216, 218, 224, 299, - /* 170 */ 304, 195, 200, 257, 424, 300, 429, 303, 373, 436, - /* 180 */ 315, 454, 328, 396, 457, 331, 399, 380, 344, 371, - /* 190 */ 372, 347, 349, 374, 375, 377, 472, 381, 379, 382, - /* 200 */ 378, 370, 383, 376, 384, 386, 387, 389, 392, 391, - /* 210 */ 413, 480, 485, 486, 487, 488, 493, 490, 491, 492, - /* 220 */ 398, 425, 484, 430, 437, 489, 385, 388, 403, 499, - /* 230 */ 501, 411, 414, 403, 503, 505, 506, 508, 509, 511, - /* 240 */ 512, 513, 514, 515, 456, 458, 447, 504, 507, 467, - /* 250 */ 526, + /* 0 */ 140, 73, 181, 225, 20, 20, 20, 20, 20, 20, + /* 10 */ -1, 21, 225, 225, 225, 260, 260, 260, 20, 
20, + /* 20 */ 20, 20, 20, 151, 153, -50, -50, -83, 191, 225, + /* 30 */ 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, + /* 40 */ 225, 225, 225, 225, 225, 225, 225, 260, 260, 120, + /* 50 */ 120, 120, 120, 120, 120, -82, 120, 20, 20, -9, + /* 60 */ -9, 188, 20, 20, 20, 20, 20, 20, 20, 20, + /* 70 */ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + /* 80 */ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + /* 90 */ 20, 20, 20, 20, 20, 236, 322, 322, 322, 282, + /* 100 */ 282, 322, 276, 284, 290, 296, 297, 301, 305, 310, + /* 110 */ 294, 314, 322, 322, 343, 343, 322, 335, 337, 372, + /* 120 */ 342, 341, 373, 346, 349, 322, 354, 322, 354, -83, + /* 130 */ -83, 26, 67, 67, 67, 67, 67, 93, 118, 211, + /* 140 */ 211, 211, -63, 168, 168, 168, 168, 187, 208, -44, + /* 150 */ 4, 59, 59, 159, 198, 201, 203, 206, 209, 212, + /* 160 */ 283, 307, 246, 238, 145, 204, 218, 221, 226, 319, + /* 170 */ 324, 189, 205, 263, 426, 303, 429, 304, 380, 434, + /* 180 */ 328, 435, 329, 397, 458, 331, 407, 381, 344, 370, + /* 190 */ 374, 345, 350, 375, 376, 378, 470, 379, 382, 384, + /* 200 */ 377, 367, 383, 371, 385, 387, 389, 388, 391, 390, + /* 210 */ 417, 481, 486, 487, 488, 489, 494, 491, 492, 493, + /* 220 */ 398, 424, 490, 430, 443, 495, 386, 392, 402, 499, + /* 230 */ 501, 410, 412, 402, 504, 505, 507, 508, 510, 511, + /* 240 */ 512, 513, 514, 516, 464, 465, 448, 506, 509, 466, + /* 250 */ 468, 528, }; -#define YY_REDUCE_USE_DFLT (-219) +#define YY_REDUCE_USE_DFLT (-218) #define YY_REDUCE_COUNT (130) -#define YY_REDUCE_MIN (-218) -#define YY_REDUCE_MAX (256) +#define YY_REDUCE_MIN (-217) +#define YY_REDUCE_MAX (251) static const short yy_reduce_ofst[] = { - /* 0 */ -139, -31, -162, -147, -95, -169, -110, -72, -29, -14, - /* 10 */ -165, -195, -218, -191, -146, -103, -90, -65, -35, -47, - /* 20 */ -27, -79, 11, 29, 37, 73, 80, 20, -150, -99, - /* 30 */ -82, -76, 70, 72, 83, 84, 85, 86, 90, 92, - /* 40 */ 93, 94, 95, 96, 98, 99, 100, -22, 102, -9, - /* 50 */ 109, 119, 120, 121, 122, 104, 124, 153, 156, 106, - /* 60 */ 107, 118, 157, 158, 159, 163, 164, 166, 167, 168, - /* 70 */ 169, 170, 172, 173, 174, 175, 176, 177, 178, 179, - /* 80 */ 180, 181, 182, 183, 185, 186, 187, 188, 189, 190, - /* 90 */ 191, 197, 198, 201, 203, 125, 205, 207, 209, 143, - /* 100 */ 144, 210, 154, 160, 165, 211, 213, 215, 217, 214, - /* 110 */ 220, 219, 221, 223, 226, 227, 228, 229, 231, 230, - /* 120 */ 232, 234, 237, 238, 242, 233, 235, 239, 236, 245, - /* 130 */ 256, + /* 0 */ -188, -47, -202, -200, -59, -165, -121, -110, -85, -84, + /* 10 */ -60, -193, -199, -61, -217, -173, -23, -13, -15, 16, + /* 20 */ 31, -92, 8, 53, 48, 74, 75, 44, -214, -136, + /* 30 */ -120, -78, -57, -7, 36, 60, 77, 80, 89, 90, + /* 40 */ 91, 92, 94, 95, 96, 98, 99, -2, 106, 119, + /* 50 */ 121, 122, 123, 124, 125, 25, 126, 148, 150, 102, + /* 60 */ 103, 127, 160, 161, 163, 165, 166, 167, 169, 170, + /* 70 */ 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + /* 80 */ 182, 183, 184, 185, 186, 190, 192, 193, 194, 195, + /* 90 */ 196, 197, 199, 200, 202, 109, 207, 210, 213, 137, + /* 100 */ 138, 214, 152, 158, 162, 164, 215, 217, 216, 220, + /* 110 */ 222, 219, 223, 224, 227, 228, 229, 230, 232, 231, + /* 120 */ 233, 235, 239, 237, 242, 234, 241, 240, 249, 247, + /* 130 */ 251, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 827, 660, 811, 811, 827, 827, 827, 827, 827, 827, - /* 10 */ 741, 627, 827, 827, 811, 827, 827, 827, 827, 827, - /* 20 */ 827, 827, 827, 662, 649, 662, 662, 736, 827, 827, - /* 30 */ 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, - /* 40 
*/ 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, - /* 50 */ 827, 827, 827, 827, 827, 827, 827, 827, 827, 760, - /* 60 */ 760, 827, 827, 827, 827, 827, 827, 827, 827, 827, - /* 70 */ 827, 827, 827, 827, 827, 827, 827, 827, 827, 647, - /* 80 */ 827, 645, 827, 827, 827, 827, 827, 827, 827, 827, - /* 90 */ 827, 827, 827, 827, 827, 827, 629, 629, 629, 827, - /* 100 */ 827, 629, 767, 771, 765, 753, 761, 752, 748, 747, - /* 110 */ 775, 827, 629, 629, 657, 657, 629, 678, 676, 674, - /* 120 */ 666, 672, 668, 670, 664, 629, 655, 629, 655, 693, - /* 130 */ 706, 827, 815, 816, 776, 810, 766, 794, 793, 806, - /* 140 */ 800, 799, 827, 798, 797, 796, 795, 827, 827, 827, - /* 150 */ 827, 802, 801, 827, 827, 827, 827, 827, 827, 827, - /* 160 */ 827, 827, 827, 778, 772, 768, 827, 827, 827, 827, - /* 170 */ 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, - /* 180 */ 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, - /* 190 */ 827, 812, 827, 742, 827, 827, 827, 827, 827, 827, - /* 200 */ 762, 827, 754, 827, 827, 827, 827, 827, 827, 715, - /* 210 */ 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, - /* 220 */ 681, 827, 827, 827, 827, 827, 827, 827, 820, 827, - /* 230 */ 827, 827, 709, 818, 827, 827, 827, 827, 827, 827, - /* 240 */ 827, 827, 827, 827, 827, 827, 827, 633, 631, 827, - /* 250 */ 827, + /* 0 */ 835, 667, 819, 819, 835, 835, 835, 835, 835, 835, + /* 10 */ 749, 634, 835, 835, 819, 835, 835, 835, 835, 835, + /* 20 */ 835, 835, 835, 669, 656, 669, 669, 744, 835, 835, + /* 30 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, + /* 40 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, + /* 50 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 768, + /* 60 */ 768, 742, 835, 835, 835, 835, 835, 835, 835, 835, + /* 70 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 654, + /* 80 */ 835, 652, 835, 835, 835, 835, 835, 835, 835, 835, + /* 90 */ 835, 835, 835, 835, 835, 835, 636, 636, 636, 835, + /* 100 */ 835, 636, 775, 779, 773, 761, 769, 760, 756, 755, + /* 110 */ 783, 835, 636, 636, 664, 664, 636, 685, 683, 681, + /* 120 */ 673, 679, 675, 677, 671, 636, 662, 636, 662, 700, + /* 130 */ 713, 835, 823, 824, 784, 818, 774, 802, 801, 814, + /* 140 */ 808, 807, 835, 806, 805, 804, 803, 835, 835, 835, + /* 150 */ 835, 810, 809, 835, 835, 835, 835, 835, 835, 835, + /* 160 */ 835, 835, 835, 786, 780, 776, 835, 835, 835, 835, + /* 170 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, + /* 180 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, + /* 190 */ 835, 820, 835, 750, 835, 835, 835, 835, 835, 835, + /* 200 */ 770, 835, 762, 835, 835, 835, 835, 835, 835, 722, + /* 210 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, + /* 220 */ 688, 835, 835, 835, 835, 835, 835, 835, 828, 835, + /* 230 */ 835, 835, 716, 826, 835, 835, 835, 835, 835, 835, + /* 240 */ 835, 835, 835, 835, 835, 835, 835, 640, 638, 835, + /* 250 */ 632, 835, }; /********** End of lemon-generated parsing tables *****************************/ @@ -468,6 +471,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* CONFIGS => nothing */ 0, /* SCORES => nothing */ 0, /* GRANTS => nothing */ + 0, /* VNODES => nothing */ + 1, /* IPTOKEN => ID */ 0, /* DOT => nothing */ 0, /* TABLES => nothing */ 0, /* STABLES => nothing */ @@ -476,7 +481,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* TABLE => nothing */ 1, /* DATABASE => ID */ 0, /* DNODE => nothing */ - 1, /* IP => ID */ 0, /* USER => nothing */ 0, /* ACCOUNT => nothing */ 0, /* USE => nothing */ @@ -512,7 +516,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* USING => nothing */ 0, /* 
AS => nothing */ 0, /* COMMA => nothing */ - 0, /* NULL => nothing */ + 1, /* NULL => ID */ 0, /* SELECT => nothing */ 0, /* FROM => nothing */ 0, /* VARIABLE => nothing */ @@ -702,57 +706,58 @@ static const char *const yyTokenName[] = { "SHOW", "DATABASES", "MNODES", "DNODES", "ACCOUNTS", "USERS", "MODULES", "QUERIES", "CONNECTIONS", "STREAMS", "CONFIGS", "SCORES", - "GRANTS", "DOT", "TABLES", "STABLES", - "VGROUPS", "DROP", "TABLE", "DATABASE", - "DNODE", "IP", "USER", "ACCOUNT", - "USE", "DESCRIBE", "ALTER", "PASS", - "PRIVILEGE", "LOCAL", "IF", "EXISTS", - "CREATE", "PPS", "TSERIES", "DBS", - "STORAGE", "QTIME", "CONNS", "STATE", - "KEEP", "CACHE", "REPLICA", "DAYS", - "ROWS", "ABLOCKS", "TBLOCKS", "CTIME", - "CLOG", "COMP", "PRECISION", "LP", - "RP", "TAGS", "USING", "AS", - "COMMA", "NULL", "SELECT", "FROM", - "VARIABLE", "INTERVAL", "FILL", "SLIDING", - "ORDER", "BY", "ASC", "DESC", - "GROUP", "HAVING", "LIMIT", "OFFSET", - "SLIMIT", "SOFFSET", "WHERE", "NOW", - "INSERT", "INTO", "VALUES", "RESET", - "QUERY", "ADD", "COLUMN", "TAG", - "CHANGE", "SET", "KILL", "CONNECTION", - "COLON", "STREAM", "ABORT", "AFTER", - "ATTACH", "BEFORE", "BEGIN", "CASCADE", - "CLUSTER", "CONFLICT", "COPY", "DEFERRED", - "DELIMITERS", "DETACH", "EACH", "END", - "EXPLAIN", "FAIL", "FOR", "IGNORE", - "IMMEDIATE", "INITIALLY", "INSTEAD", "MATCH", - "KEY", "OF", "RAISE", "REPLACE", - "RESTRICT", "ROW", "STATEMENT", "TRIGGER", - "VIEW", "ALL", "COUNT", "SUM", - "AVG", "MIN", "MAX", "FIRST", - "LAST", "TOP", "BOTTOM", "STDDEV", - "PERCENTILE", "APERCENTILE", "LEASTSQUARES", "HISTOGRAM", - "DIFF", "SPREAD", "TWA", "INTERP", - "LAST_ROW", "SEMI", "NONE", "PREV", - "LINEAR", "IMPORT", "METRIC", "TBNAME", - "JOIN", "METRICS", "STABLE", "error", - "program", "cmd", "dbPrefix", "ids", - "cpxName", "ifexists", "alter_db_optr", "acct_optr", - "ifnotexists", "db_optr", "pps", "tseries", - "dbs", "streams", "storage", "qtime", - "users", "conns", "state", "keep", - "tagitemlist", "tables", "cache", "replica", - "days", "rows", "ablocks", "tblocks", - "ctime", "clog", "comp", "prec", - "typename", "signed", "create_table_args", "columnlist", - "select", "column", "tagitem", "selcollist", - "from", "where_opt", "interval_opt", "fill_opt", - "sliding_opt", "groupby_opt", "orderby_opt", "having_opt", - "slimit_opt", "limit_opt", "sclp", "expr", - "as", "tablelist", "tmvar", "sortlist", - "sortitem", "item", "sortorder", "grouplist", - "exprlist", "expritem", "insert_value_list", "itemlist", + "GRANTS", "VNODES", "IPTOKEN", "DOT", + "TABLES", "STABLES", "VGROUPS", "DROP", + "TABLE", "DATABASE", "DNODE", "USER", + "ACCOUNT", "USE", "DESCRIBE", "ALTER", + "PASS", "PRIVILEGE", "LOCAL", "IF", + "EXISTS", "CREATE", "PPS", "TSERIES", + "DBS", "STORAGE", "QTIME", "CONNS", + "STATE", "KEEP", "CACHE", "REPLICA", + "DAYS", "ROWS", "ABLOCKS", "TBLOCKS", + "CTIME", "CLOG", "COMP", "PRECISION", + "LP", "RP", "TAGS", "USING", + "AS", "COMMA", "NULL", "SELECT", + "FROM", "VARIABLE", "INTERVAL", "FILL", + "SLIDING", "ORDER", "BY", "ASC", + "DESC", "GROUP", "HAVING", "LIMIT", + "OFFSET", "SLIMIT", "SOFFSET", "WHERE", + "NOW", "INSERT", "INTO", "VALUES", + "RESET", "QUERY", "ADD", "COLUMN", + "TAG", "CHANGE", "SET", "KILL", + "CONNECTION", "COLON", "STREAM", "ABORT", + "AFTER", "ATTACH", "BEFORE", "BEGIN", + "CASCADE", "CLUSTER", "CONFLICT", "COPY", + "DEFERRED", "DELIMITERS", "DETACH", "EACH", + "END", "EXPLAIN", "FAIL", "FOR", + "IGNORE", "IMMEDIATE", "INITIALLY", "INSTEAD", + "MATCH", "KEY", "OF", "RAISE", + "REPLACE", "RESTRICT", "ROW", 
"STATEMENT", + "TRIGGER", "VIEW", "ALL", "COUNT", + "SUM", "AVG", "MIN", "MAX", + "FIRST", "LAST", "TOP", "BOTTOM", + "STDDEV", "PERCENTILE", "APERCENTILE", "LEASTSQUARES", + "HISTOGRAM", "DIFF", "SPREAD", "TWA", + "INTERP", "LAST_ROW", "SEMI", "NONE", + "PREV", "LINEAR", "IMPORT", "METRIC", + "TBNAME", "JOIN", "METRICS", "STABLE", + "error", "program", "cmd", "dbPrefix", + "ids", "cpxName", "ifexists", "alter_db_optr", + "acct_optr", "ifnotexists", "db_optr", "pps", + "tseries", "dbs", "streams", "storage", + "qtime", "users", "conns", "state", + "keep", "tagitemlist", "tables", "cache", + "replica", "days", "rows", "ablocks", + "tblocks", "ctime", "clog", "comp", + "prec", "typename", "signed", "create_table_args", + "columnlist", "select", "column", "tagitem", + "selcollist", "from", "where_opt", "interval_opt", + "fill_opt", "sliding_opt", "groupby_opt", "orderby_opt", + "having_opt", "slimit_opt", "limit_opt", "sclp", + "expr", "as", "tablelist", "tmvar", + "sortlist", "sortitem", "item", "sortorder", + "grouplist", "exprlist", "expritem", "insert_value_list", + "itemlist", }; #endif /* NDEBUG */ @@ -773,206 +778,209 @@ static const char *const yyRuleName[] = { /* 10 */ "cmd ::= SHOW CONFIGS", /* 11 */ "cmd ::= SHOW SCORES", /* 12 */ "cmd ::= SHOW GRANTS", - /* 13 */ "dbPrefix ::=", - /* 14 */ "dbPrefix ::= ids DOT", - /* 15 */ "cpxName ::=", - /* 16 */ "cpxName ::= DOT ids", - /* 17 */ "cmd ::= SHOW dbPrefix TABLES", - /* 18 */ "cmd ::= SHOW dbPrefix TABLES LIKE ids", - /* 19 */ "cmd ::= SHOW dbPrefix STABLES", - /* 20 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids", - /* 21 */ "cmd ::= SHOW dbPrefix VGROUPS", - /* 22 */ "cmd ::= DROP TABLE ifexists ids cpxName", - /* 23 */ "cmd ::= DROP DATABASE ifexists ids", - /* 24 */ "cmd ::= DROP DNODE IP", - /* 25 */ "cmd ::= DROP USER ids", - /* 26 */ "cmd ::= DROP ACCOUNT ids", - /* 27 */ "cmd ::= USE ids", - /* 28 */ "cmd ::= DESCRIBE ids cpxName", - /* 29 */ "cmd ::= ALTER USER ids PASS ids", - /* 30 */ "cmd ::= ALTER USER ids PRIVILEGE ids", - /* 31 */ "cmd ::= ALTER DNODE IP ids", - /* 32 */ "cmd ::= ALTER DNODE IP ids ids", - /* 33 */ "cmd ::= ALTER LOCAL ids", - /* 34 */ "cmd ::= ALTER LOCAL ids ids", - /* 35 */ "cmd ::= ALTER DATABASE ids alter_db_optr", - /* 36 */ "cmd ::= ALTER ACCOUNT ids acct_optr", - /* 37 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr", - /* 38 */ "ids ::= ID", - /* 39 */ "ids ::= STRING", - /* 40 */ "ifexists ::= IF EXISTS", - /* 41 */ "ifexists ::=", - /* 42 */ "ifnotexists ::= IF NOT EXISTS", - /* 43 */ "ifnotexists ::=", - /* 44 */ "cmd ::= CREATE DNODE IP", - /* 45 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr", - /* 46 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", - /* 47 */ "cmd ::= CREATE USER ids PASS ids", - /* 48 */ "pps ::=", - /* 49 */ "pps ::= PPS INTEGER", - /* 50 */ "tseries ::=", - /* 51 */ "tseries ::= TSERIES INTEGER", - /* 52 */ "dbs ::=", - /* 53 */ "dbs ::= DBS INTEGER", - /* 54 */ "streams ::=", - /* 55 */ "streams ::= STREAMS INTEGER", - /* 56 */ "storage ::=", - /* 57 */ "storage ::= STORAGE INTEGER", - /* 58 */ "qtime ::=", - /* 59 */ "qtime ::= QTIME INTEGER", - /* 60 */ "users ::=", - /* 61 */ "users ::= USERS INTEGER", - /* 62 */ "conns ::=", - /* 63 */ "conns ::= CONNS INTEGER", - /* 64 */ "state ::=", - /* 65 */ "state ::= STATE ids", - /* 66 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state", - /* 67 */ "keep ::= KEEP tagitemlist", - /* 68 */ "tables ::= TABLES INTEGER", - /* 69 */ "cache ::= CACHE INTEGER", - /* 70 */ "replica ::= REPLICA 
INTEGER", - /* 71 */ "days ::= DAYS INTEGER", - /* 72 */ "rows ::= ROWS INTEGER", - /* 73 */ "ablocks ::= ABLOCKS ID", - /* 74 */ "tblocks ::= TBLOCKS INTEGER", - /* 75 */ "ctime ::= CTIME INTEGER", - /* 76 */ "clog ::= CLOG INTEGER", - /* 77 */ "comp ::= COMP INTEGER", - /* 78 */ "prec ::= PRECISION STRING", - /* 79 */ "db_optr ::=", - /* 80 */ "db_optr ::= db_optr tables", - /* 81 */ "db_optr ::= db_optr cache", - /* 82 */ "db_optr ::= db_optr replica", - /* 83 */ "db_optr ::= db_optr days", - /* 84 */ "db_optr ::= db_optr rows", - /* 85 */ "db_optr ::= db_optr ablocks", - /* 86 */ "db_optr ::= db_optr tblocks", - /* 87 */ "db_optr ::= db_optr ctime", - /* 88 */ "db_optr ::= db_optr clog", - /* 89 */ "db_optr ::= db_optr comp", - /* 90 */ "db_optr ::= db_optr prec", - /* 91 */ "db_optr ::= db_optr keep", - /* 92 */ "alter_db_optr ::=", - /* 93 */ "alter_db_optr ::= alter_db_optr replica", - /* 94 */ "alter_db_optr ::= alter_db_optr tables", - /* 95 */ "typename ::= ids", - /* 96 */ "typename ::= ids LP signed RP", - /* 97 */ "signed ::= INTEGER", - /* 98 */ "signed ::= PLUS INTEGER", - /* 99 */ "signed ::= MINUS INTEGER", - /* 100 */ "cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args", - /* 101 */ "create_table_args ::= LP columnlist RP", - /* 102 */ "create_table_args ::= LP columnlist RP TAGS LP columnlist RP", - /* 103 */ "create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP", - /* 104 */ "create_table_args ::= AS select", - /* 105 */ "columnlist ::= columnlist COMMA column", - /* 106 */ "columnlist ::= column", - /* 107 */ "column ::= ids typename", - /* 108 */ "tagitemlist ::= tagitemlist COMMA tagitem", - /* 109 */ "tagitemlist ::= tagitem", - /* 110 */ "tagitem ::= INTEGER", - /* 111 */ "tagitem ::= FLOAT", - /* 112 */ "tagitem ::= STRING", - /* 113 */ "tagitem ::= BOOL", - /* 114 */ "tagitem ::= NULL", - /* 115 */ "tagitem ::= MINUS INTEGER", - /* 116 */ "tagitem ::= MINUS FLOAT", - /* 117 */ "tagitem ::= PLUS INTEGER", - /* 118 */ "tagitem ::= PLUS FLOAT", - /* 119 */ "cmd ::= select", - /* 120 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", - /* 121 */ "sclp ::= selcollist COMMA", - /* 122 */ "sclp ::=", - /* 123 */ "selcollist ::= sclp expr as", - /* 124 */ "selcollist ::= sclp STAR", - /* 125 */ "as ::= AS ids", - /* 126 */ "as ::= ids", - /* 127 */ "as ::=", - /* 128 */ "from ::= FROM tablelist", - /* 129 */ "tablelist ::= ids cpxName", - /* 130 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 131 */ "tmvar ::= VARIABLE", - /* 132 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 133 */ "interval_opt ::=", - /* 134 */ "fill_opt ::=", - /* 135 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 136 */ "fill_opt ::= FILL LP ID RP", - /* 137 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 138 */ "sliding_opt ::=", - /* 139 */ "orderby_opt ::=", - /* 140 */ "orderby_opt ::= ORDER BY sortlist", - /* 141 */ "sortlist ::= sortlist COMMA item sortorder", - /* 142 */ "sortlist ::= item sortorder", - /* 143 */ "item ::= ids cpxName", - /* 144 */ "sortorder ::= ASC", - /* 145 */ "sortorder ::= DESC", - /* 146 */ "sortorder ::=", - /* 147 */ "groupby_opt ::=", - /* 148 */ "groupby_opt ::= GROUP BY grouplist", - /* 149 */ "grouplist ::= grouplist COMMA item", - /* 150 */ "grouplist ::= item", - /* 151 */ "having_opt ::=", - /* 152 */ "having_opt ::= HAVING expr", - /* 153 */ "limit_opt ::=", - /* 154 */ "limit_opt ::= LIMIT signed", - /* 155 */ "limit_opt ::= LIMIT 
signed OFFSET signed", - /* 156 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 157 */ "slimit_opt ::=", - /* 158 */ "slimit_opt ::= SLIMIT signed", - /* 159 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 160 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 161 */ "where_opt ::=", - /* 162 */ "where_opt ::= WHERE expr", - /* 163 */ "expr ::= LP expr RP", - /* 164 */ "expr ::= ID", - /* 165 */ "expr ::= ID DOT ID", - /* 166 */ "expr ::= ID DOT STAR", - /* 167 */ "expr ::= INTEGER", - /* 168 */ "expr ::= MINUS INTEGER", - /* 169 */ "expr ::= PLUS INTEGER", - /* 170 */ "expr ::= FLOAT", - /* 171 */ "expr ::= MINUS FLOAT", - /* 172 */ "expr ::= PLUS FLOAT", - /* 173 */ "expr ::= STRING", - /* 174 */ "expr ::= NOW", - /* 175 */ "expr ::= VARIABLE", - /* 176 */ "expr ::= BOOL", - /* 177 */ "expr ::= ID LP exprlist RP", - /* 178 */ "expr ::= ID LP STAR RP", - /* 179 */ "expr ::= expr AND expr", - /* 180 */ "expr ::= expr OR expr", - /* 181 */ "expr ::= expr LT expr", - /* 182 */ "expr ::= expr GT expr", - /* 183 */ "expr ::= expr LE expr", - /* 184 */ "expr ::= expr GE expr", - /* 185 */ "expr ::= expr NE expr", - /* 186 */ "expr ::= expr EQ expr", - /* 187 */ "expr ::= expr PLUS expr", - /* 188 */ "expr ::= expr MINUS expr", - /* 189 */ "expr ::= expr STAR expr", - /* 190 */ "expr ::= expr SLASH expr", - /* 191 */ "expr ::= expr REM expr", - /* 192 */ "expr ::= expr LIKE expr", - /* 193 */ "expr ::= expr IN LP exprlist RP", - /* 194 */ "exprlist ::= exprlist COMMA expritem", - /* 195 */ "exprlist ::= expritem", - /* 196 */ "expritem ::= expr", - /* 197 */ "expritem ::=", - /* 198 */ "cmd ::= INSERT INTO cpxName insert_value_list", - /* 199 */ "insert_value_list ::= VALUES LP itemlist RP", - /* 200 */ "insert_value_list ::= insert_value_list VALUES LP itemlist RP", - /* 201 */ "itemlist ::= itemlist COMMA expr", - /* 202 */ "itemlist ::= expr", - /* 203 */ "cmd ::= RESET QUERY CACHE", - /* 204 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 205 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 206 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 207 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 208 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 209 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 210 */ "cmd ::= KILL CONNECTION IP COLON INTEGER", - /* 211 */ "cmd ::= KILL STREAM IP COLON INTEGER COLON INTEGER", - /* 212 */ "cmd ::= KILL QUERY IP COLON INTEGER COLON INTEGER", + /* 13 */ "cmd ::= SHOW VNODES", + /* 14 */ "cmd ::= SHOW VNODES IPTOKEN", + /* 15 */ "dbPrefix ::=", + /* 16 */ "dbPrefix ::= ids DOT", + /* 17 */ "cpxName ::=", + /* 18 */ "cpxName ::= DOT ids", + /* 19 */ "cmd ::= SHOW dbPrefix TABLES", + /* 20 */ "cmd ::= SHOW dbPrefix TABLES LIKE ids", + /* 21 */ "cmd ::= SHOW dbPrefix STABLES", + /* 22 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids", + /* 23 */ "cmd ::= SHOW dbPrefix VGROUPS", + /* 24 */ "cmd ::= DROP TABLE ifexists ids cpxName", + /* 25 */ "cmd ::= DROP DATABASE ifexists ids", + /* 26 */ "cmd ::= DROP DNODE IPTOKEN", + /* 27 */ "cmd ::= DROP USER ids", + /* 28 */ "cmd ::= DROP ACCOUNT ids", + /* 29 */ "cmd ::= USE ids", + /* 30 */ "cmd ::= DESCRIBE ids cpxName", + /* 31 */ "cmd ::= ALTER USER ids PASS ids", + /* 32 */ "cmd ::= ALTER USER ids PRIVILEGE ids", + /* 33 */ "cmd ::= ALTER DNODE IPTOKEN ids", + /* 34 */ "cmd ::= ALTER DNODE IPTOKEN ids ids", + /* 35 */ "cmd ::= ALTER LOCAL ids", + /* 36 */ "cmd ::= ALTER LOCAL ids ids", + /* 37 */ "cmd ::= ALTER DATABASE ids 
alter_db_optr", + /* 38 */ "cmd ::= ALTER ACCOUNT ids acct_optr", + /* 39 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr", + /* 40 */ "ids ::= ID", + /* 41 */ "ids ::= STRING", + /* 42 */ "ifexists ::= IF EXISTS", + /* 43 */ "ifexists ::=", + /* 44 */ "ifnotexists ::= IF NOT EXISTS", + /* 45 */ "ifnotexists ::=", + /* 46 */ "cmd ::= CREATE DNODE IPTOKEN", + /* 47 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr", + /* 48 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", + /* 49 */ "cmd ::= CREATE USER ids PASS ids", + /* 50 */ "pps ::=", + /* 51 */ "pps ::= PPS INTEGER", + /* 52 */ "tseries ::=", + /* 53 */ "tseries ::= TSERIES INTEGER", + /* 54 */ "dbs ::=", + /* 55 */ "dbs ::= DBS INTEGER", + /* 56 */ "streams ::=", + /* 57 */ "streams ::= STREAMS INTEGER", + /* 58 */ "storage ::=", + /* 59 */ "storage ::= STORAGE INTEGER", + /* 60 */ "qtime ::=", + /* 61 */ "qtime ::= QTIME INTEGER", + /* 62 */ "users ::=", + /* 63 */ "users ::= USERS INTEGER", + /* 64 */ "conns ::=", + /* 65 */ "conns ::= CONNS INTEGER", + /* 66 */ "state ::=", + /* 67 */ "state ::= STATE ids", + /* 68 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state", + /* 69 */ "keep ::= KEEP tagitemlist", + /* 70 */ "tables ::= TABLES INTEGER", + /* 71 */ "cache ::= CACHE INTEGER", + /* 72 */ "replica ::= REPLICA INTEGER", + /* 73 */ "days ::= DAYS INTEGER", + /* 74 */ "rows ::= ROWS INTEGER", + /* 75 */ "ablocks ::= ABLOCKS ID", + /* 76 */ "tblocks ::= TBLOCKS INTEGER", + /* 77 */ "ctime ::= CTIME INTEGER", + /* 78 */ "clog ::= CLOG INTEGER", + /* 79 */ "comp ::= COMP INTEGER", + /* 80 */ "prec ::= PRECISION STRING", + /* 81 */ "db_optr ::=", + /* 82 */ "db_optr ::= db_optr tables", + /* 83 */ "db_optr ::= db_optr cache", + /* 84 */ "db_optr ::= db_optr replica", + /* 85 */ "db_optr ::= db_optr days", + /* 86 */ "db_optr ::= db_optr rows", + /* 87 */ "db_optr ::= db_optr ablocks", + /* 88 */ "db_optr ::= db_optr tblocks", + /* 89 */ "db_optr ::= db_optr ctime", + /* 90 */ "db_optr ::= db_optr clog", + /* 91 */ "db_optr ::= db_optr comp", + /* 92 */ "db_optr ::= db_optr prec", + /* 93 */ "db_optr ::= db_optr keep", + /* 94 */ "alter_db_optr ::=", + /* 95 */ "alter_db_optr ::= alter_db_optr replica", + /* 96 */ "alter_db_optr ::= alter_db_optr tables", + /* 97 */ "typename ::= ids", + /* 98 */ "typename ::= ids LP signed RP", + /* 99 */ "signed ::= INTEGER", + /* 100 */ "signed ::= PLUS INTEGER", + /* 101 */ "signed ::= MINUS INTEGER", + /* 102 */ "cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args", + /* 103 */ "create_table_args ::= LP columnlist RP", + /* 104 */ "create_table_args ::= LP columnlist RP TAGS LP columnlist RP", + /* 105 */ "create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP", + /* 106 */ "create_table_args ::= AS select", + /* 107 */ "columnlist ::= columnlist COMMA column", + /* 108 */ "columnlist ::= column", + /* 109 */ "column ::= ids typename", + /* 110 */ "tagitemlist ::= tagitemlist COMMA tagitem", + /* 111 */ "tagitemlist ::= tagitem", + /* 112 */ "tagitem ::= INTEGER", + /* 113 */ "tagitem ::= FLOAT", + /* 114 */ "tagitem ::= STRING", + /* 115 */ "tagitem ::= BOOL", + /* 116 */ "tagitem ::= NULL", + /* 117 */ "tagitem ::= MINUS INTEGER", + /* 118 */ "tagitem ::= MINUS FLOAT", + /* 119 */ "tagitem ::= PLUS INTEGER", + /* 120 */ "tagitem ::= PLUS FLOAT", + /* 121 */ "cmd ::= select", + /* 122 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", + /* 123 */ "select ::= 
SELECT selcollist", + /* 124 */ "sclp ::= selcollist COMMA", + /* 125 */ "sclp ::=", + /* 126 */ "selcollist ::= sclp expr as", + /* 127 */ "selcollist ::= sclp STAR", + /* 128 */ "as ::= AS ids", + /* 129 */ "as ::= ids", + /* 130 */ "as ::=", + /* 131 */ "from ::= FROM tablelist", + /* 132 */ "tablelist ::= ids cpxName", + /* 133 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 134 */ "tmvar ::= VARIABLE", + /* 135 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 136 */ "interval_opt ::=", + /* 137 */ "fill_opt ::=", + /* 138 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 139 */ "fill_opt ::= FILL LP ID RP", + /* 140 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 141 */ "sliding_opt ::=", + /* 142 */ "orderby_opt ::=", + /* 143 */ "orderby_opt ::= ORDER BY sortlist", + /* 144 */ "sortlist ::= sortlist COMMA item sortorder", + /* 145 */ "sortlist ::= item sortorder", + /* 146 */ "item ::= ids cpxName", + /* 147 */ "sortorder ::= ASC", + /* 148 */ "sortorder ::= DESC", + /* 149 */ "sortorder ::=", + /* 150 */ "groupby_opt ::=", + /* 151 */ "groupby_opt ::= GROUP BY grouplist", + /* 152 */ "grouplist ::= grouplist COMMA item", + /* 153 */ "grouplist ::= item", + /* 154 */ "having_opt ::=", + /* 155 */ "having_opt ::= HAVING expr", + /* 156 */ "limit_opt ::=", + /* 157 */ "limit_opt ::= LIMIT signed", + /* 158 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 159 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 160 */ "slimit_opt ::=", + /* 161 */ "slimit_opt ::= SLIMIT signed", + /* 162 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 163 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 164 */ "where_opt ::=", + /* 165 */ "where_opt ::= WHERE expr", + /* 166 */ "expr ::= LP expr RP", + /* 167 */ "expr ::= ID", + /* 168 */ "expr ::= ID DOT ID", + /* 169 */ "expr ::= ID DOT STAR", + /* 170 */ "expr ::= INTEGER", + /* 171 */ "expr ::= MINUS INTEGER", + /* 172 */ "expr ::= PLUS INTEGER", + /* 173 */ "expr ::= FLOAT", + /* 174 */ "expr ::= MINUS FLOAT", + /* 175 */ "expr ::= PLUS FLOAT", + /* 176 */ "expr ::= STRING", + /* 177 */ "expr ::= NOW", + /* 178 */ "expr ::= VARIABLE", + /* 179 */ "expr ::= BOOL", + /* 180 */ "expr ::= ID LP exprlist RP", + /* 181 */ "expr ::= ID LP STAR RP", + /* 182 */ "expr ::= expr AND expr", + /* 183 */ "expr ::= expr OR expr", + /* 184 */ "expr ::= expr LT expr", + /* 185 */ "expr ::= expr GT expr", + /* 186 */ "expr ::= expr LE expr", + /* 187 */ "expr ::= expr GE expr", + /* 188 */ "expr ::= expr NE expr", + /* 189 */ "expr ::= expr EQ expr", + /* 190 */ "expr ::= expr PLUS expr", + /* 191 */ "expr ::= expr MINUS expr", + /* 192 */ "expr ::= expr STAR expr", + /* 193 */ "expr ::= expr SLASH expr", + /* 194 */ "expr ::= expr REM expr", + /* 195 */ "expr ::= expr LIKE expr", + /* 196 */ "expr ::= expr IN LP exprlist RP", + /* 197 */ "exprlist ::= exprlist COMMA expritem", + /* 198 */ "exprlist ::= expritem", + /* 199 */ "expritem ::= expr", + /* 200 */ "expritem ::=", + /* 201 */ "cmd ::= INSERT INTO cpxName insert_value_list", + /* 202 */ "insert_value_list ::= VALUES LP itemlist RP", + /* 203 */ "insert_value_list ::= insert_value_list VALUES LP itemlist RP", + /* 204 */ "itemlist ::= itemlist COMMA expr", + /* 205 */ "itemlist ::= expr", + /* 206 */ "cmd ::= RESET QUERY CACHE", + /* 207 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 208 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 209 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 210 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + 
/* 211 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 212 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 213 */ "cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER", + /* 214 */ "cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER", + /* 215 */ "cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1063,46 +1071,46 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 215: /* keep */ - case 216: /* tagitemlist */ - case 239: /* fill_opt */ - case 241: /* groupby_opt */ - case 242: /* orderby_opt */ - case 251: /* sortlist */ - case 255: /* grouplist */ + case 216: /* keep */ + case 217: /* tagitemlist */ + case 240: /* fill_opt */ + case 242: /* groupby_opt */ + case 243: /* orderby_opt */ + case 252: /* sortlist */ + case 256: /* grouplist */ { -tVariantListDestroy((yypminor->yy56)); +tVariantListDestroy((yypminor->yy480)); } break; - case 231: /* columnlist */ + case 232: /* columnlist */ { -tFieldListDestroy((yypminor->yy471)); +tFieldListDestroy((yypminor->yy421)); } break; - case 232: /* select */ + case 233: /* select */ { -destroyQuerySql((yypminor->yy24)); +destroyQuerySql((yypminor->yy138)); } break; - case 235: /* selcollist */ - case 246: /* sclp */ - case 256: /* exprlist */ - case 259: /* itemlist */ + case 236: /* selcollist */ + case 247: /* sclp */ + case 257: /* exprlist */ + case 260: /* itemlist */ { -tSQLExprListDestroy((yypminor->yy498)); +tSQLExprListDestroy((yypminor->yy284)); } break; - case 237: /* where_opt */ - case 243: /* having_opt */ - case 247: /* expr */ - case 257: /* expritem */ + case 238: /* where_opt */ + case 244: /* having_opt */ + case 248: /* expr */ + case 258: /* expritem */ { -tSQLExprDestroy((yypminor->yy90)); +tSQLExprDestroy((yypminor->yy244)); } break; - case 252: /* sortitem */ + case 253: /* sortitem */ { -tVariantDestroy(&(yypminor->yy186)); +tVariantDestroy(&(yypminor->yy236)); } break; /********* End destructor definitions *****************************************/ @@ -1343,56 +1351,56 @@ static const struct { YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ unsigned char nrhs; /* Number of right-hand side symbols in the rule */ } yyRuleInfo[] = { - { 196, 1 }, - { 197, 2 }, - { 197, 2 }, - { 197, 2 }, - { 197, 2 }, - { 197, 2 }, - { 197, 2 }, - { 197, 2 }, - { 197, 2 }, - { 197, 2 }, - { 197, 2 }, - { 197, 2 }, - { 197, 2 }, - { 198, 0 }, + { 197, 1 }, { 198, 2 }, - { 200, 0 }, - { 200, 2 }, - { 197, 3 }, - { 197, 5 }, - { 197, 3 }, - { 197, 5 }, - { 197, 3 }, - { 197, 5 }, - { 197, 4 }, - { 197, 3 }, - { 197, 3 }, - { 197, 3 }, - { 197, 2 }, - { 197, 3 }, - { 197, 5 }, - { 197, 5 }, - { 197, 4 }, - { 197, 5 }, - { 197, 3 }, - { 197, 4 }, - { 197, 4 }, - { 197, 4 }, - { 197, 6 }, - { 199, 1 }, - { 199, 1 }, - { 201, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 2 }, + { 198, 3 }, + { 199, 0 }, + { 199, 2 }, { 201, 0 }, - { 204, 3 }, - { 204, 0 }, - { 197, 3 }, - { 197, 6 }, - { 197, 5 }, - { 197, 5 }, - { 206, 0 }, - { 206, 2 }, + { 201, 2 }, + { 198, 3 }, + { 198, 5 }, + { 198, 3 }, + { 198, 5 }, + { 198, 3 }, + { 198, 5 }, + { 198, 4 }, + { 198, 3 }, + { 198, 3 }, + { 198, 3 }, + { 198, 2 }, + { 198, 3 }, + { 198, 5 }, + { 198, 5 }, + { 198, 4 }, + { 198, 5 }, + { 198, 3 }, + { 198, 4 }, + { 198, 4 }, + { 198, 4 }, + { 198, 6 }, + { 200, 1 }, + { 200, 
1 }, + { 202, 2 }, + { 202, 0 }, + { 205, 3 }, + { 205, 0 }, + { 198, 3 }, + { 198, 6 }, + { 198, 5 }, + { 198, 5 }, { 207, 0 }, { 207, 2 }, { 208, 0 }, @@ -1409,9 +1417,10 @@ static const struct { { 213, 2 }, { 214, 0 }, { 214, 2 }, - { 203, 9 }, + { 215, 0 }, { 215, 2 }, - { 217, 2 }, + { 204, 9 }, + { 216, 2 }, { 218, 2 }, { 219, 2 }, { 220, 2 }, @@ -1422,140 +1431,142 @@ static const struct { { 225, 2 }, { 226, 2 }, { 227, 2 }, - { 205, 0 }, - { 205, 2 }, - { 205, 2 }, - { 205, 2 }, - { 205, 2 }, - { 205, 2 }, - { 205, 2 }, - { 205, 2 }, - { 205, 2 }, - { 205, 2 }, - { 205, 2 }, - { 205, 2 }, - { 205, 2 }, - { 202, 0 }, - { 202, 2 }, - { 202, 2 }, - { 228, 1 }, - { 228, 4 }, + { 228, 2 }, + { 206, 0 }, + { 206, 2 }, + { 206, 2 }, + { 206, 2 }, + { 206, 2 }, + { 206, 2 }, + { 206, 2 }, + { 206, 2 }, + { 206, 2 }, + { 206, 2 }, + { 206, 2 }, + { 206, 2 }, + { 206, 2 }, + { 203, 0 }, + { 203, 2 }, + { 203, 2 }, { 229, 1 }, - { 229, 2 }, - { 229, 2 }, - { 197, 6 }, - { 230, 3 }, - { 230, 7 }, - { 230, 7 }, + { 229, 4 }, + { 230, 1 }, + { 230, 2 }, { 230, 2 }, + { 198, 6 }, { 231, 3 }, - { 231, 1 }, - { 233, 2 }, - { 216, 3 }, - { 216, 1 }, - { 234, 1 }, - { 234, 1 }, - { 234, 1 }, - { 234, 1 }, - { 234, 1 }, - { 234, 2 }, - { 234, 2 }, - { 234, 2 }, + { 231, 7 }, + { 231, 7 }, + { 231, 2 }, + { 232, 3 }, + { 232, 1 }, { 234, 2 }, - { 197, 1 }, - { 232, 12 }, - { 246, 2 }, - { 246, 0 }, - { 235, 3 }, + { 217, 3 }, + { 217, 1 }, + { 235, 1 }, + { 235, 1 }, + { 235, 1 }, + { 235, 1 }, + { 235, 1 }, { 235, 2 }, - { 248, 2 }, - { 248, 1 }, - { 248, 0 }, + { 235, 2 }, + { 235, 2 }, + { 235, 2 }, + { 198, 1 }, + { 233, 12 }, + { 233, 2 }, + { 247, 2 }, + { 247, 0 }, + { 236, 3 }, { 236, 2 }, { 249, 2 }, - { 249, 4 }, - { 250, 1 }, - { 238, 4 }, - { 238, 0 }, - { 239, 0 }, - { 239, 6 }, + { 249, 1 }, + { 249, 0 }, + { 237, 2 }, + { 250, 2 }, + { 250, 4 }, + { 251, 1 }, { 239, 4 }, - { 240, 4 }, + { 239, 0 }, { 240, 0 }, - { 242, 0 }, - { 242, 3 }, - { 251, 4 }, - { 251, 2 }, - { 253, 2 }, - { 254, 1 }, - { 254, 1 }, - { 254, 0 }, + { 240, 6 }, + { 240, 4 }, + { 241, 4 }, { 241, 0 }, - { 241, 3 }, - { 255, 3 }, - { 255, 1 }, { 243, 0 }, - { 243, 2 }, + { 243, 3 }, + { 252, 4 }, + { 252, 2 }, + { 254, 2 }, + { 255, 1 }, + { 255, 1 }, + { 255, 0 }, + { 242, 0 }, + { 242, 3 }, + { 256, 3 }, + { 256, 1 }, + { 244, 0 }, + { 244, 2 }, + { 246, 0 }, + { 246, 2 }, + { 246, 4 }, + { 246, 4 }, { 245, 0 }, { 245, 2 }, { 245, 4 }, { 245, 4 }, - { 244, 0 }, - { 244, 2 }, - { 244, 4 }, - { 244, 4 }, - { 237, 0 }, - { 237, 2 }, - { 247, 3 }, - { 247, 1 }, - { 247, 3 }, - { 247, 3 }, - { 247, 1 }, - { 247, 2 }, - { 247, 2 }, - { 247, 1 }, - { 247, 2 }, - { 247, 2 }, - { 247, 1 }, - { 247, 1 }, - { 247, 1 }, - { 247, 1 }, - { 247, 4 }, - { 247, 4 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 3 }, - { 247, 5 }, - { 256, 3 }, - { 256, 1 }, + { 238, 0 }, + { 238, 2 }, + { 248, 3 }, + { 248, 1 }, + { 248, 3 }, + { 248, 3 }, + { 248, 1 }, + { 248, 2 }, + { 248, 2 }, + { 248, 1 }, + { 248, 2 }, + { 248, 2 }, + { 248, 1 }, + { 248, 1 }, + { 248, 1 }, + { 248, 1 }, + { 248, 4 }, + { 248, 4 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 3 }, + { 248, 5 }, + { 257, 3 }, { 257, 1 }, - { 257, 0 }, - { 197, 4 }, - { 258, 4 }, - { 258, 5 
}, - { 259, 3 }, - { 259, 1 }, - { 197, 3 }, - { 197, 7 }, - { 197, 7 }, - { 197, 7 }, - { 197, 7 }, - { 197, 8 }, - { 197, 9 }, - { 197, 5 }, - { 197, 7 }, - { 197, 7 }, + { 258, 1 }, + { 258, 0 }, + { 198, 4 }, + { 259, 4 }, + { 259, 5 }, + { 260, 3 }, + { 260, 1 }, + { 198, 3 }, + { 198, 7 }, + { 198, 7 }, + { 198, 7 }, + { 198, 7 }, + { 198, 8 }, + { 198, 9 }, + { 198, 5 }, + { 198, 7 }, + { 198, 7 }, }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -1634,563 +1645,578 @@ static void yy_reduce( case 12: /* cmd ::= SHOW GRANTS */ { setDCLSQLElems(pInfo, SHOW_GRANTS, 0); } break; - case 13: /* dbPrefix ::= */ - case 41: /* ifexists ::= */ yytestcase(yyruleno==41); - case 43: /* ifnotexists ::= */ yytestcase(yyruleno==43); + case 13: /* cmd ::= SHOW VNODES */ +{ setDCLSQLElems(pInfo, SHOW_VNODES, 0); } + break; + case 14: /* cmd ::= SHOW VNODES IPTOKEN */ +{ setDCLSQLElems(pInfo, SHOW_VNODES, 1, &yymsp[0].minor.yy0); } + break; + case 15: /* dbPrefix ::= */ + case 43: /* ifexists ::= */ yytestcase(yyruleno==43); + case 45: /* ifnotexists ::= */ yytestcase(yyruleno==45); {yygotominor.yy0.n = 0;} break; - case 14: /* dbPrefix ::= ids DOT */ + case 16: /* dbPrefix ::= ids DOT */ {yygotominor.yy0 = yymsp[-1].minor.yy0; } break; - case 15: /* cpxName ::= */ + case 17: /* cpxName ::= */ {yygotominor.yy0.n = 0; } break; - case 16: /* cpxName ::= DOT ids */ + case 18: /* cpxName ::= DOT ids */ {yygotominor.yy0 = yymsp[0].minor.yy0; yygotominor.yy0.n += 1; } break; - case 17: /* cmd ::= SHOW dbPrefix TABLES */ + case 19: /* cmd ::= SHOW dbPrefix TABLES */ { setDCLSQLElems(pInfo, SHOW_TABLES, 1, &yymsp[-1].minor.yy0); } break; - case 18: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ + case 20: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ { setDCLSQLElems(pInfo, SHOW_TABLES, 2, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0); } break; - case 19: /* cmd ::= SHOW dbPrefix STABLES */ + case 21: /* cmd ::= SHOW dbPrefix STABLES */ { setDCLSQLElems(pInfo, SHOW_STABLES, 1, &yymsp[-1].minor.yy0); } break; - case 20: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ + case 22: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ { SSQLToken token; setDBName(&token, &yymsp[-3].minor.yy0); setDCLSQLElems(pInfo, SHOW_STABLES, 2, &token, &yymsp[0].minor.yy0); } break; - case 21: /* cmd ::= SHOW dbPrefix VGROUPS */ + case 23: /* cmd ::= SHOW dbPrefix VGROUPS */ { SSQLToken token; setDBName(&token, &yymsp[-1].minor.yy0); setDCLSQLElems(pInfo, SHOW_VGROUPS, 1, &token); } break; - case 22: /* cmd ::= DROP TABLE ifexists ids cpxName */ + case 24: /* cmd ::= DROP TABLE ifexists ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSQLElems(pInfo, DROP_TABLE, 2, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0); } break; - case 23: /* cmd ::= DROP DATABASE ifexists ids */ + case 25: /* cmd ::= DROP DATABASE ifexists ids */ { setDCLSQLElems(pInfo, DROP_DATABASE, 2, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0); } break; - case 24: /* cmd ::= DROP DNODE IP */ + case 26: /* cmd ::= DROP DNODE IPTOKEN */ { setDCLSQLElems(pInfo, DROP_DNODE, 1, &yymsp[0].minor.yy0); } break; - case 25: /* cmd ::= DROP USER ids */ + case 27: /* cmd ::= DROP USER ids */ { setDCLSQLElems(pInfo, DROP_USER, 1, &yymsp[0].minor.yy0); } break; - case 26: /* cmd ::= DROP ACCOUNT ids */ + case 28: /* cmd ::= DROP ACCOUNT ids */ { setDCLSQLElems(pInfo, DROP_ACCOUNT, 1, &yymsp[0].minor.yy0); } break; - case 27: /* cmd ::= USE ids */ + case 29: /* cmd ::= USE ids */ { setDCLSQLElems(pInfo, USE_DATABASE, 1, &yymsp[0].minor.yy0);} break; - case 28: /* 
cmd ::= DESCRIBE ids cpxName */ + case 30: /* cmd ::= DESCRIBE ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSQLElems(pInfo, DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); } break; - case 29: /* cmd ::= ALTER USER ids PASS ids */ + case 31: /* cmd ::= ALTER USER ids PASS ids */ { setDCLSQLElems(pInfo, ALTER_USER_PASSWD, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 30: /* cmd ::= ALTER USER ids PRIVILEGE ids */ + case 32: /* cmd ::= ALTER USER ids PRIVILEGE ids */ { setDCLSQLElems(pInfo, ALTER_USER_PRIVILEGES, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} break; - case 31: /* cmd ::= ALTER DNODE IP ids */ + case 33: /* cmd ::= ALTER DNODE IPTOKEN ids */ { setDCLSQLElems(pInfo, ALTER_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 32: /* cmd ::= ALTER DNODE IP ids ids */ + case 34: /* cmd ::= ALTER DNODE IPTOKEN ids ids */ { setDCLSQLElems(pInfo, ALTER_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 33: /* cmd ::= ALTER LOCAL ids */ + case 35: /* cmd ::= ALTER LOCAL ids */ { setDCLSQLElems(pInfo, ALTER_LOCAL, 1, &yymsp[0].minor.yy0); } break; - case 34: /* cmd ::= ALTER LOCAL ids ids */ + case 36: /* cmd ::= ALTER LOCAL ids ids */ { setDCLSQLElems(pInfo, ALTER_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 35: /* cmd ::= ALTER DATABASE ids alter_db_optr */ -{ SSQLToken t = {0}; setCreateDBSQL(pInfo, ALTER_DATABASE, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy398, &t);} + case 37: /* cmd ::= ALTER DATABASE ids alter_db_optr */ +{ SSQLToken t = {0}; setCreateDBSQL(pInfo, ALTER_DATABASE, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy262, &t);} break; - case 36: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -{ SSQLToken t = {0}; setCreateAcctSQL(pInfo, ALTER_ACCT, &yymsp[-1].minor.yy0, &t, &yymsp[0].minor.yy279);} + case 38: /* cmd ::= ALTER ACCOUNT ids acct_optr */ +{ SSQLToken t = {0}; setCreateAcctSQL(pInfo, ALTER_ACCT, &yymsp[-1].minor.yy0, &t, &yymsp[0].minor.yy155);} break; - case 37: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSQL(pInfo, ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy279);} + case 39: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ +{ setCreateAcctSQL(pInfo, ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy155);} break; - case 38: /* ids ::= ID */ - case 39: /* ids ::= STRING */ yytestcase(yyruleno==39); + case 40: /* ids ::= ID */ + case 41: /* ids ::= STRING */ yytestcase(yyruleno==41); {yygotominor.yy0 = yymsp[0].minor.yy0; } break; - case 40: /* ifexists ::= IF EXISTS */ - case 42: /* ifnotexists ::= IF NOT EXISTS */ yytestcase(yyruleno==42); + case 42: /* ifexists ::= IF EXISTS */ + case 44: /* ifnotexists ::= IF NOT EXISTS */ yytestcase(yyruleno==44); {yygotominor.yy0.n = 1;} break; - case 44: /* cmd ::= CREATE DNODE IP */ + case 46: /* cmd ::= CREATE DNODE IPTOKEN */ { setDCLSQLElems(pInfo, CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; - case 45: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSQL(pInfo, CREATE_ACCOUNT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy279);} + case 47: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ +{ setCreateAcctSQL(pInfo, CREATE_ACCOUNT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy155);} break; - case 46: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ -{ setCreateDBSQL(pInfo, CREATE_DATABASE, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy398, &yymsp[-2].minor.yy0);} + case 48: /* cmd 
::= CREATE DATABASE ifnotexists ids db_optr */ +{ setCreateDBSQL(pInfo, CREATE_DATABASE, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy262, &yymsp[-2].minor.yy0);} break; - case 47: /* cmd ::= CREATE USER ids PASS ids */ + case 49: /* cmd ::= CREATE USER ids PASS ids */ { setDCLSQLElems(pInfo, CREATE_USER, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} break; - case 48: /* pps ::= */ - case 50: /* tseries ::= */ yytestcase(yyruleno==50); - case 52: /* dbs ::= */ yytestcase(yyruleno==52); - case 54: /* streams ::= */ yytestcase(yyruleno==54); - case 56: /* storage ::= */ yytestcase(yyruleno==56); - case 58: /* qtime ::= */ yytestcase(yyruleno==58); - case 60: /* users ::= */ yytestcase(yyruleno==60); - case 62: /* conns ::= */ yytestcase(yyruleno==62); - case 64: /* state ::= */ yytestcase(yyruleno==64); - case 133: /* interval_opt ::= */ yytestcase(yyruleno==133); - case 138: /* sliding_opt ::= */ yytestcase(yyruleno==138); + case 50: /* pps ::= */ + case 52: /* tseries ::= */ yytestcase(yyruleno==52); + case 54: /* dbs ::= */ yytestcase(yyruleno==54); + case 56: /* streams ::= */ yytestcase(yyruleno==56); + case 58: /* storage ::= */ yytestcase(yyruleno==58); + case 60: /* qtime ::= */ yytestcase(yyruleno==60); + case 62: /* users ::= */ yytestcase(yyruleno==62); + case 64: /* conns ::= */ yytestcase(yyruleno==64); + case 66: /* state ::= */ yytestcase(yyruleno==66); {yygotominor.yy0.n = 0; } break; - case 49: /* pps ::= PPS INTEGER */ - case 51: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==51); - case 53: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==53); - case 55: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==55); - case 57: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==57); - case 59: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==59); - case 61: /* users ::= USERS INTEGER */ yytestcase(yyruleno==61); - case 63: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==63); - case 65: /* state ::= STATE ids */ yytestcase(yyruleno==65); + case 51: /* pps ::= PPS INTEGER */ + case 53: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==53); + case 55: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==55); + case 57: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==57); + case 59: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==59); + case 61: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==61); + case 63: /* users ::= USERS INTEGER */ yytestcase(yyruleno==63); + case 65: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==65); + case 67: /* state ::= STATE ids */ yytestcase(yyruleno==67); {yygotominor.yy0 = yymsp[0].minor.yy0; } break; - case 66: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + case 68: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { - yygotominor.yy279.users = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yygotominor.yy279.dbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yygotominor.yy279.tseries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yygotominor.yy279.streams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yygotominor.yy279.pps = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - yygotominor.yy279.storage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yygotominor.yy279.qtime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yygotominor.yy279.conns = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yygotominor.yy279.stat = 
yymsp[0].minor.yy0; + yygotominor.yy155.users = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yygotominor.yy155.dbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yygotominor.yy155.tseries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yygotominor.yy155.streams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yygotominor.yy155.pps = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + yygotominor.yy155.storage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; + yygotominor.yy155.qtime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yygotominor.yy155.conns = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + yygotominor.yy155.stat = yymsp[0].minor.yy0; } break; - case 67: /* keep ::= KEEP tagitemlist */ -{ yygotominor.yy56 = yymsp[0].minor.yy56; } - break; - case 68: /* tables ::= TABLES INTEGER */ - case 69: /* cache ::= CACHE INTEGER */ yytestcase(yyruleno==69); - case 70: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==70); - case 71: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==71); - case 72: /* rows ::= ROWS INTEGER */ yytestcase(yyruleno==72); - case 73: /* ablocks ::= ABLOCKS ID */ yytestcase(yyruleno==73); - case 74: /* tblocks ::= TBLOCKS INTEGER */ yytestcase(yyruleno==74); - case 75: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==75); - case 76: /* clog ::= CLOG INTEGER */ yytestcase(yyruleno==76); - case 77: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==77); - case 78: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==78); - case 79: /* db_optr ::= */ yytestcase(yyruleno==79); + case 69: /* keep ::= KEEP tagitemlist */ +{ yygotominor.yy480 = yymsp[0].minor.yy480; } + break; + case 70: /* tables ::= TABLES INTEGER */ + case 71: /* cache ::= CACHE INTEGER */ yytestcase(yyruleno==71); + case 72: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==72); + case 73: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==73); + case 74: /* rows ::= ROWS INTEGER */ yytestcase(yyruleno==74); + case 75: /* ablocks ::= ABLOCKS ID */ yytestcase(yyruleno==75); + case 76: /* tblocks ::= TBLOCKS INTEGER */ yytestcase(yyruleno==76); + case 77: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==77); + case 78: /* clog ::= CLOG INTEGER */ yytestcase(yyruleno==78); + case 79: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==79); + case 80: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==80); { yygotominor.yy0 = yymsp[0].minor.yy0; } break; - case 80: /* db_optr ::= db_optr tables */ - case 94: /* alter_db_optr ::= alter_db_optr tables */ yytestcase(yyruleno==94); -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.tablesPerVnode = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 81: /* db_optr ::= */ +{setDefaultCreateDbOption(&yygotominor.yy262);} + break; + case 82: /* db_optr ::= db_optr tables */ + case 96: /* alter_db_optr ::= alter_db_optr tables */ yytestcase(yyruleno==96); +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.tablesPerVnode = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 81: /* db_optr ::= db_optr cache */ -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 83: /* db_optr ::= db_optr cache */ +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 82: /* db_optr ::= db_optr replica */ - case 93: /* alter_db_optr ::= alter_db_optr replica */ 
yytestcase(yyruleno==93); -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 84: /* db_optr ::= db_optr replica */ + case 95: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==95); +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 83: /* db_optr ::= db_optr days */ -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 85: /* db_optr ::= db_optr days */ +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 84: /* db_optr ::= db_optr rows */ -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.rowPerFileBlock = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 86: /* db_optr ::= db_optr rows */ +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.rowPerFileBlock = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 85: /* db_optr ::= db_optr ablocks */ -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.numOfAvgCacheBlocks = strtod(yymsp[0].minor.yy0.z, NULL); } + case 87: /* db_optr ::= db_optr ablocks */ +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.numOfAvgCacheBlocks = strtod(yymsp[0].minor.yy0.z, NULL); } break; - case 86: /* db_optr ::= db_optr tblocks */ -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.numOfBlocksPerTable = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 88: /* db_optr ::= db_optr tblocks */ +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.numOfBlocksPerTable = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 87: /* db_optr ::= db_optr ctime */ -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 89: /* db_optr ::= db_optr ctime */ +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 88: /* db_optr ::= db_optr clog */ -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.commitLog = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 90: /* db_optr ::= db_optr clog */ +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.commitLog = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 89: /* db_optr ::= db_optr comp */ -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 91: /* db_optr ::= db_optr comp */ +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 90: /* db_optr ::= db_optr prec */ -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.precision = yymsp[0].minor.yy0; } + case 92: /* db_optr ::= db_optr prec */ +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.precision = yymsp[0].minor.yy0; } break; - case 91: /* db_optr ::= db_optr keep */ -{ yygotominor.yy398 = yymsp[-1].minor.yy398; yygotominor.yy398.keep = yymsp[0].minor.yy56; } + case 93: /* db_optr ::= db_optr keep */ +{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.keep = yymsp[0].minor.yy480; } break; - case 92: /* alter_db_optr ::= */ -{ memset(&yygotominor.yy398, 0, sizeof(SCreateDBInfo));} + case 94: /* alter_db_optr ::= */ +{ setDefaultCreateDbOption(&yygotominor.yy262);} break; - case 95: /* typename ::= ids */ -{ 
tSQLSetColumnType (&yygotominor.yy223, &yymsp[0].minor.yy0); } + case 97: /* typename ::= ids */ +{ tSQLSetColumnType (&yygotominor.yy397, &yymsp[0].minor.yy0); } break; - case 96: /* typename ::= ids LP signed RP */ + case 98: /* typename ::= ids LP signed RP */ { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy389; // negative value of name length - tSQLSetColumnType(&yygotominor.yy223, &yymsp[-3].minor.yy0); + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy369; // negative value of name length + tSQLSetColumnType(&yygotominor.yy397, &yymsp[-3].minor.yy0); } break; - case 97: /* signed ::= INTEGER */ - case 98: /* signed ::= PLUS INTEGER */ yytestcase(yyruleno==98); -{ yygotominor.yy389 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 99: /* signed ::= INTEGER */ + case 100: /* signed ::= PLUS INTEGER */ yytestcase(yyruleno==100); +{ yygotominor.yy369 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 99: /* signed ::= MINUS INTEGER */ -{ yygotominor.yy389 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} + case 101: /* signed ::= MINUS INTEGER */ +{ yygotominor.yy369 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; - case 100: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ + case 102: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; setCreatedMeterName(pInfo, &yymsp[-2].minor.yy0, &yymsp[-3].minor.yy0); } break; - case 101: /* create_table_args ::= LP columnlist RP */ + case 103: /* create_table_args ::= LP columnlist RP */ { - yygotominor.yy158 = tSetCreateSQLElems(yymsp[-1].minor.yy471, NULL, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METER); - setSQLInfo(pInfo, yygotominor.yy158, NULL, TSQL_CREATE_NORMAL_METER); + yygotominor.yy344 = tSetCreateSQLElems(yymsp[-1].minor.yy421, NULL, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METER); + setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_NORMAL_METER); } break; - case 102: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ + case 104: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ { - yygotominor.yy158 = tSetCreateSQLElems(yymsp[-5].minor.yy471, yymsp[-1].minor.yy471, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METRIC); - setSQLInfo(pInfo, yygotominor.yy158, NULL, TSQL_CREATE_NORMAL_METRIC); + yygotominor.yy344 = tSetCreateSQLElems(yymsp[-5].minor.yy421, yymsp[-1].minor.yy421, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METRIC); + setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_NORMAL_METRIC); } break; - case 103: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ + case 105: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; - yygotominor.yy158 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy56, NULL, TSQL_CREATE_METER_FROM_METRIC); - setSQLInfo(pInfo, yygotominor.yy158, NULL, TSQL_CREATE_METER_FROM_METRIC); + yygotominor.yy344 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy480, NULL, TSQL_CREATE_METER_FROM_METRIC); + setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_METER_FROM_METRIC); } break; - case 104: /* create_table_args ::= AS select */ + case 106: /* create_table_args ::= AS select */ { - yygotominor.yy158 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy24, TSQL_CREATE_STREAM); - setSQLInfo(pInfo, yygotominor.yy158, NULL, TSQL_CREATE_STREAM); + yygotominor.yy344 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy138, TSQL_CREATE_STREAM); + 
setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_STREAM); } break; - case 105: /* columnlist ::= columnlist COMMA column */ -{yygotominor.yy471 = tFieldListAppend(yymsp[-2].minor.yy471, &yymsp[0].minor.yy223); } + case 107: /* columnlist ::= columnlist COMMA column */ +{yygotominor.yy421 = tFieldListAppend(yymsp[-2].minor.yy421, &yymsp[0].minor.yy397); } break; - case 106: /* columnlist ::= column */ -{yygotominor.yy471 = tFieldListAppend(NULL, &yymsp[0].minor.yy223);} + case 108: /* columnlist ::= column */ +{yygotominor.yy421 = tFieldListAppend(NULL, &yymsp[0].minor.yy397);} break; - case 107: /* column ::= ids typename */ + case 109: /* column ::= ids typename */ { - tSQLSetColumnInfo(&yygotominor.yy223, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy223); + tSQLSetColumnInfo(&yygotominor.yy397, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy397); } break; - case 108: /* tagitemlist ::= tagitemlist COMMA tagitem */ -{ yygotominor.yy56 = tVariantListAppend(yymsp[-2].minor.yy56, &yymsp[0].minor.yy186, -1); } + case 110: /* tagitemlist ::= tagitemlist COMMA tagitem */ +{ yygotominor.yy480 = tVariantListAppend(yymsp[-2].minor.yy480, &yymsp[0].minor.yy236, -1); } break; - case 109: /* tagitemlist ::= tagitem */ -{ yygotominor.yy56 = tVariantListAppend(NULL, &yymsp[0].minor.yy186, -1); } + case 111: /* tagitemlist ::= tagitem */ +{ yygotominor.yy480 = tVariantListAppend(NULL, &yymsp[0].minor.yy236, -1); } break; - case 110: /* tagitem ::= INTEGER */ - case 111: /* tagitem ::= FLOAT */ yytestcase(yyruleno==111); - case 112: /* tagitem ::= STRING */ yytestcase(yyruleno==112); - case 113: /* tagitem ::= BOOL */ yytestcase(yyruleno==113); -{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yygotominor.yy186, &yymsp[0].minor.yy0); } + case 112: /* tagitem ::= INTEGER */ + case 113: /* tagitem ::= FLOAT */ yytestcase(yyruleno==113); + case 114: /* tagitem ::= STRING */ yytestcase(yyruleno==114); + case 115: /* tagitem ::= BOOL */ yytestcase(yyruleno==115); +{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yygotominor.yy236, &yymsp[0].minor.yy0); } break; - case 114: /* tagitem ::= NULL */ -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yygotominor.yy186, &yymsp[0].minor.yy0); } + case 116: /* tagitem ::= NULL */ +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yygotominor.yy236, &yymsp[0].minor.yy0); } break; - case 115: /* tagitem ::= MINUS INTEGER */ - case 116: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==116); - case 117: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==117); - case 118: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==118); + case 117: /* tagitem ::= MINUS INTEGER */ + case 118: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==118); + case 119: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==119); + case 120: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==120); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yygotominor.yy186, &yymsp[-1].minor.yy0); + tVariantCreate(&yygotominor.yy236, &yymsp[-1].minor.yy0); +} + break; + case 121: /* cmd ::= select */ +{ + setSQLInfo(pInfo, yymsp[0].minor.yy138, NULL, TSQL_QUERY_METER); } break; - case 119: /* cmd ::= select */ + case 122: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - setSQLInfo(pInfo, yymsp[0].minor.yy24, NULL, TSQL_QUERY_METER); + yygotominor.yy138 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, 
yymsp[-10].minor.yy284, yymsp[-9].minor.yy480, yymsp[-8].minor.yy244, yymsp[-4].minor.yy480, yymsp[-3].minor.yy480, &yymsp[-7].minor.yy0, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy480, &yymsp[0].minor.yy162, &yymsp[-1].minor.yy162); } break; - case 120: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + case 123: /* select ::= SELECT selcollist */ { - yygotominor.yy24 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy498, yymsp[-9].minor.yy56, yymsp[-8].minor.yy90, yymsp[-4].minor.yy56, yymsp[-3].minor.yy56, &yymsp[-7].minor.yy0, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy56, &yymsp[0].minor.yy294, &yymsp[-1].minor.yy294); + yygotominor.yy138 = tSetQuerySQLElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy284, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } break; - case 121: /* sclp ::= selcollist COMMA */ -{yygotominor.yy498 = yymsp[-1].minor.yy498;} + case 124: /* sclp ::= selcollist COMMA */ +{yygotominor.yy284 = yymsp[-1].minor.yy284;} break; - case 122: /* sclp ::= */ -{yygotominor.yy498 = 0;} + case 125: /* sclp ::= */ +{yygotominor.yy284 = 0;} break; - case 123: /* selcollist ::= sclp expr as */ + case 126: /* selcollist ::= sclp expr as */ { - yygotominor.yy498 = tSQLExprListAppend(yymsp[-2].minor.yy498, yymsp[-1].minor.yy90, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yygotominor.yy284 = tSQLExprListAppend(yymsp[-2].minor.yy284, yymsp[-1].minor.yy244, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } break; - case 124: /* selcollist ::= sclp STAR */ + case 127: /* selcollist ::= sclp STAR */ { tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL); - yygotominor.yy498 = tSQLExprListAppend(yymsp[-1].minor.yy498, pNode, 0); + yygotominor.yy284 = tSQLExprListAppend(yymsp[-1].minor.yy284, pNode, 0); } break; - case 125: /* as ::= AS ids */ - case 126: /* as ::= ids */ yytestcase(yyruleno==126); + case 128: /* as ::= AS ids */ + case 129: /* as ::= ids */ yytestcase(yyruleno==129); { yygotominor.yy0 = yymsp[0].minor.yy0; } break; - case 127: /* as ::= */ + case 130: /* as ::= */ { yygotominor.yy0.n = 0; } break; - case 128: /* from ::= FROM tablelist */ - case 140: /* orderby_opt ::= ORDER BY sortlist */ yytestcase(yyruleno==140); - case 148: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==148); -{yygotominor.yy56 = yymsp[0].minor.yy56;} + case 131: /* from ::= FROM tablelist */ + case 143: /* orderby_opt ::= ORDER BY sortlist */ yytestcase(yyruleno==143); + case 151: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==151); +{yygotominor.yy480 = yymsp[0].minor.yy480;} break; - case 129: /* tablelist ::= ids cpxName */ -{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yygotominor.yy56 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);} + case 132: /* tablelist ::= ids cpxName */ +{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yygotominor.yy480 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);} break; - case 130: /* tablelist ::= tablelist COMMA ids cpxName */ -{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yygotominor.yy56 = tVariantListAppendToken(yymsp[-3].minor.yy56, &yymsp[-1].minor.yy0, -1); } + case 133: /* tablelist ::= tablelist COMMA ids cpxName */ +{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yygotominor.yy480 = tVariantListAppendToken(yymsp[-3].minor.yy480, 
&yymsp[-1].minor.yy0, -1); } break; - case 131: /* tmvar ::= VARIABLE */ + case 134: /* tmvar ::= VARIABLE */ {yygotominor.yy0 = yymsp[0].minor.yy0;} break; - case 132: /* interval_opt ::= INTERVAL LP tmvar RP */ - case 137: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==137); + case 135: /* interval_opt ::= INTERVAL LP tmvar RP */ + case 140: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==140); {yygotominor.yy0 = yymsp[-1].minor.yy0; } break; - case 134: /* fill_opt ::= */ -{yygotominor.yy56 = 0; } + case 136: /* interval_opt ::= */ + case 141: /* sliding_opt ::= */ yytestcase(yyruleno==141); +{yygotominor.yy0.n = 0; yygotominor.yy0.z = NULL; yygotominor.yy0.type = 0; } + break; + case 137: /* fill_opt ::= */ +{yygotominor.yy480 = 0; } break; - case 135: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 138: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy56, &A, -1, 0); - yygotominor.yy56 = yymsp[-1].minor.yy56; + tVariantListInsert(yymsp[-1].minor.yy480, &A, -1, 0); + yygotominor.yy480 = yymsp[-1].minor.yy480; } break; - case 136: /* fill_opt ::= FILL LP ID RP */ + case 139: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yygotominor.yy56 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yygotominor.yy480 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 139: /* orderby_opt ::= */ - case 147: /* groupby_opt ::= */ yytestcase(yyruleno==147); -{yygotominor.yy56 = 0;} + case 142: /* orderby_opt ::= */ + case 150: /* groupby_opt ::= */ yytestcase(yyruleno==150); +{yygotominor.yy480 = 0;} break; - case 141: /* sortlist ::= sortlist COMMA item sortorder */ + case 144: /* sortlist ::= sortlist COMMA item sortorder */ { - yygotominor.yy56 = tVariantListAppend(yymsp[-3].minor.yy56, &yymsp[-1].minor.yy186, yymsp[0].minor.yy332); + yygotominor.yy480 = tVariantListAppend(yymsp[-3].minor.yy480, &yymsp[-1].minor.yy236, yymsp[0].minor.yy220); } break; - case 142: /* sortlist ::= item sortorder */ + case 145: /* sortlist ::= item sortorder */ { - yygotominor.yy56 = tVariantListAppend(NULL, &yymsp[-1].minor.yy186, yymsp[0].minor.yy332); + yygotominor.yy480 = tVariantListAppend(NULL, &yymsp[-1].minor.yy236, yymsp[0].minor.yy220); } break; - case 143: /* item ::= ids cpxName */ + case 146: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - tVariantCreate(&yygotominor.yy186, &yymsp[-1].minor.yy0); + tVariantCreate(&yygotominor.yy236, &yymsp[-1].minor.yy0); } break; - case 144: /* sortorder ::= ASC */ -{yygotominor.yy332 = TSQL_SO_ASC; } + case 147: /* sortorder ::= ASC */ +{yygotominor.yy220 = TSQL_SO_ASC; } break; - case 145: /* sortorder ::= DESC */ -{yygotominor.yy332 = TSQL_SO_DESC;} + case 148: /* sortorder ::= DESC */ +{yygotominor.yy220 = TSQL_SO_DESC;} break; - case 146: /* sortorder ::= */ -{yygotominor.yy332 = TSQL_SO_ASC;} + case 149: /* sortorder ::= */ +{yygotominor.yy220 = TSQL_SO_ASC;} break; - case 149: /* grouplist ::= grouplist COMMA item */ + case 152: /* grouplist ::= grouplist COMMA item */ { - yygotominor.yy56 = tVariantListAppend(yymsp[-2].minor.yy56, &yymsp[0].minor.yy186, -1); + yygotominor.yy480 = tVariantListAppend(yymsp[-2].minor.yy480, &yymsp[0].minor.yy236, -1); } break; - case 150: /* grouplist ::= item */ + case 153: /* grouplist ::= item */ { - yygotominor.yy56 = 
tVariantListAppend(NULL, &yymsp[0].minor.yy186, -1); + yygotominor.yy480 = tVariantListAppend(NULL, &yymsp[0].minor.yy236, -1); } break; - case 151: /* having_opt ::= */ - case 161: /* where_opt ::= */ yytestcase(yyruleno==161); - case 197: /* expritem ::= */ yytestcase(yyruleno==197); -{yygotominor.yy90 = 0;} + case 154: /* having_opt ::= */ + case 164: /* where_opt ::= */ yytestcase(yyruleno==164); + case 200: /* expritem ::= */ yytestcase(yyruleno==200); +{yygotominor.yy244 = 0;} break; - case 152: /* having_opt ::= HAVING expr */ - case 162: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==162); - case 196: /* expritem ::= expr */ yytestcase(yyruleno==196); -{yygotominor.yy90 = yymsp[0].minor.yy90;} + case 155: /* having_opt ::= HAVING expr */ + case 165: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==165); + case 199: /* expritem ::= expr */ yytestcase(yyruleno==199); +{yygotominor.yy244 = yymsp[0].minor.yy244;} break; - case 153: /* limit_opt ::= */ - case 157: /* slimit_opt ::= */ yytestcase(yyruleno==157); -{yygotominor.yy294.limit = -1; yygotominor.yy294.offset = 0;} + case 156: /* limit_opt ::= */ + case 160: /* slimit_opt ::= */ yytestcase(yyruleno==160); +{yygotominor.yy162.limit = -1; yygotominor.yy162.offset = 0;} break; - case 154: /* limit_opt ::= LIMIT signed */ - case 158: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==158); -{yygotominor.yy294.limit = yymsp[0].minor.yy389; yygotominor.yy294.offset = 0;} + case 157: /* limit_opt ::= LIMIT signed */ + case 161: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==161); +{yygotominor.yy162.limit = yymsp[0].minor.yy369; yygotominor.yy162.offset = 0;} break; - case 155: /* limit_opt ::= LIMIT signed OFFSET signed */ - case 159: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==159); -{yygotominor.yy294.limit = yymsp[-2].minor.yy389; yygotominor.yy294.offset = yymsp[0].minor.yy389;} + case 158: /* limit_opt ::= LIMIT signed OFFSET signed */ + case 162: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==162); +{yygotominor.yy162.limit = yymsp[-2].minor.yy369; yygotominor.yy162.offset = yymsp[0].minor.yy369;} break; - case 156: /* limit_opt ::= LIMIT signed COMMA signed */ - case 160: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==160); -{yygotominor.yy294.limit = yymsp[0].minor.yy389; yygotominor.yy294.offset = yymsp[-2].minor.yy389;} + case 159: /* limit_opt ::= LIMIT signed COMMA signed */ + case 163: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==163); +{yygotominor.yy162.limit = yymsp[0].minor.yy369; yygotominor.yy162.offset = yymsp[-2].minor.yy369;} break; - case 163: /* expr ::= LP expr RP */ -{yygotominor.yy90 = yymsp[-1].minor.yy90; } + case 166: /* expr ::= LP expr RP */ +{yygotominor.yy244 = yymsp[-1].minor.yy244; } break; - case 164: /* expr ::= ID */ -{yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} + case 167: /* expr ::= ID */ +{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} break; - case 165: /* expr ::= ID DOT ID */ -{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} + case 168: /* expr ::= ID DOT ID */ +{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} break; - case 166: /* expr ::= ID DOT STAR */ -{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, 
TK_ALL);} + case 169: /* expr ::= ID DOT STAR */ +{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} break; - case 167: /* expr ::= INTEGER */ -{yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} + case 170: /* expr ::= INTEGER */ +{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} break; - case 168: /* expr ::= MINUS INTEGER */ - case 169: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==169); -{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} + case 171: /* expr ::= MINUS INTEGER */ + case 172: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==172); +{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} break; - case 170: /* expr ::= FLOAT */ -{yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} + case 173: /* expr ::= FLOAT */ +{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} break; - case 171: /* expr ::= MINUS FLOAT */ - case 172: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==172); -{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} + case 174: /* expr ::= MINUS FLOAT */ + case 175: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==175); +{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} break; - case 173: /* expr ::= STRING */ -{yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} + case 176: /* expr ::= STRING */ +{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} break; - case 174: /* expr ::= NOW */ -{yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } + case 177: /* expr ::= NOW */ +{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } break; - case 175: /* expr ::= VARIABLE */ -{yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} + case 178: /* expr ::= VARIABLE */ +{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} break; - case 176: /* expr ::= BOOL */ -{yygotominor.yy90 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} + case 179: /* expr ::= BOOL */ +{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} break; - case 177: /* expr ::= ID LP exprlist RP */ + case 180: /* expr ::= ID LP exprlist RP */ { - yygotominor.yy90 = tSQLExprCreateFunction(yymsp[-1].minor.yy498, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); + yygotominor.yy244 = tSQLExprCreateFunction(yymsp[-1].minor.yy284, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } break; - case 178: /* expr ::= ID LP STAR RP */ + case 181: /* expr ::= ID LP STAR RP */ { - yygotominor.yy90 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); + yygotominor.yy244 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } break; - case 179: /* expr ::= expr AND expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_AND);} + case 182: /* expr ::= expr AND expr */ +{yygotominor.yy244 = 
tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_AND);} break; - case 180: /* expr ::= expr OR expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_OR); } + case 183: /* expr ::= expr OR expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_OR); } break; - case 181: /* expr ::= expr LT expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_LT);} + case 184: /* expr ::= expr LT expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_LT);} break; - case 182: /* expr ::= expr GT expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_GT);} + case 185: /* expr ::= expr GT expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_GT);} break; - case 183: /* expr ::= expr LE expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_LE);} + case 186: /* expr ::= expr LE expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_LE);} break; - case 184: /* expr ::= expr GE expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_GE);} + case 187: /* expr ::= expr GE expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_GE);} break; - case 185: /* expr ::= expr NE expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_NE);} + case 188: /* expr ::= expr NE expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_NE);} break; - case 186: /* expr ::= expr EQ expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_EQ);} + case 189: /* expr ::= expr EQ expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_EQ);} break; - case 187: /* expr ::= expr PLUS expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_PLUS); } + case 190: /* expr ::= expr PLUS expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_PLUS); } break; - case 188: /* expr ::= expr MINUS expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_MINUS); } + case 191: /* expr ::= expr MINUS expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_MINUS); } break; - case 189: /* expr ::= expr STAR expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_STAR); } + case 192: /* expr ::= expr STAR expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_STAR); } break; - case 190: /* expr ::= expr SLASH expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_DIVIDE);} + case 193: /* expr ::= expr SLASH expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_DIVIDE);} break; - case 191: /* expr ::= expr REM expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_REM); } + case 194: /* expr ::= expr REM expr */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_REM); } break; - case 192: /* expr ::= expr LIKE expr */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-2].minor.yy90, yymsp[0].minor.yy90, TK_LIKE); } + case 195: /* expr ::= expr LIKE expr */ +{yygotominor.yy244 = 
tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_LIKE); } break; - case 193: /* expr ::= expr IN LP exprlist RP */ -{yygotominor.yy90 = tSQLExprCreate(yymsp[-4].minor.yy90, (tSQLExpr*)yymsp[-1].minor.yy498, TK_IN); } + case 196: /* expr ::= expr IN LP exprlist RP */ +{yygotominor.yy244 = tSQLExprCreate(yymsp[-4].minor.yy244, (tSQLExpr*)yymsp[-1].minor.yy284, TK_IN); } break; - case 194: /* exprlist ::= exprlist COMMA expritem */ - case 201: /* itemlist ::= itemlist COMMA expr */ yytestcase(yyruleno==201); -{yygotominor.yy498 = tSQLExprListAppend(yymsp[-2].minor.yy498,yymsp[0].minor.yy90,0);} + case 197: /* exprlist ::= exprlist COMMA expritem */ + case 204: /* itemlist ::= itemlist COMMA expr */ yytestcase(yyruleno==204); +{yygotominor.yy284 = tSQLExprListAppend(yymsp[-2].minor.yy284,yymsp[0].minor.yy244,0);} break; - case 195: /* exprlist ::= expritem */ - case 202: /* itemlist ::= expr */ yytestcase(yyruleno==202); -{yygotominor.yy498 = tSQLExprListAppend(0,yymsp[0].minor.yy90,0);} + case 198: /* exprlist ::= expritem */ + case 205: /* itemlist ::= expr */ yytestcase(yyruleno==205); +{yygotominor.yy284 = tSQLExprListAppend(0,yymsp[0].minor.yy244,0);} break; - case 198: /* cmd ::= INSERT INTO cpxName insert_value_list */ + case 201: /* cmd ::= INSERT INTO cpxName insert_value_list */ { - tSetInsertSQLElems(pInfo, &yymsp[-1].minor.yy0, yymsp[0].minor.yy74); + tSetInsertSQLElems(pInfo, &yymsp[-1].minor.yy0, yymsp[0].minor.yy237); } break; - case 199: /* insert_value_list ::= VALUES LP itemlist RP */ -{yygotominor.yy74 = tSQLListListAppend(NULL, yymsp[-1].minor.yy498);} + case 202: /* insert_value_list ::= VALUES LP itemlist RP */ +{yygotominor.yy237 = tSQLListListAppend(NULL, yymsp[-1].minor.yy284);} break; - case 200: /* insert_value_list ::= insert_value_list VALUES LP itemlist RP */ -{yygotominor.yy74 = tSQLListListAppend(yymsp[-4].minor.yy74, yymsp[-1].minor.yy498);} + case 203: /* insert_value_list ::= insert_value_list VALUES LP itemlist RP */ +{yygotominor.yy237 = tSQLListListAppend(yymsp[-4].minor.yy237, yymsp[-1].minor.yy284);} break; - case 203: /* cmd ::= RESET QUERY CACHE */ + case 206: /* cmd ::= RESET QUERY CACHE */ { setDCLSQLElems(pInfo, RESET_QUERY_CACHE, 0);} break; - case 204: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 207: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy471, NULL, ALTER_TABLE_ADD_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, ALTER_TABLE_ADD_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_ADD_COLUMN); } break; - case 205: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 208: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2201,14 +2227,14 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_DROP_COLUMN); } break; - case 206: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 209: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy471, NULL, ALTER_TABLE_TAGS_ADD); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, ALTER_TABLE_TAGS_ADD); setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_ADD); 
} break; - case 207: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 210: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2219,7 +2245,7 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_DROP); } break; - case 208: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 211: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -2233,25 +2259,25 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_CHG); } break; - case 209: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 212: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); tVariantList* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - A = tVariantListAppend(A, &yymsp[0].minor.yy186, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy236, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-6].minor.yy0, NULL, A, ALTER_TABLE_TAGS_SET); setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_SET); } break; - case 210: /* cmd ::= KILL CONNECTION IP COLON INTEGER */ + case 213: /* cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_CONNECTION, 1, &yymsp[-2].minor.yy0);} break; - case 211: /* cmd ::= KILL STREAM IP COLON INTEGER COLON INTEGER */ + case 214: /* cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER */ {yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_STREAM, 1, &yymsp[-4].minor.yy0);} break; - case 212: /* cmd ::= KILL QUERY IP COLON INTEGER COLON INTEGER */ + case 215: /* cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER */ {yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_QUERY, 1, &yymsp[-4].minor.yy0);} break; default: diff --git a/src/client/src/tscAst.c b/src/client/src/tscAst.c index 845a9fd36d1a694d39e35f1ad3c0b06ec0f1b59c..d071358dbf0b9611eabac34d5f4a87eef0b6a646 100644 --- a/src/client/src/tscAst.c +++ b/src/client/src/tscAst.c @@ -13,12 +13,6 @@ * along with this program. If not, see . 
*/ -#include -#include -#include -#include -#include - #include "os.h" #include "taosmsg.h" #include "tast.h" @@ -27,8 +21,12 @@ #include "tschemautil.h" #include "tsdb.h" #include "tskiplist.h" +#include "tsqldef.h" #include "tsqlfunction.h" +#include "tstoken.h" +#include "ttypes.h" #include "tutil.h" +#include "tscSQLParser.h" /* * @@ -43,10 +41,10 @@ */ static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols, SSQLToken *pToken); -static void tSQLSyntaxNodeDestroy(tSQLSyntaxNode *pNode, void (*fp)(void *)); +static void tSQLSyntaxNodeDestroy(tSQLSyntaxNode *pNode, void (*fp)(void *)); static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *str, int32_t *i); -static void destroySyntaxTree(tSQLSyntaxNode *); +static void destroySyntaxTree(tSQLSyntaxNode *); static uint8_t isQueryOnPrimaryKey(const char *primaryColumnName, const tSQLSyntaxNode *pLeft, const tSQLSyntaxNode *pRight); @@ -110,11 +108,11 @@ static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols, return NULL; } - int32_t i = 0; size_t nodeSize = sizeof(tSQLSyntaxNode); tSQLSyntaxNode *pNode = NULL; if (pToken->type == TK_ID || pToken->type == TK_TBNAME) { + int32_t i = 0; if (pToken->type == TK_ID) { do { size_t len = strlen(pSchema[i].name); @@ -261,8 +259,7 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha t0 = tStrGetToken(str, i, false, 0, NULL); if (t0.n == 0 || t0.type == TK_RP) { - if (pLeft->nodeType != TSQL_NODE_EXPR) { - // if left is not the expr, it is not a legal expr + if (pLeft->nodeType != TSQL_NODE_EXPR) { // if left is not the expr, it is not a legal expr tSQLSyntaxNodeDestroy(pLeft, NULL); return NULL; } @@ -271,8 +268,8 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha } // get the operator of expr - uint8_t optr = getBinaryExprOptr(&t0); - if (optr <= 0) { + uint8_t optr = getBinaryExprOptr(&t0); + if (optr == 0) { pError("not support binary operator:%d", t0.type); tSQLSyntaxNodeDestroy(pLeft, NULL); return NULL; @@ -326,13 +323,14 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha pn->colId = -1; return pn; } else { - int32_t optr = getBinaryExprOptr(&t0); - if (optr <= 0) { + uint8_t localOptr = getBinaryExprOptr(&t0); + if (localOptr == 0) { pError("not support binary operator:%d", t0.type); + free(pBinExpr); return NULL; } - return parseRemainStr(str, pBinExpr, pSchema, optr, numOfCols, i); + return parseRemainStr(str, pBinExpr, pSchema, localOptr, numOfCols, i); } } @@ -421,16 +419,17 @@ void tSQLBinaryExprToString(tSQLBinaryExpr *pExpr, char *dst, int32_t *len) { if (pExpr == NULL) { *dst = 0; *len = 0; + return; } - int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType); + int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType); dst += lhs; *len = lhs; - char *start = tSQLOptrToString(pExpr->nSQLBinaryOptr, dst); + char *start = tSQLOptrToString(pExpr->nSQLBinaryOptr, dst); *len += (start - dst); - *len += tSQLBinaryExprToStringImpl(pExpr->pRight, start, pExpr->pRight->nodeType); + *len += tSQLBinaryExprToStringImpl(pExpr->pRight, start, pExpr->pRight->nodeType); } static void UNUSED_FUNC destroySyntaxTree(tSQLSyntaxNode *pNode) { tSQLSyntaxNodeDestroy(pNode, NULL); } @@ -644,16 +643,15 @@ int32_t intersect(tQueryResultset *pLeft, tQueryResultset *pRight, tQueryResults } /* - * + * traverse the result and apply the function to each item to check if the item is qualified 
or not */ -void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, bool (*fp)(tSkipListNode *, void *), - tQueryResultset * pResult) { +static void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, __result_filter_fn_t fp, tQueryResultset *pResult) { assert(pExpr->pLeft->nodeType == TSQL_NODE_COL && pExpr->pRight->nodeType == TSQL_NODE_VALUE); - // brutal force search + // brutal force scan the result list and check for each item in the list int64_t num = pResult->num; for (int32_t i = 0, j = 0; i < pResult->num; ++i) { - if (fp == NULL || (fp != NULL && fp(pResult->pRes[i], pExpr->info) == true)) { + if (fp == NULL || (fp(pResult->pRes[i], pExpr->info) == true)) { pResult->pRes[j++] = pResult->pRes[i]; } else { num--; @@ -936,4 +934,4 @@ void tQueryResultClean(tQueryResultset *pRes) { tfree(pRes->pRes); pRes->num = 0; -} \ No newline at end of file +} diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 6e0e1ae7c5c73720a53558701b01fb79af592e27..99b9b571d7ffe513e87206c5cd0c5d380318ca95 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -13,8 +13,7 @@ * along with this program. If not, see . */ -#include -#include +#include "os.h" #include "tlog.h" #include "trpc.h" @@ -23,8 +22,9 @@ #include "tscUtil.h" #include "tsclient.h" #include "tsocket.h" -#include "tsql.h" +#include "tscSQLParser.h" #include "tutil.h" +#include "tnote.h" void tscProcessFetchRow(SSchedMsg *pMsg); void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows); @@ -40,6 +40,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo */ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows); +// TODO return the correct error code to client in tscQueueAsyncError void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) { STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) { @@ -50,20 +51,21 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, } int32_t sqlLen = strlen(sqlstr); - if (sqlLen > TSDB_MAX_SQL_LEN) { + if (sqlLen > tsMaxSQLStringLen) { tscError("sql string too long"); tscQueueAsyncError(fp, param); return; } - SSqlObj *pSql = (SSqlObj *)malloc(sizeof(SSqlObj)); + taosNotePrintTsc(sqlstr); + + SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); if (pSql == NULL) { tscError("failed to malloc sqlObj"); tscQueueAsyncError(fp, param); return; } - memset(pSql, 0, sizeof(SSqlObj)); SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; @@ -119,7 +121,8 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf // sequentially retrieve data from remain vnodes first, query vnode specified by vnodeIdx if (numOfRows == 0 && tscProjectionQueryOnMetric(pCmd)) { // vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx - assert(pCmd->vnodeIdx >= 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + assert(pMeterMetaInfo->vnodeIndex >= 0); /* reach the maximum number of output rows, abort */ if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { @@ -131,8 +134,8 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; pCmd->limit.offset = pRes->offset; - if ((++(pCmd->vnodeIdx)) < tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) { - tscTrace("%p retrieve data from next vnode:%d", pSql, pCmd->vnodeIdx); + if 
((++(pMeterMetaInfo->vnodeIndex)) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + tscTrace("%p retrieve data from next vnode:%d", pSql, pMeterMetaInfo->vnodeIndex); pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first. @@ -155,7 +158,6 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo SSqlObj *pSql = (SSqlObj *)tres; if (pSql == NULL) { // error tscError("sql object is NULL"); - tscQueueAsyncError(pSql->fetchFp, param); return; } @@ -270,7 +272,8 @@ void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) { /* * vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx till all vnode have been retrieved */ - assert(pCmd->vnodeIdx >= 1); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + assert(pMeterMetaInfo->vnodeIndex >= 0); /* reach the maximum number of output rows, abort */ if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { @@ -281,7 +284,7 @@ void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) { /* update the limit value according to current retrieval results */ pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; - if ((++pCmd->vnodeIdx) <= tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) { + if ((++pMeterMetaInfo->vnodeIndex) <= pMeterMetaInfo->pMetricMeta->numOfVnodes) { pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first. tscResetForNextRetrieve(pRes); @@ -402,9 +405,12 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows) int32_t code = TSDB_CODE_SUCCESS; assert(!pCmd->isInsertFromFile && pSql->signature == pSql); - + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + assert(pCmd->numOfTables == 1); + SDataBlockList *pDataBlocks = pCmd->pDataBlocks; - if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) { + if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) { // restore user defined fp pSql->fp = pSql->fetchFp; tscTrace("%p Async insertion completed, destroy data block list", pSql); @@ -416,17 +422,17 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows) (*pSql->fp)(pSql->param, tres, numOfRows); } else { do { - code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pCmd->vnodeIdx++]); + code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pMeterMetaInfo->vnodeIndex++]); if (code != TSDB_CODE_SUCCESS) { tscTrace("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%d", - pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize, code); + pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize, code); } - } while (code != TSDB_CODE_SUCCESS && pCmd->vnodeIdx < pDataBlocks->nSize); + } while (code != TSDB_CODE_SUCCESS && pMeterMetaInfo->vnodeIndex < pDataBlocks->nSize); // build submit msg may fail if (code == TSDB_CODE_SUCCESS) { - tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize); + tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize); tscProcessSql(pSql); } } @@ -482,11 +488,11 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { // check if it is a sub-query of metric query first, if true, enter another routine if ((pSql->cmd.type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pCmd->vnodeIdx >= 0 && 
pSql->param != NULL); + assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pMeterMetaInfo->vnodeIndex >= 0 && pSql->param != NULL); SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; SSqlObj * pParObj = trs->pParentSqlObj; - assert(pParObj->signature == pParObj && trs->vnodeIdx == pCmd->vnodeIdx && + assert(pParObj->signature == pParObj && trs->subqueryIndex == pMeterMetaInfo->vnodeIndex && pMeterMetaInfo->pMeterMeta->numOfTags != 0); tscTrace("%p get metricMeta during metric query successfully", pSql); diff --git a/src/client/src/tscCache.c b/src/client/src/tscCache.c index 866b6e7dbc137c6e07d21d1e140ded39d1f4cd88..1ac32d7502ee99c38f84445cfeb767ad316b06ed 100644 --- a/src/client/src/tscCache.c +++ b/src/client/src/tscCache.c @@ -13,14 +13,7 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include -#include -#include +#include "os.h" #include "tglobalcfg.h" #include "tlog.h" @@ -32,7 +25,7 @@ typedef struct _c_hash_t { uint32_t ip; - short port; + uint16_t port; struct _c_hash_t *prev; struct _c_hash_t *next; void * data; @@ -52,14 +45,14 @@ typedef struct { void *pTimer; } SConnCache; -int taosHashConn(void *handle, uint32_t ip, short port, char *user) { +int taosHashConn(void *handle, uint32_t ip, uint16_t port, char *user) { SConnCache *pObj = (SConnCache *)handle; int hash = 0; // size_t user_len = strlen(user); hash = ip >> 16; hash += (unsigned short)(ip & 0xFFFF); - hash += (unsigned short)port; + hash += port; while (*user != '\0') { hash += *user; user++; @@ -81,7 +74,7 @@ void taosRemoveExpiredNodes(SConnCache *pObj, SConnHash *pNode, int hash, uint64 pNext = pNode->next; pObj->total--; pObj->count[hash]--; - tscTrace("%p ip:0x%x:%d:%d:%p removed, connections in cache:%d", pNode->data, pNode->ip, pNode->port, hash, pNode, + tscTrace("%p ip:0x%x:%hu:%d:%p removed, connections in cache:%d", pNode->data, pNode->ip, pNode->port, hash, pNode, pObj->count[hash]); taosMemPoolFree(pObj->connHashMemPool, (char *)pNode); pNode = pNext; @@ -93,7 +86,7 @@ void taosRemoveExpiredNodes(SConnCache *pObj, SConnHash *pNode, int hash, uint64 pObj->connHashList[hash] = NULL; } -void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, short port, char *user) { +void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, uint16_t port, char *user) { int hash; SConnHash * pNode; SConnCache *pObj; @@ -132,7 +125,7 @@ void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, short port, ch pthread_mutex_unlock(&pObj->mutex); - tscTrace("%p ip:0x%x:%d:%d:%p added, connections in cache:%d", data, ip, port, hash, pNode, pObj->count[hash]); + tscTrace("%p ip:0x%x:%hu:%d:%p added, connections in cache:%d", data, ip, port, hash, pNode, pObj->count[hash]); return pObj; } @@ -159,7 +152,7 @@ void taosCleanConnCache(void *handle, void *tmrId) { taosTmrReset(taosCleanConnCache, pObj->keepTimer * 2, pObj, pObj->tmrCtrl, &pObj->pTimer); } -void *taosGetConnFromCache(void *handle, uint32_t ip, short port, char *user) { +void *taosGetConnFromCache(void *handle, uint32_t ip, uint16_t port, char *user) { int hash; SConnHash * pNode; SConnCache *pObj; @@ -208,7 +201,7 @@ void *taosGetConnFromCache(void *handle, uint32_t ip, short port, char *user) { pthread_mutex_unlock(&pObj->mutex); if (pData) { - tscTrace("%p ip:0x%x:%d:%d:%p retrieved, connections in cache:%d", pData, ip, port, hash, pNode, pObj->count[hash]); + tscTrace("%p ip:0x%x:%hu:%d:%p retrieved, connections in cache:%d", pData, ip, port, hash, pNode, pObj->count[hash]); } return 
pData; diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 0f9be904128c76f57746fe5eb3c043d58cf2c311..37bd46c75b1d2628d7792c7f795c848f23943271 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -13,18 +13,6 @@ * along with this program. If not, see . */ -#pragma GCC diagnostic ignored "-Wincompatible-pointer-types" - -#include -#include -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "taosmsg.h" #include "tast.h" @@ -73,7 +61,16 @@ } \ } while (0); -void noop(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {} +#define DO_UPDATE_TAG_COLUMNS_WITHOUT_TS(ctx) \ +do {\ +for (int32_t i = 0; i < (ctx)->tagInfo.numOfTagCols; ++i) { \ + SQLFunctionCtx *__ctx = (ctx)->tagInfo.pTagCtxList[i]; \ + aAggs[TSDB_FUNC_TAG].xFunction(__ctx); \ + } \ +} while(0); + +void noop1(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {} +void noop2(SQLFunctionCtx *UNUSED_PARAM(pCtx), int32_t UNUSED_PARAM(index)) {} typedef struct tValuePair { tVariant v; @@ -114,7 +111,9 @@ typedef struct SFirstLastInfo { } SFirstLastInfo; typedef struct SFirstLastInfo SLastrowInfo; -typedef struct SPercentileInfo { tMemBucket *pMemBucket; } SPercentileInfo; +typedef struct SPercentileInfo { + tMemBucket *pMemBucket; +} SPercentileInfo; typedef struct STopBotInfo { int32_t num; @@ -128,9 +127,13 @@ typedef struct SLeastsquareInfo { int64_t num; } SLeastsquareInfo; -typedef struct SAPercentileInfo { SHistogramInfo *pHisto; } SAPercentileInfo; +typedef struct SAPercentileInfo { + SHistogramInfo *pHisto; +} SAPercentileInfo; -typedef struct STSCompInfo { STSBuf *pTSBuf; } STSCompInfo; +typedef struct STSCompInfo { + STSBuf *pTSBuf; +} STSCompInfo; int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, int16_t *bytes, int16_t *intermediateResBytes, int16_t extLength, bool isSuperTable) { @@ -461,21 +464,32 @@ int32_t no_data_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId } \ }; -#define UPDATE_DATA(ctx, left, right, num, sign) \ - do { \ - if (((left) < (right)) ^ (sign)) { \ - (left) = right; \ - DO_UPDATE_TAG_COLUMNS(ctx, 0); \ - (num) += 1; \ - } \ - } while (0) +#define UPDATE_DATA(ctx, left, right, num, sign, k) \ + do { \ + if (((left) < (right)) ^ (sign)) { \ + (left) = (right); \ + DO_UPDATE_TAG_COLUMNS(ctx, k); \ + (num) += 1; \ + } \ + } while (0); + +#define DUPATE_DATA_WITHOUT_TS(ctx, left, right, num, sign) \ +do { \ + if (((left) < (right)) ^ (sign)) { \ + (left) = (right); \ + DO_UPDATE_TAG_COLUMNS_WITHOUT_TS(ctx); \ + (num) += 1; \ + } \ + } while (0); + #define LOOPCHECK_N(val, list, ctx, tsdbType, sign, num) \ for (int32_t i = 0; i < ((ctx)->size); ++i) { \ if ((ctx)->hasNull && isNull((char *)&(list)[i], tsdbType)) { \ continue; \ } \ - UPDATE_DATA(ctx, val, (list)[i], num, sign); \ + TSKEY key = (ctx)->ptsList[i]; \ + UPDATE_DATA(ctx, val, (list)[i], num, sign, key); \ } #define TYPED_LOOPCHECK_N(type, data, list, ctx, tsdbType, sign, notNullElems) \ @@ -494,10 +508,10 @@ static void do_sum(SQLFunctionCtx *pCtx) { assert(pCtx->size >= pCtx->preAggVals.numOfNull); if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { - int64_t *retVal = pCtx->aOutputBuf; + int64_t *retVal = (int64_t*) pCtx->aOutputBuf; *retVal += pCtx->preAggVals.sum; } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = pCtx->aOutputBuf; + double *retVal = (double*) pCtx->aOutputBuf; 
*retVal += GET_DOUBLE_VAL(&(pCtx->preAggVals.sum)); } } else { // computing based on the true data block @@ -505,7 +519,7 @@ static void do_sum(SQLFunctionCtx *pCtx) { notNullElems = 0; if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { - int64_t *retVal = pCtx->aOutputBuf; + int64_t *retVal = (int64_t*) pCtx->aOutputBuf; if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { LIST_ADD_N(*retVal, pCtx, pData, int8_t, notNullElems, pCtx->inputType); @@ -517,10 +531,10 @@ static void do_sum(SQLFunctionCtx *pCtx) { LIST_ADD_N(*retVal, pCtx, pData, int64_t, notNullElems, pCtx->inputType); } } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *retVal = pCtx->aOutputBuf; + double *retVal = (double*) pCtx->aOutputBuf; LIST_ADD_N(*retVal, pCtx, pData, double, notNullElems, pCtx->inputType); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = pCtx->aOutputBuf; + double *retVal = (double*) pCtx->aOutputBuf; LIST_ADD_N(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType); } } @@ -540,7 +554,7 @@ static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) { } SET_VAL(pCtx, 1, 1); - int64_t *res = pCtx->aOutputBuf; + int64_t *res = (int64_t*) pCtx->aOutputBuf; if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { *res += GET_INT8_VAL(pData); @@ -551,10 +565,10 @@ static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) { } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { *res += GET_INT64_VAL(pData); } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *retVal = pCtx->aOutputBuf; + double *retVal = (double*) pCtx->aOutputBuf; *retVal += GET_DOUBLE_VAL(pData); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = pCtx->aOutputBuf; + double *retVal = (double*) pCtx->aOutputBuf; *retVal += GET_FLOAT_VAL(pData); } @@ -681,7 +695,7 @@ static int32_t first_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY return BLK_DATA_NO_NEEDED; } - SFirstLastInfo *pInfo = (pCtx->aOutputBuf + pCtx->inputBytes); + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; } else { // data in current block is not earlier than current result @@ -695,7 +709,7 @@ static int32_t last_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY return BLK_DATA_NO_NEEDED; } - SFirstLastInfo *pInfo = (pCtx->aOutputBuf + pCtx->inputBytes); + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; } else { @@ -829,7 +843,7 @@ static void avg_func_merge(SQLFunctionCtx *pCtx) { static void avg_func_second_merge(SQLFunctionCtx *pCtx) { SResultInfo *pResInfo = GET_RES_INFO(pCtx); - double *sum = pCtx->aOutputBuf; + double *sum = (double*) pCtx->aOutputBuf; char * input = GET_INPUT_CHAR(pCtx); for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { @@ -896,16 +910,18 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, index = pCtx->preAggVals.maxIndex; } + TSKEY key = pCtx->ptsList[index]; + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { int64_t val = GET_INT64_VAL(tval); if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { int8_t *data = (int8_t *)pOutput; - UPDATE_DATA(pCtx, *data, val, notNullElems, isMin); + UPDATE_DATA(pCtx, *data, val, notNullElems, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { int16_t *data = (int16_t *)pOutput; - 
UPDATE_DATA(pCtx, *data, val, notNullElems, isMin); + UPDATE_DATA(pCtx, *data, val, notNullElems, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { int32_t *data = (int32_t *)pOutput; #if defined(_DEBUG_VIEW) @@ -916,27 +932,27 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, *data = val; for (int32_t i = 0; i < (pCtx)->tagInfo.numOfTagCols; ++i) { SQLFunctionCtx *__ctx = pCtx->tagInfo.pTagCtxList[i]; - if (__ctx->functionId == TSDB_FUNC_TAG_DUMMY) { - aAggs[TSDB_FUNC_TAG].xFunction(__ctx); - } else if (__ctx->functionId == TSDB_FUNC_TS_DUMMY) { - *((int64_t *)__ctx->aOutputBuf) = pCtx->ptsList[index]; + if (__ctx->functionId == TSDB_FUNC_TS_DUMMY) { + __ctx->tag = (tVariant){.i64Key = key, .nType = TSDB_DATA_TYPE_BIGINT}; } + + aAggs[TSDB_FUNC_TAG].xFunction(__ctx); } } } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { int64_t *data = (int64_t *)pOutput; - UPDATE_DATA(pCtx, *data, val, notNullElems, isMin); + UPDATE_DATA(pCtx, *data, val, notNullElems, isMin, key); } } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { double *data = (double *)pOutput; double val = GET_DOUBLE_VAL(tval); - UPDATE_DATA(pCtx, *data, val, notNullElems, isMin); + UPDATE_DATA(pCtx, *data, val, notNullElems, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { float *data = (float *)pOutput; double val = GET_DOUBLE_VAL(tval); - UPDATE_DATA(pCtx, *data, val, notNullElems, isMin); + UPDATE_DATA(pCtx, *data, val, notNullElems, isMin, key); } return; @@ -952,16 +968,18 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, TYPED_LOOPCHECK_N(int16_t, pOutput, p, pCtx, pCtx->inputType, isMin, *notNullElems); } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { int32_t *pData = p; - int32_t *retVal = pOutput; + int32_t *retVal = (int32_t*) pOutput; for (int32_t i = 0; i < pCtx->size; ++i) { - if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*)&pData[i], pCtx->inputType)) { continue; } if ((*retVal < pData[i]) ^ isMin) { *retVal = pData[i]; - DO_UPDATE_TAG_COLUMNS(pCtx, pCtx->ptsList[i]); + TSKEY k = pCtx->ptsList[i]; + + DO_UPDATE_TAG_COLUMNS(pCtx, k); } *notNullElems += 1; @@ -1024,10 +1042,10 @@ static bool max_func_setup(SQLFunctionCtx *pCtx) { *((int32_t *)pCtx->aOutputBuf) = INT32_MIN; break; case TSDB_DATA_TYPE_FLOAT: - *((float *)pCtx->aOutputBuf) = -FLT_MIN; + *((float *)pCtx->aOutputBuf) = -FLT_MAX; break; case TSDB_DATA_TYPE_DOUBLE: - *((double *)pCtx->aOutputBuf) = -DBL_MIN; + *((double *)pCtx->aOutputBuf) = -DBL_MAX; break; case TSDB_DATA_TYPE_BIGINT: *((int64_t *)pCtx->aOutputBuf) = INT64_MIN; @@ -1099,12 +1117,12 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp switch (type) { case TSDB_DATA_TYPE_TINYINT: { int8_t v = GET_INT8_VAL(input); - UPDATE_DATA(pCtx, *(int8_t *)output, v, notNullElems, isMin); + DUPATE_DATA_WITHOUT_TS(pCtx, *(int8_t *)output, v, notNullElems, isMin); break; }; case TSDB_DATA_TYPE_SMALLINT: { int16_t v = GET_INT16_VAL(input); - UPDATE_DATA(pCtx, *(int16_t *)output, v, notNullElems, isMin); + DUPATE_DATA_WITHOUT_TS(pCtx, *(int16_t *)output, v, notNullElems, isMin); break; } case TSDB_DATA_TYPE_INT: { @@ -1114,7 +1132,7 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp for (int32_t i = 0; i < pCtx->tagInfo.numOfTagCols; ++i) { SQLFunctionCtx *__ctx = pCtx->tagInfo.pTagCtxList[i]; - aAggs[TSDB_FUNC_TAG].xFunction(__ctx); + aAggs[TSDB_FUNC_TAG].xFunction(__ctx); } 
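The `max_func_setup` change just above is a genuine bug fix: `FLT_MIN` and `DBL_MIN` are the smallest *positive* normalized values, so seeding a maximum with `-FLT_MIN` starts it at a tiny negative number near zero, and a column whose values are all more negative than that would never be recorded. A small stand-alone illustration (not code from the patch):

```c
#include <float.h>
#include <stdio.h>

int main(void) {
    /* -FLT_MIN is roughly -1.2e-38, nowhere near the most negative float;
     * -FLT_MAX (roughly -3.4e38) is the correct "smaller than everything" seed. */
    float seed_wrong = -FLT_MIN, seed_right = -FLT_MAX, sample = -5.0f;

    printf("wrong seed keeps %g\n", (double)(sample > seed_wrong ? sample : seed_wrong));  /* stays near -1.2e-38 */
    printf("right seed takes %g\n", (double)(sample > seed_right ? sample : seed_right));  /* becomes -5 */
    return 0;
}
```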
notNullElems++; @@ -1123,17 +1141,17 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp } case TSDB_DATA_TYPE_FLOAT: { float v = GET_FLOAT_VAL(input); - UPDATE_DATA(pCtx, *(float *)output, v, notNullElems, isMin); + DUPATE_DATA_WITHOUT_TS(pCtx, *(float *)output, v, notNullElems, isMin); break; } case TSDB_DATA_TYPE_DOUBLE: { double v = GET_DOUBLE_VAL(input); - UPDATE_DATA(pCtx, *(double *)output, v, notNullElems, isMin); + DUPATE_DATA_WITHOUT_TS(pCtx, *(double *)output, v, notNullElems, isMin); break; } case TSDB_DATA_TYPE_BIGINT: { int64_t v = GET_INT64_VAL(input); - UPDATE_DATA(pCtx, *(int64_t *)output, v, notNullElems, isMin); + DUPATE_DATA_WITHOUT_TS(pCtx, *(int64_t *)output, v, notNullElems, isMin); break; }; default: @@ -1189,38 +1207,39 @@ static void max_func_second_merge(SQLFunctionCtx *pCtx) { static void minMax_function_f(SQLFunctionCtx *pCtx, int32_t index, int32_t isMin) { char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + TSKEY key = pCtx->ptsList[index]; int32_t num = 0; if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { int8_t *output = (int8_t *)pCtx->aOutputBuf; int8_t i = GET_INT8_VAL(pData); - UPDATE_DATA(pCtx, *output, i, num, isMin); + UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - int16_t *output = pCtx->aOutputBuf; + int16_t *output = (int16_t*) pCtx->aOutputBuf; int16_t i = GET_INT16_VAL(pData); - UPDATE_DATA(pCtx, *output, i, num, isMin); + UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - int32_t *output = pCtx->aOutputBuf; + int32_t *output = (int32_t*) pCtx->aOutputBuf; int32_t i = GET_INT32_VAL(pData); - UPDATE_DATA(pCtx, *output, i, num, isMin); + UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { - int64_t *output = pCtx->aOutputBuf; + int64_t *output = (int64_t*) pCtx->aOutputBuf; int64_t i = GET_INT64_VAL(pData); - UPDATE_DATA(pCtx, *output, i, num, isMin); + UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - float *output = pCtx->aOutputBuf; + float *output = (float*) pCtx->aOutputBuf; float i = GET_FLOAT_VAL(pData); - UPDATE_DATA(pCtx, *output, i, num, isMin); + UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *output = pCtx->aOutputBuf; + double *output = (double*) pCtx->aOutputBuf; double i = GET_DOUBLE_VAL(pData); - UPDATE_DATA(pCtx, *output, i, num, isMin); + UPDATE_DATA(pCtx, *output, i, num, isMin, key); } GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; @@ -1281,7 +1300,7 @@ static void stddev_function(SQLFunctionCtx *pCtx) { switch (pCtx->inputType) { case TSDB_DATA_TYPE_INT: { for (int32_t i = 0; i < pCtx->size; ++i) { - if (pCtx->hasNull && isNull(&((int32_t *)pData)[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) { continue; } *retVal += POW2(((int32_t *)pData)[i] - avg); @@ -1462,7 +1481,9 @@ static void first_function_f(SQLFunctionCtx *pCtx, int32_t index) { SET_VAL(pCtx, 1, 1); memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); - DO_UPDATE_TAG_COLUMNS(pCtx, 0); + + TSKEY ts = pCtx->ptsList[index]; + DO_UPDATE_TAG_COLUMNS(pCtx, ts); SResultInfo *pInfo = GET_RES_INFO(pCtx); pInfo->hasResult = DATA_SET_FLAG; @@ -1555,7 +1576,7 @@ static void first_dist_func_merge(SQLFunctionCtx *pCtx) { SFirstLastInfo *pOutput = (SFirstLastInfo *)(pCtx->aOutputBuf + pCtx->inputBytes); if 
(pOutput->hasResult != DATA_SET_FLAG || pInput->ts < pOutput->ts) { memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes + sizeof(SFirstLastInfo)); - DO_UPDATE_TAG_COLUMNS(pCtx, 0); + DO_UPDATE_TAG_COLUMNS(pCtx, pInput->ts); } } @@ -1563,7 +1584,7 @@ static void first_dist_func_second_merge(SQLFunctionCtx *pCtx) { assert(pCtx->resultInfo->superTableQ); char * pData = GET_INPUT_CHAR(pCtx); - SFirstLastInfo *pInput = (pData + pCtx->outputBytes); + SFirstLastInfo *pInput = (SFirstLastInfo*) (pData + pCtx->outputBytes); if (pInput->hasResult != DATA_SET_FLAG) { return; } @@ -1603,7 +1624,9 @@ static void last_function(SQLFunctionCtx *pCtx) { } memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes); - DO_UPDATE_TAG_COLUMNS(pCtx, 0); + + TSKEY ts = pCtx->ptsList[i]; + DO_UPDATE_TAG_COLUMNS(pCtx, ts); SResultInfo *pInfo = GET_RES_INFO(pCtx); pInfo->hasResult = DATA_SET_FLAG; @@ -1628,7 +1651,9 @@ static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) { SET_VAL(pCtx, 1, 1); memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); - DO_UPDATE_TAG_COLUMNS(pCtx, 0); + + TSKEY ts = pCtx->ptsList[index]; + DO_UPDATE_TAG_COLUMNS(pCtx, ts); SResultInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; @@ -1642,7 +1667,7 @@ static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t ind if (pInfo->hasResult != DATA_SET_FLAG || pInfo->ts < timestamp[index]) { #if defined(_DEBUG_VIEW) - pTrace("assign index:%d, ts:%lld, val:%d, ", index, timestamp[index], *(int32_t *)pData); + pTrace("assign index:%d, ts:%" PRId64 ", val:%d, ", index, timestamp[index], *(int32_t *)pData); #endif memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); @@ -1725,7 +1750,7 @@ static void last_dist_func_merge(SQLFunctionCtx *pCtx) { if (pOutput->hasResult != DATA_SET_FLAG || pOutput->ts < pInput->ts) { memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes + sizeof(SFirstLastInfo)); - DO_UPDATE_TAG_COLUMNS(pCtx, 0); + DO_UPDATE_TAG_COLUMNS(pCtx, pInput->ts); } } @@ -1737,7 +1762,7 @@ static void last_dist_func_merge(SQLFunctionCtx *pCtx) { static void last_dist_func_second_merge(SQLFunctionCtx *pCtx) { char *pData = GET_INPUT_CHAR(pCtx); - SFirstLastInfo *pInput = (pData + pCtx->outputBytes); + SFirstLastInfo *pInput = (SFirstLastInfo*) (pData + pCtx->outputBytes); if (pInput->hasResult != DATA_SET_FLAG) { return; } @@ -1780,7 +1805,7 @@ static void last_row_function(SQLFunctionCtx *pCtx) { pInfo1->ts = pCtx->param[0].i64Key; pInfo1->hasResult = DATA_SET_FLAG; - DO_UPDATE_TAG_COLUMNS(pCtx, 0); + DO_UPDATE_TAG_COLUMNS(pCtx, pInfo1->ts); } SET_VAL(pCtx, pCtx->size, 1); @@ -1814,6 +1839,11 @@ static void valuePairAssign(tValuePair *dst, int16_t type, const char *val, int6 memcpy(dst->pTags, pTags, (size_t)pTagInfo->tagsLen); } else { // the tags are dumped from the ctx tag fields for (int32_t i = 0; i < pTagInfo->numOfTagCols; ++i) { + SQLFunctionCtx* __ctx = pTagInfo->pTagCtxList[i]; + if (__ctx->functionId == TSDB_FUNC_TS_DUMMY) { + __ctx->tag = (tVariant) {.nType = TSDB_DATA_TYPE_BIGINT, .i64Key = tsKey}; + } + tVariantDump(&pTagInfo->pTagCtxList[i]->tag, dst->pTags + size, pTagInfo->pTagCtxList[i]->tag.nType); size += pTagInfo->pTagCtxList[i]->outputBytes; } @@ -1835,11 +1865,12 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, tValuePair **pList = pInfo->res; if (pInfo->num < maxLen) { - if (pInfo->num == 0 || ((type >= TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_BIGINT) && - val.i64Key >= pList[pInfo->num - 1]->v.i64Key) || + if (pInfo->num == 0 || + ((type >= 
TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_BIGINT) && + val.i64Key >= pList[pInfo->num - 1]->v.i64Key) || ((type >= TSDB_DATA_TYPE_FLOAT && type <= TSDB_DATA_TYPE_DOUBLE) && val.dKey >= pList[pInfo->num - 1]->v.dKey)) { - valuePairAssign(pList[pInfo->num], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[pInfo->num], type, (const char*)&val.i64Key, ts, pTags, pTagInfo, stage); } else { int32_t i = pInfo->num - 1; @@ -1855,7 +1886,7 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, } } - valuePairAssign(pList[i + 1], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[i + 1], type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage); } pInfo->num++; @@ -1877,7 +1908,7 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, } } - valuePairAssign(pList[i], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[i], type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage); } } } @@ -1891,7 +1922,7 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa if (pInfo->num < maxLen) { if (pInfo->num == 0) { - valuePairAssign(pList[pInfo->num], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[pInfo->num], type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage); } else { int32_t i = pInfo->num - 1; @@ -1907,7 +1938,7 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa } } - valuePairAssign(pList[i + 1], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[i + 1], type, (const char*)&val.i64Key, ts, pTags, pTagInfo, stage); } pInfo->num++; @@ -1929,7 +1960,7 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa } } - valuePairAssign(pList[i], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[i], type, (const char*)&val.i64Key, ts, pTags, pTagInfo, stage); } } } @@ -2069,7 +2100,7 @@ bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *mi return true; } - tValuePair *pRes = pTopBotInfo->res; + tValuePair *pRes = (tValuePair*) pTopBotInfo->res; if (functionId == TSDB_FUNC_TOP) { switch (pCtx->inputType) { @@ -2121,7 +2152,7 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) { // only the first_stage_merge is directly written data into final output buffer if (pResInfo->superTableQ && pCtx->currentStage != SECONDARY_STAGE_MERGE) { - return pCtx->aOutputBuf; + return (STopBotInfo*) pCtx->aOutputBuf; } else { // for normal table query and super table at the secondary_stage, result is written to intermediate buffer return pResInfo->interResultBuf; } @@ -2135,14 +2166,14 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) { */ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) { char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo); - pTopBotInfo->res = tmp; + pTopBotInfo->res = (tValuePair**) tmp; tmp += POINTER_BYTES * pCtx->param[0].i64Key; size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen; for (int32_t i = 0; i < pCtx->param[0].i64Key; ++i) { - pTopBotInfo->res[i] = tmp; + pTopBotInfo->res[i] = (tValuePair*) tmp; pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair); tmp += size; } @@ -2447,7 +2478,7 @@ static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) { SResultInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->superTableQ && pCtx->currentStage != SECONDARY_STAGE_MERGE) { - return pCtx->aOutputBuf; + return 
(SAPercentileInfo*) pCtx->aOutputBuf; } else { return pResInfo->interResultBuf; } @@ -2558,8 +2589,8 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) { SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_CHAR(pCtx); - pInput->pHisto = (char *)pInput + sizeof(SAPercentileInfo); - pInput->pHisto->elems = (char *)pInput->pHisto + sizeof(SHistogramInfo); + pInput->pHisto = (SHistogramInfo*) ((char *)pInput + sizeof(SAPercentileInfo)); + pInput->pHisto->elems = (SHistBin*) ((char *)pInput->pHisto + sizeof(SHistogramInfo)); if (pInput->pHisto->numOfElems <= 0) { return; @@ -2572,13 +2603,13 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) { if (pHisto->numOfElems <= 0) { memcpy(pHisto, pInput->pHisto, size); - pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); } else { - pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN); memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN); - pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); tHistogramDestroy(&pRes); } @@ -2590,8 +2621,8 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) { static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) { SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_CHAR(pCtx); - pInput->pHisto = (char *)pInput + sizeof(SAPercentileInfo); - pInput->pHisto->elems = (char *)pInput->pHisto + sizeof(SHistogramInfo); + pInput->pHisto = (SHistogramInfo*) ((char *)pInput + sizeof(SAPercentileInfo)); + pInput->pHisto->elems = (SHistBin*) ((char *)pInput->pHisto + sizeof(SHistogramInfo)); if (pInput->pHisto->numOfElems <= 0) { return; @@ -2602,9 +2633,9 @@ static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) { if (pHisto->numOfElems <= 0) { memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1)); - pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); } else { - pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN); tHistogramDestroy(&pOutput->pHisto); @@ -2698,7 +2729,7 @@ static void leastsquares_function(SQLFunctionCtx *pCtx) { int32_t *p = pData; // LEASTSQR_CAL_LOOP(pCtx, param, pParamData, p); for (int32_t i = 0; i < pCtx->size; ++i) { - if (pCtx->hasNull && isNull(p, pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) p, pCtx->inputType)) { continue; } @@ -2840,6 +2871,10 @@ static void date_col_output_function(SQLFunctionCtx *pCtx) { *(int64_t *)(pCtx->aOutputBuf) = pCtx->nStartQueryTimestamp; } +static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_t index) { + date_col_output_function(pCtx); +} + static void col_project_function(SQLFunctionCtx *pCtx) { INC_INIT_VAL(pCtx, pCtx->size); @@ -2950,7 +2985,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { int32_t *pOutput = (int32_t *)pCtx->aOutputBuf; for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } @@ -2982,7 +3017,7 @@ 
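The apercentile merge hunks above keep re-deriving `pHisto->elems` because the histogram header and its bin array live in one contiguous allocation; copying or merging the buffer with `memcpy` leaves the embedded `elems` pointer aimed at the source block, so it must be rebuilt from the destination's base address. A simplified sketch of that layout, with stand-in types (`Histo`, `Bin`) rather than the real `SHistogramInfo`/`SHistBin`:

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { double value; int64_t count; } Bin;        /* stand-in for SHistBin */
typedef struct { int32_t numOfElems; Bin *elems; } Histo;   /* stand-in for SHistogramInfo */

/* Header and bins are one block; elems always points just past the header. */
static Histo *histo_create(int32_t maxBins) {
    Histo *h = calloc(1, sizeof(Histo) + sizeof(Bin) * maxBins);
    if (h != NULL) {
        h->elems = (Bin *)((char *)h + sizeof(Histo));
    }
    return h;
}

/* dst must have been allocated for the same maxBins; the interior pointer
 * has to be fixed up after the raw copy, exactly as the patch does. */
static void histo_copy(Histo *dst, const Histo *src, int32_t maxBins) {
    memcpy(dst, src, sizeof(Histo) + sizeof(Bin) * maxBins);
    dst->elems = (Bin *)((char *)dst + sizeof(Histo));
}
```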
static void diff_function(SQLFunctionCtx *pCtx) { int64_t *pOutput = (int64_t *)pCtx->aOutputBuf; for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } @@ -3014,7 +3049,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { double *pOutput = (double *)pCtx->aOutputBuf; for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } @@ -3044,7 +3079,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { float *pOutput = (float *)pCtx->aOutputBuf; for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } @@ -3075,7 +3110,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { int16_t *pOutput = (int16_t *)pCtx->aOutputBuf; for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } @@ -3159,7 +3194,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { } else { \ *(type *)(ctx)->aOutputBuf = *(type *)(d) - (*(type *)(&(ctx)->param[1].i64Key)); \ *(type *)(&(ctx)->param[1].i64Key) = *(type *)(d); \ - *(int64_t *)(ctx)->ptsOutputBuf = *(int64_t *)((ctx)->ptsList + (TSDB_KEYSIZE)*index); \ + *(int64_t *)(ctx)->ptsOutputBuf = (ctx)->ptsList[index]; \ } \ } while (0); @@ -3245,7 +3280,7 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) { pCtx->aOutputBuf += pCtx->outputBytes * pCtx->size * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); } -static bool arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { +static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { INC_INIT_VAL(pCtx, 1); SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[0].pz; @@ -3254,7 +3289,6 @@ static bool arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { arithmetic_callback_function); pCtx->aOutputBuf += pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); - return true; } #define LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \ @@ -3677,7 +3711,7 @@ static void getStatics_i16(int64_t *primaryKey, int16_t *data, int32_t numOfRow, // int16_t lastVal = TSDB_DATA_SMALLINT_NULL; for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull(&data[i], TSDB_DATA_TYPE_SMALLINT)) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_SMALLINT)) { (*numOfNull) += 1; continue; } @@ -3717,7 +3751,7 @@ static void getStatics_i32(int64_t *primaryKey, int32_t *data, int32_t numOfRow, // int32_t lastVal = TSDB_DATA_INT_NULL; for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull(&data[i], TSDB_DATA_TYPE_INT)) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_INT)) { (*numOfNull) += 1; continue; } @@ -3753,11 +3787,8 @@ static void getStatics_i64(int64_t *primaryKey, int64_t *data, int32_t numOfRow, assert(numOfRow <= INT16_MAX); - int64_t lastKey = 0; - int64_t lastVal = TSDB_DATA_BIGINT_NULL; - for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull(&data[i], TSDB_DATA_TYPE_BIGINT)) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_BIGINT)) { (*numOfNull) += 1; continue; } @@ -3786,28 +3817,33 @@ static void getStatics_i64(int64_t *primaryKey, int64_t *data, int32_t numOfRow, static void getStatics_f(int64_t *primaryKey, 
float *data, int32_t numOfRow, double *min, double *max, double *sum, int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) { - *min = DBL_MAX; - *max = -DBL_MAX; - *minIndex = 0; - *maxIndex = 0; + float fmin = DBL_MAX; + float fmax = -DBL_MAX; + float fminIndex = 0; + float fmaxIndex = 0; + double dsum = 0; assert(numOfRow <= INT16_MAX); for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull(&data[i], TSDB_DATA_TYPE_FLOAT)) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_FLOAT)) { (*numOfNull) += 1; continue; } - *sum += data[i]; - if (*min > data[i]) { - *min = data[i]; - *minIndex = i; + float fv = 0; + *(int32_t*)(&fv) = *(int32_t*)(&(data[i])); + + //*sum += data[i]; + dsum += fv; + if (fmin > fv) { + fmin = fv; + fminIndex = i; } - if (*max < data[i]) { - *max = data[i]; - *maxIndex = i; + if (fmax < fv) { + fmax = fv; + fmaxIndex = i; } // if (isNull(&lastVal, TSDB_DATA_TYPE_FLOAT)) { @@ -3819,35 +3855,48 @@ static void getStatics_f(int64_t *primaryKey, float *data, int32_t numOfRow, dou // lastVal = data[i]; // } } + + double csum = 0; + *(int64_t*)(&csum) = *(int64_t*)sum; + csum += dsum; + *(int64_t*)(sum) = *(int64_t*)(&csum); + + *(int32_t*)max = *(int32_t*)(&fmax); + *(int32_t*)min = *(int32_t*)(&fmin); + *(int32_t*)minIndex = *(int32_t*)(&fminIndex); + *(int32_t*)maxIndex = *(int32_t*)(&fmaxIndex); + } static void getStatics_d(int64_t *primaryKey, double *data, int32_t numOfRow, double *min, double *max, double *sum, int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) { - *min = DBL_MAX; - *max = -DBL_MAX; - *minIndex = 0; - *maxIndex = 0; + double dmin = DBL_MAX; + double dmax = -DBL_MAX; + double dminIndex = 0; + double dmaxIndex = 0; + double dsum = 0; assert(numOfRow <= INT16_MAX); - int64_t lastKey = 0; - double lastVal = TSDB_DATA_DOUBLE_NULL; - for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull(&data[i], TSDB_DATA_TYPE_DOUBLE)) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_DOUBLE)) { (*numOfNull) += 1; continue; } - *sum += data[i]; - if (*min > data[i]) { - *min = data[i]; - *minIndex = i; + double dv = 0; + *(int64_t*)(&dv) = *(int64_t*)(&(data[i])); + + //*sum += data[i]; + dsum += dv; + if (dmin > dv) { + dmin = dv; + dminIndex = i; } - if (*max < data[i]) { - *max = data[i]; - *maxIndex = i; + if (dmax < dv) { + dmax = dv; + dmaxIndex = i; } // if (isNull(&lastVal, TSDB_DATA_TYPE_DOUBLE)) { @@ -3859,6 +3908,16 @@ static void getStatics_d(int64_t *primaryKey, double *data, int32_t numOfRow, do // lastVal = data[i]; // } } + + double csum = 0; + *(int64_t*)(&csum) = *(int64_t*)sum; + csum += dsum; + *(int64_t*)(sum) = *(int64_t*)(&csum); + + *(int64_t*)max = *(int64_t*)(&dmax); + *(int64_t*)min = *(int64_t*)(&dmin); + *(int64_t*)minIndex = *(int64_t*)(&dminIndex); + *(int64_t*)maxIndex = *(int64_t*)(&dmaxIndex); } void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, int32_t type, int64_t *min, int64_t *max, @@ -3881,9 +3940,9 @@ void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, in } else if (type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_TIMESTAMP) { getStatics_i64(primaryKey, (int64_t *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull); } else if (type == TSDB_DATA_TYPE_DOUBLE) { - getStatics_d(primaryKey, (double *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull); + getStatics_d(primaryKey, (double *)data, numOfRow, (double*) min, (double*) max, (double*) sum, minIndex, maxIndex, numOfNull); } else if (type == TSDB_DATA_TYPE_FLOAT) { - 
getStatics_f(primaryKey, (float *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull); + getStatics_f(primaryKey, (float *)data, numOfRow, (double*) min, (double*) max, (double*) sum, minIndex, maxIndex, numOfNull); } } } @@ -4001,44 +4060,42 @@ static void twa_function(SQLFunctionCtx *pCtx) { // pCtx->numOfIteratedElems += notNullElems; } -static bool twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { +static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return true; + return; } - + SET_VAL(pCtx, 1, 1); - + TSKEY *primaryKey = pCtx->ptsList; - + SResultInfo *pResInfo = GET_RES_INFO(pCtx); - STwaInfo * pInfo = pResInfo->interResultBuf; - + STwaInfo *pInfo = pResInfo->interResultBuf; + if (pInfo->lastKey == INT64_MIN) { pInfo->lastKey = pCtx->nStartQueryTimestamp; setTWALastVal(pCtx, pData, 0, pInfo); - + pInfo->hasResult = DATA_SET_FLAG; } - + if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT || pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { pInfo->dOutput += pInfo->dLastValue * (primaryKey[index] - pInfo->lastKey); } else { pInfo->iOutput += pInfo->iLastValue * (primaryKey[index] - pInfo->lastKey); } - + // record the last key/value pInfo->lastKey = primaryKey[index]; setTWALastVal(pCtx, pData, 0, pInfo); - + // pCtx->numOfIteratedElems += 1; pResInfo->hasResult = DATA_SET_FLAG; - + if (pResInfo->superTableQ) { memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(STwaInfo)); } - - return true; } static void twa_func_merge(SQLFunctionCtx *pCtx) { @@ -4050,7 +4107,7 @@ static void twa_func_merge(SQLFunctionCtx *pCtx) { int32_t numOfNotNull = 0; for (int32_t i = 0; i < pCtx->size; ++i, indicator += sizeof(STwaInfo)) { - STwaInfo *pInput = indicator; + STwaInfo *pInput = (STwaInfo*) indicator; if (pInput->hasResult != DATA_SET_FLAG) { continue; @@ -4152,7 +4209,7 @@ static void interp_function(SQLFunctionCtx *pCtx) { if (pCtx->outputType == TSDB_DATA_TYPE_FLOAT) { float v = GET_DOUBLE_VAL(pVal); - assignVal(pCtx->aOutputBuf, &v, pCtx->outputBytes, pCtx->outputType); + assignVal(pCtx->aOutputBuf, (const char*) &v, pCtx->outputBytes, pCtx->outputType); } else { assignVal(pCtx->aOutputBuf, pVal, pCtx->outputBytes, pCtx->outputType); } @@ -4303,173 +4360,427 @@ int32_t funcCompatDefList[28] = { */ 1, 1, 1, -1, 1, 1, 5}; -SQLAggFuncElem aAggs[28] = { - { - // 0, count function does not invoke the finalize function - "count", TSDB_FUNC_COUNT, TSDB_FUNC_COUNT, TSDB_BASE_FUNC_SO, function_setup, count_function, count_function_f, - no_next_step, noop, count_func_merge, count_func_merge, count_load_data_info, - }, - { - // 1 - "sum", TSDB_FUNC_SUM, TSDB_FUNC_SUM, TSDB_BASE_FUNC_SO, function_setup, sum_function, sum_function_f, - no_next_step, function_finalizer, sum_func_merge, sum_func_second_merge, precal_req_load_info, - }, - { - // 2 - "avg", TSDB_FUNC_AVG, TSDB_FUNC_AVG, TSDB_BASE_FUNC_SO, function_setup, avg_function, avg_function_f, - no_next_step, avg_finalizer, avg_func_merge, avg_func_second_merge, precal_req_load_info, - }, - { - // 3 - "min", TSDB_FUNC_MIN, TSDB_FUNC_MIN, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, min_func_setup, - min_function, min_function_f, no_next_step, function_finalizer, min_func_merge, min_func_second_merge, - precal_req_load_info, - }, - { - // 4 - "max", TSDB_FUNC_MAX, TSDB_FUNC_MAX, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, max_func_setup, - max_function, max_function_f, no_next_step, function_finalizer, max_func_merge, 
max_func_second_merge, - precal_req_load_info, - }, - { - // 5 - "stddev", TSDB_FUNC_STDDEV, TSDB_FUNC_INVALID_ID, TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, - function_setup, stddev_function, stddev_function_f, stddev_next_step, stddev_finalizer, noop, noop, - data_req_load_info, - }, - { - // 6 - "percentile", TSDB_FUNC_PERCT, TSDB_FUNC_INVALID_ID, - TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, percentile_function_setup, percentile_function, - percentile_function_f, no_next_step, percentile_finalizer, noop, noop, data_req_load_info, - }, - { - // 7 - "apercentile", TSDB_FUNC_APERCT, TSDB_FUNC_APERCT, - TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_METRIC, - apercentile_function_setup, apercentile_function, apercentile_function_f, no_next_step, apercentile_finalizer, - apercentile_func_merge, apercentile_func_second_merge, data_req_load_info, - }, - { - // 8 - "first", TSDB_FUNC_FIRST, TSDB_FUNC_FIRST_DST, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, function_setup, - first_function, first_function_f, no_next_step, function_finalizer, noop, noop, first_data_req_info, - }, - { - // 9 - "last", TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, function_setup, - last_function, last_function_f, no_next_step, function_finalizer, noop, noop, last_data_req_info, - }, - { - // 10 - "last_row", TSDB_FUNC_LAST_ROW, TSDB_FUNC_LAST_ROW, - TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_NEED_TS | - TSDB_FUNCSTATE_SELECTIVITY, - first_last_function_setup, last_row_function, noop, no_next_step, last_row_finalizer, noop, - last_dist_func_second_merge, data_req_load_info, - }, - { - // 11 - "top", TSDB_FUNC_TOP, TSDB_FUNC_TOP, TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_OF | - TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, - top_bottom_function_setup, top_function, top_function_f, no_next_step, top_bottom_func_finalizer, - top_func_merge, top_func_second_merge, data_req_load_info, - }, - { - // 12 - "bottom", TSDB_FUNC_BOTTOM, TSDB_FUNC_BOTTOM, TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_OF | - TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, - top_bottom_function_setup, bottom_function, bottom_function_f, no_next_step, top_bottom_func_finalizer, - bottom_func_merge, bottom_func_second_merge, data_req_load_info, - }, - { - // 13 - "spread", TSDB_FUNC_SPREAD, TSDB_FUNC_SPREAD, TSDB_BASE_FUNC_SO, spread_function_setup, spread_function, - spread_function_f, no_next_step, spread_function_finalizer, spread_func_merge, spread_func_sec_merge, - count_load_data_info, - }, - { - // 14 - "twa", TSDB_FUNC_TWA, TSDB_FUNC_TWA, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, twa_function_setup, - twa_function, twa_function_f, no_next_step, twa_function_finalizer, twa_func_merge, twa_function_copy, - data_req_load_info, - }, - { - // 15 - "leastsquares", TSDB_FUNC_LEASTSQR, TSDB_FUNC_INVALID_ID, - TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, leastsquares_function_setup, - leastsquares_function, leastsquares_function_f, no_next_step, leastsquares_finalizer, noop, noop, - data_req_load_info, - }, - { - // 16 - "ts", TSDB_FUNC_TS, TSDB_FUNC_TS, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, - date_col_output_function, date_col_output_function, no_next_step, noop, copy_function, copy_function, - no_data_info, - }, - { - // 17 - "ts", TSDB_FUNC_TS_DUMMY, TSDB_FUNC_TS_DUMMY, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, 
function_setup, noop, - noop, no_next_step, noop, copy_function, copy_function, data_req_load_info, - }, - { - // 18 - "tag", TSDB_FUNC_TAG_DUMMY, TSDB_FUNC_TAG_DUMMY, TSDB_BASE_FUNC_SO, function_setup, tag_function, noop, - no_next_step, noop, copy_function, copy_function, no_data_info, - }, - { - // 19 - "ts", TSDB_FUNC_TS_COMP, TSDB_FUNC_TS_COMP, TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_NEED_TS, ts_comp_function_setup, - ts_comp_function, ts_comp_function_f, no_next_step, ts_comp_finalize, copy_function, copy_function, - data_req_load_info, - }, - { - // 20 - "tag", TSDB_FUNC_TAG, TSDB_FUNC_TAG, TSDB_BASE_FUNC_SO, function_setup, tag_function, tag_function_f, - no_next_step, noop, copy_function, copy_function, no_data_info, - }, - { - // 21, column project sql function - "colprj", TSDB_FUNC_PRJ, TSDB_FUNC_PRJ, TSDB_BASE_FUNC_MO | TSDB_FUNCSTATE_NEED_TS, function_setup, - col_project_function, col_project_function_f, no_next_step, noop, copy_function, copy_function, - data_req_load_info, - }, - { - // 22, multi-output, tag function has only one result - "tagprj", TSDB_FUNC_TAGPRJ, TSDB_FUNC_TAGPRJ, TSDB_BASE_FUNC_MO, function_setup, tag_project_function, - tag_project_function_f, no_next_step, noop, copy_function, copy_function, no_data_info, - }, - { - // 23 - "arithmetic", TSDB_FUNC_ARITHM, TSDB_FUNC_ARITHM, - TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_NEED_TS, function_setup, arithmetic_function, - arithmetic_function_f, no_next_step, noop, copy_function, copy_function, data_req_load_info, - }, - { - // 24 - "diff", TSDB_FUNC_DIFF, TSDB_FUNC_INVALID_ID, TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_NEED_TS, diff_function_setup, - diff_function, diff_function_f, no_next_step, noop, noop, noop, data_req_load_info, - }, - // distributed version used in two-stage aggregation processes - { - // 25 - "first_dist", TSDB_FUNC_FIRST_DST, TSDB_FUNC_FIRST_DST, - TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, first_last_function_setup, - first_dist_function, first_dist_function_f, no_next_step, function_finalizer, first_dist_func_merge, - first_dist_func_second_merge, first_dist_data_req_info, - }, - { - // 26 - "last_dist", TSDB_FUNC_LAST_DST, TSDB_FUNC_LAST_DST, - TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, first_last_function_setup, - last_dist_function, last_dist_function_f, no_next_step, function_finalizer, last_dist_func_merge, - last_dist_func_second_merge, last_dist_data_req_info, - }, - { - // 27 - "interp", TSDB_FUNC_INTERP, TSDB_FUNC_INTERP, - TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_NEED_TS, function_setup, - interp_function, - do_sum_f, // todo filter handle - no_next_step, noop, noop, copy_function, no_data_info, - }}; +SQLAggFuncElem aAggs[28] = {{ + // 0, count function does not invoke the finalize function + "count", + TSDB_FUNC_COUNT, + TSDB_FUNC_COUNT, + TSDB_BASE_FUNC_SO, + function_setup, + count_function, + count_function_f, + no_next_step, + noop1, + count_func_merge, + count_func_merge, + count_load_data_info, + }, + { + // 1 + "sum", + TSDB_FUNC_SUM, + TSDB_FUNC_SUM, + TSDB_BASE_FUNC_SO, + function_setup, + sum_function, + sum_function_f, + no_next_step, + function_finalizer, + sum_func_merge, + sum_func_second_merge, + precal_req_load_info, + }, + { + // 2 + "avg", + TSDB_FUNC_AVG, + TSDB_FUNC_AVG, + TSDB_BASE_FUNC_SO, + function_setup, + avg_function, + avg_function_f, + no_next_step, + avg_finalizer, + avg_func_merge, + avg_func_second_merge, + precal_req_load_info, + }, + { + 
// 3 + "min", + TSDB_FUNC_MIN, + TSDB_FUNC_MIN, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, + min_func_setup, + min_function, + min_function_f, + no_next_step, + function_finalizer, + min_func_merge, + min_func_second_merge, + precal_req_load_info, + }, + { + // 4 + "max", + TSDB_FUNC_MAX, + TSDB_FUNC_MAX, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, + max_func_setup, + max_function, + max_function_f, + no_next_step, + function_finalizer, + max_func_merge, + max_func_second_merge, + precal_req_load_info, + }, + { + // 5 + "stddev", + TSDB_FUNC_STDDEV, + TSDB_FUNC_INVALID_ID, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, + function_setup, + stddev_function, + stddev_function_f, + stddev_next_step, + stddev_finalizer, + noop1, + noop1, + data_req_load_info, + }, + { + // 6 + "percentile", + TSDB_FUNC_PERCT, + TSDB_FUNC_INVALID_ID, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, + percentile_function_setup, + percentile_function, + percentile_function_f, + no_next_step, + percentile_finalizer, + noop1, + noop1, + data_req_load_info, + }, + { + // 7 + "apercentile", + TSDB_FUNC_APERCT, + TSDB_FUNC_APERCT, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_METRIC, + apercentile_function_setup, + apercentile_function, + apercentile_function_f, + no_next_step, + apercentile_finalizer, + apercentile_func_merge, + apercentile_func_second_merge, + data_req_load_info, + }, + { + // 8 + "first", + TSDB_FUNC_FIRST, + TSDB_FUNC_FIRST_DST, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, + function_setup, + first_function, + first_function_f, + no_next_step, + function_finalizer, + noop1, + noop1, + first_data_req_info, + }, + { + // 9 + "last", + TSDB_FUNC_LAST, + TSDB_FUNC_LAST_DST, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, + function_setup, + last_function, + last_function_f, + no_next_step, + function_finalizer, + noop1, + noop1, + last_data_req_info, + }, + { + // 10 + "last_row", + TSDB_FUNC_LAST_ROW, + TSDB_FUNC_LAST_ROW, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_NEED_TS | + TSDB_FUNCSTATE_SELECTIVITY, + first_last_function_setup, + last_row_function, + noop2, + no_next_step, + last_row_finalizer, + noop1, + last_dist_func_second_merge, + data_req_load_info, + }, + { + // 11 + "top", + TSDB_FUNC_TOP, + TSDB_FUNC_TOP, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_NEED_TS | + TSDB_FUNCSTATE_SELECTIVITY, + top_bottom_function_setup, + top_function, + top_function_f, + no_next_step, + top_bottom_func_finalizer, + top_func_merge, + top_func_second_merge, + data_req_load_info, + }, + { + // 12 + "bottom", + TSDB_FUNC_BOTTOM, + TSDB_FUNC_BOTTOM, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_NEED_TS | + TSDB_FUNCSTATE_SELECTIVITY, + top_bottom_function_setup, + bottom_function, + bottom_function_f, + no_next_step, + top_bottom_func_finalizer, + bottom_func_merge, + bottom_func_second_merge, + data_req_load_info, + }, + { + // 13 + "spread", + TSDB_FUNC_SPREAD, + TSDB_FUNC_SPREAD, + TSDB_BASE_FUNC_SO, + spread_function_setup, + spread_function, + spread_function_f, + no_next_step, + spread_function_finalizer, + spread_func_merge, + spread_func_sec_merge, + count_load_data_info, + }, + { + // 14 + "twa", + TSDB_FUNC_TWA, + TSDB_FUNC_TWA, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + twa_function_setup, + twa_function, + twa_function_f, + no_next_step, + twa_function_finalizer, + twa_func_merge, + 
twa_function_copy, + data_req_load_info, + }, + { + // 15 + "leastsquares", + TSDB_FUNC_LEASTSQR, + TSDB_FUNC_INVALID_ID, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, + leastsquares_function_setup, + leastsquares_function, + leastsquares_function_f, + no_next_step, + leastsquares_finalizer, + noop1, + noop1, + data_req_load_info, + }, + { + // 16 + "ts", + TSDB_FUNC_TS, + TSDB_FUNC_TS, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + function_setup, + date_col_output_function, + date_col_output_function_f, + no_next_step, + noop1, + copy_function, + copy_function, + no_data_info, + }, + { + // 17 + "ts", + TSDB_FUNC_TS_DUMMY, + TSDB_FUNC_TS_DUMMY, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + function_setup, + noop1, + noop2, + no_next_step, + noop1, + copy_function, + copy_function, + data_req_load_info, + }, + { + // 18 + "tag", + TSDB_FUNC_TAG_DUMMY, + TSDB_FUNC_TAG_DUMMY, + TSDB_BASE_FUNC_SO, + function_setup, + tag_function, + noop2, + no_next_step, + noop1, + copy_function, + copy_function, + no_data_info, + }, + { + // 19 + "ts", + TSDB_FUNC_TS_COMP, + TSDB_FUNC_TS_COMP, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_NEED_TS, + ts_comp_function_setup, + ts_comp_function, + ts_comp_function_f, + no_next_step, + ts_comp_finalize, + copy_function, + copy_function, + data_req_load_info, + }, + { + // 20 + "tag", + TSDB_FUNC_TAG, + TSDB_FUNC_TAG, + TSDB_BASE_FUNC_SO, + function_setup, + tag_function, + tag_function_f, + no_next_step, + noop1, + copy_function, + copy_function, + no_data_info, + }, + { + // 21, column project sql function + "colprj", + TSDB_FUNC_PRJ, + TSDB_FUNC_PRJ, + TSDB_BASE_FUNC_MO | TSDB_FUNCSTATE_NEED_TS, + function_setup, + col_project_function, + col_project_function_f, + no_next_step, + noop1, + copy_function, + copy_function, + data_req_load_info, + }, + { + // 22, multi-output, tag function has only one result + "tagprj", + TSDB_FUNC_TAGPRJ, + TSDB_FUNC_TAGPRJ, + TSDB_BASE_FUNC_MO, + function_setup, + tag_project_function, + tag_project_function_f, + no_next_step, + noop1, + copy_function, + copy_function, + no_data_info, + }, + { + // 23 + "arithmetic", + TSDB_FUNC_ARITHM, + TSDB_FUNC_ARITHM, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_NEED_TS, + function_setup, + arithmetic_function, + arithmetic_function_f, + no_next_step, + noop1, + copy_function, + copy_function, + data_req_load_info, + }, + { + // 24 + "diff", + TSDB_FUNC_DIFF, + TSDB_FUNC_INVALID_ID, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_NEED_TS, + diff_function_setup, + diff_function, + diff_function_f, + no_next_step, + noop1, + noop1, + noop1, + data_req_load_info, + }, + // distributed version used in two-stage aggregation processes + { + // 25 + "first_dist", + TSDB_FUNC_FIRST_DST, + TSDB_FUNC_FIRST_DST, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, + first_last_function_setup, + first_dist_function, + first_dist_function_f, + no_next_step, + function_finalizer, + first_dist_func_merge, + first_dist_func_second_merge, + first_dist_data_req_info, + }, + { + // 26 + "last_dist", + TSDB_FUNC_LAST_DST, + TSDB_FUNC_LAST_DST, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, + first_last_function_setup, + last_dist_function, + last_dist_function_f, + no_next_step, + function_finalizer, + last_dist_func_merge, + last_dist_func_second_merge, + last_dist_data_req_info, + }, + { + // 27 + "interp", + TSDB_FUNC_INTERP, + TSDB_FUNC_INTERP, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_NEED_TS, + 
function_setup, + interp_function, + do_sum_f, // todo filter handle + no_next_step, + noop1, + noop1, + copy_function, + no_data_info, + }}; diff --git a/src/client/src/tscJoinProcess.c b/src/client/src/tscJoinProcess.c index 4965498eff0dbadbbf87d3e04d757e6afd002fa4..a94b308e87a784e55a01f2d19c0c7cba60b3beeb 100644 --- a/src/client/src/tscJoinProcess.c +++ b/src/client/src/tscJoinProcess.c @@ -13,16 +13,9 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include - #include "os.h" -#include "tcache.h" #include "tscJoinProcess.h" +#include "tcache.h" #include "tscUtil.h" #include "tsclient.h" #include "tscompression.h" @@ -52,8 +45,8 @@ static bool doCompare(int32_t order, int64_t left, int64_t right) { } } -static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSupporter1, SJoinSubquerySupporter* pSupporter2, - TSKEY* st, TSKEY* et) { +static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSupporter1, + SJoinSubquerySupporter* pSupporter2, TSKEY* st, TSKEY* et) { STSBuf* output1 = tsBufCreate(true); STSBuf* output2 = tsBufCreate(true); @@ -95,7 +88,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor #ifdef _DEBUG_VIEW // for debug purpose - tscPrint("%lld, tags:%d \t %lld, tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag); + tscPrint("%" PRId64 ", tags:%d \t %" PRId64 ", tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag); #endif if (elem1.tag < elem2.tag || (elem1.tag == elem2.tag && doCompare(order, elem1.ts, elem2.ts))) { @@ -157,22 +150,21 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor tsBufDestory(pSupporter1->pTSBuf); tsBufDestory(pSupporter2->pTSBuf); - tscTrace("%p input1:%lld, input2:%lld, %lld for secondary query after ts blocks intersecting", - pSql, numOfInput1, numOfInput2, output1->numOfTotal); + tscTrace("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks intersecting", pSql, + numOfInput1, numOfInput2, output1->numOfTotal); return output1->numOfTotal; } -//todo handle failed to create sub query -SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, /*int32_t* numOfComplete, int32_t* gc,*/ int32_t index) { +// todo handle failed to create sub query +SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, + /*int32_t* numOfComplete, int32_t* gc,*/ int32_t index) { SJoinSubquerySupporter* pSupporter = calloc(1, sizeof(SJoinSubquerySupporter)); if (pSupporter == NULL) { return NULL; } pSupporter->pObj = pSql; - pSupporter->hasMore = true; - pSupporter->pState = pState; pSupporter->subqueryIndex = index; @@ -233,12 +225,6 @@ bool needSecondaryQuery(SSqlObj* pSql) { * launch secondary stage query to fetch the result that contains timestamp in set */ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { - // TODO not launch secondary stage query - // if (!needSecondaryQuery(pSql)) { - // return; - // } - - // sub query may not be necessary int32_t numOfSub = 0; SJoinSubquerySupporter* pSupporter = NULL; @@ -246,15 +232,22 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { pSupporter = pSql->pSubs[i]->param; pSupporter->pState->numOfCompleted = 0; + /* + * If the columns are not involved in the final select clause, the secondary query will not be launched + * for the subquery. 
+ */ if (pSupporter->exprsInfo.numOfExprs > 0) { ++numOfSub; } } // scan all subquery, if one sub query has only ts, ignore it - int32_t j = 0; - tscTrace("%p start to launch secondary subqueries: %d", pSql, pSql->numOfSubs); + tscTrace( + "%p start to launch secondary subqueries, total:%d, only:%d needs to query, others are not retrieve in " + "select clause", + pSql, pSql->numOfSubs, numOfSub); + int32_t j = 0; for (int32_t i = 0; i < pSql->numOfSubs; ++i) { SSqlObj* pSub = pSql->pSubs[i]; pSupporter = pSub->param; @@ -266,15 +259,14 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { continue; } - SSqlObj* pNew = createSubqueryObj(pSql, 0, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL); + SSqlObj* pNew = createSubqueryObj(pSql, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL); if (pNew == NULL) { - pSql->numOfSubs = i; //revise the number of subquery + pSql->numOfSubs = i; // revise the number of subquery pSupporter->pState->numOfTotal = i; pSupporter->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY; tscDestroyJoinSupporter(pSupporter); - - return NULL; + return 0; } tscFreeSqlCmdData(&pNew->cmd); @@ -289,7 +281,6 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { pNew->cmd.type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE; pNew->cmd.nAggTimeInterval = pSupporter->interval; - pNew->cmd.limit = pSupporter->limit; pNew->cmd.groupbyExpr = pSupporter->groupbyExpr; tscColumnBaseInfoCopy(&pNew->cmd.colList, &pSupporter->colList, 0); @@ -309,18 +300,27 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0); + /* + * When handling the projection query, the offset value will be modified for table-table join, which is changed + * during the timestamp intersection. + */ + pSupporter->limit = pSql->cmd.limit; + pNew->cmd.limit = pSupporter->limit; + // fetch the join tag column if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { SSqlExpr* pExpr = tscSqlExprGet(&pNew->cmd, 0); assert(pNew->cmd.tagCond.joinInfo.hasJoin); - int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd, pMeterMetaInfo->pMeterMeta->uid); + int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd.tagCond, pMeterMetaInfo->pMeterMeta->uid); pExpr->param[0].i64Key = tagColIndex; pExpr->numOfParams = 1; - - addRequiredTagColumn(&pNew->cmd, tagColIndex, 0); } +#ifdef _DEBUG_VIEW + tscPrintSelectClause(&pNew->cmd); +#endif + tscProcessSql(pNew); } @@ -360,7 +360,7 @@ static void doQuitSubquery(SSqlObj* pParentSql) { } static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSubquerySupporter* pSupporter) { - if (__sync_add_and_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { + if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { pSqlObj->res.code = abs(pSupporter->pState->code); tscError("%p all subquery return and query failed, global code:%d", pSqlObj, pSqlObj->res.code); @@ -391,9 +391,9 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { return; } - if (numOfRows > 0) { // write the data into disk + if (numOfRows > 0) { // write the data into disk fwrite(pSql->res.data, pSql->res.numOfRows, 1, pSupporter->f); - fflush(pSupporter->f); + fclose(pSupporter->f); STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true); if (pBuf == NULL) { @@ -408,7 +408,10 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { tscTrace("%p create tmp file for ts block:%s", pSql, pBuf->path); pSupporter->pTSBuf = pBuf; } else { - 
tsBufMerge(pSupporter->pTSBuf, pBuf, pSql->cmd.vnodeIdx); + assert(pSql->cmd.numOfTables == 1); // for subquery, only one metermetaInfo + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + + tsBufMerge(pSupporter->pTSBuf, pBuf, pMeterMetaInfo->vnodeIndex); tsBufDestory(pBuf); } @@ -418,12 +421,25 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { pSql->res.row = pSql->res.numOfRows; taos_fetch_rows_a(tres, joinRetrieveCallback, param); - } else if (numOfRows == 0) { // no data from this vnode anymore - if (__sync_add_and_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { + } else if (numOfRows == 0) { // no data from this vnode anymore + if (tscProjectionQueryOnMetric(&pParentSql->cmd)) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + assert(pSql->cmd.numOfTables == 1); + // for projection query, need to try next vnode + if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + pSql->cmd.command = TSDB_SQL_SELECT; + pSql->fp = tscJoinQueryCallback; + tscProcessSql(pSql); + + return; + } + } + + if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { tscTrace("%p sub:%p, numOfSub:%d, quit from further procedure due to other queries failure", pParentSql, tres, - pSupporter->subqueryIndex); + pSupporter->subqueryIndex); doQuitSubquery(pParentSql); return; } @@ -458,8 +474,33 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { tscError("%p retrieve failed, code:%d, index:%d", pSql, numOfRows, pSupporter->subqueryIndex); } - if (__sync_add_and_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { - tscTrace("%p secondary retrieve completed, global code:%d", tres, pParentSql->res.code); + if (numOfRows >= 0) { + pSql->res.numOfTotal += pSql->res.numOfRows; + } + + if (tscProjectionQueryOnMetric(&pSql->cmd) && numOfRows == 0) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + assert(pSql->cmd.numOfTables == 1); + + // for projection query, need to try next vnode if current vnode is exhausted + if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + pSupporter->pState->numOfCompleted = 0; + pSupporter->pState->numOfTotal = 1; + + pSql->cmd.command = TSDB_SQL_SELECT; + pSql->fp = tscJoinQueryCallback; + tscProcessSql(pSql); + + return; + } + } + + if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { + assert(pSupporter->pState->numOfCompleted == pSupporter->pState->numOfTotal); + + tscTrace("%p all %d secondary retrieves are completed, global code:%d", tres, pSupporter->pState->numOfTotal, + pParentSql->res.code); + if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { pParentSql->res.code = abs(pSupporter->pState->code); freeSubqueryObj(pParentSql); @@ -473,50 +514,68 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { void tscFetchDatablockFromSubquery(SSqlObj* pSql) { int32_t numOfFetch = 0; - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[i]->param; + assert(pSql->numOfSubs >= 1); - SSqlRes* pRes = &pSql->pSubs[i]->res; - if (pRes->row >= pRes->numOfRows && pSupporter->hasMore) { - numOfFetch++; + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlRes *pRes = &pSql->pSubs[i]->res; + SSqlCmd *pCmd = 
&pSql->pSubs[i]->cmd; + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + + if (tscProjectionQueryOnMetric(pCmd)) { + if (pRes->row >= pRes->numOfRows && pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes && + (!tscHasReachLimitation(pSql->pSubs[i]))) { + numOfFetch++; + } + } else { + if (pRes->row >= pRes->numOfRows && (!tscHasReachLimitation(pSql->pSubs[i]))) { + numOfFetch++; + } } } - if (numOfFetch > 0) { - tscTrace("%p retrieve data from %d subqueries", pSql, numOfFetch); + if (numOfFetch <= 0) { + return ; + } - SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[0]->param; - pSupporter->pState->numOfTotal = numOfFetch; // wait for all subqueries completed - pSupporter->pState->numOfCompleted = 0; + // TODO multi-vnode retrieve for projection query with limitation has bugs, since the global limiation is not handled + tscTrace("%p retrieve data from %d subqueries", pSql, numOfFetch); - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlObj* pSql1 = pSql->pSubs[i]; + SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[0]->param; + pSupporter->pState->numOfTotal = numOfFetch; // wait for all subqueries completed + pSupporter->pState->numOfCompleted = 0; - SSqlRes* pRes1 = &pSql1->res; - SSqlCmd* pCmd1 = &pSql1->cmd; + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlObj* pSql1 = pSql->pSubs[i]; - pSupporter = (SJoinSubquerySupporter*)pSql1->param; + SSqlRes* pRes1 = &pSql1->res; + SSqlCmd* pCmd1 = &pSql1->cmd; - // wait for all subqueries completed - pSupporter->pState->numOfTotal = numOfFetch; - if (pRes1->row >= pRes1->numOfRows && pSupporter->hasMore) { - tscTrace("%p subquery:%p retrieve data from vnode, index:%d", pSql, pSql1, pSupporter->subqueryIndex); + pSupporter = (SJoinSubquerySupporter*)pSql1->param; - tscResetForNextRetrieve(pRes1); + // wait for all subqueries completed + pSupporter->pState->numOfTotal = numOfFetch; + assert(pRes1->numOfRows >= 0 && pCmd1->numOfTables == 1); - pSql1->fp = joinRetrieveCallback; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd1, 0); + + if (pRes1->row >= pRes1->numOfRows) { + tscTrace("%p subquery:%p retrieve data from vnode, subquery:%d, vnodeIndex:%d", pSql, pSql1, + pSupporter->subqueryIndex, pMeterMetaInfo->vnodeIndex); - if (pCmd1->command < TSDB_SQL_LOCAL) { - pCmd1->command = (pCmd1->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; - } + tscResetForNextRetrieve(pRes1); + pSql1->fp = joinRetrieveCallback; - tscProcessSql(pSql1); + if (pCmd1->command < TSDB_SQL_LOCAL) { + pCmd1->command = (pCmd1->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; } - } - // wait for all subquery completed - tsem_wait(&pSql->rspSem); + tscProcessSql(pSql1); + } } + + // wait for all subquery completed + tsem_wait(&pSql->rspSem); } // all subqueries return, set the result output index @@ -526,6 +585,10 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { tscTrace("%p all subquery response, retrieve data", pSql); + if (pRes->pColumnIndex != NULL) { + return; // the column transfer support struct has been built + } + pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pCmd->fieldsInfo.numOfOutputCols); for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { @@ -567,7 +630,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)param; - // if (__sync_add_and_fetch_32(pSupporter->numOfComplete, 1) >= + // if (atomic_add_fetch_32(pSupporter->numOfComplete, 1) >= // pSupporter->numOfTotal) { // SSqlObj *pParentObj = pSupporter->pObj; // @@ -612,23 +675,38 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { quitAllSubquery(pParentSql, pSupporter); } else { - if (__sync_add_and_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { + if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { tscSetupOutputColumnIndex(pParentSql); - if (pParentSql->fp == NULL) { - tsem_wait(&pParentSql->emptyRspSem); - tsem_wait(&pParentSql->emptyRspSem); - - tsem_post(&pParentSql->rspSem); - } else { - // set the command flag must be after the semaphore been correctly set. - // pPObj->cmd.command = TSDB_SQL_RETRIEVE_METRIC; - // if (pPObj->res.code == TSDB_CODE_SUCCESS) { - // (*pPObj->fp)(pPObj->param, pPObj, 0); - // } else { - // tscQueueAsyncRes(pPObj); - // } - assert(0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + + /** + * if the query is a continue query (vnodeIndex > 0 for projection query) for next vnode, do the retrieval of + * data instead of returning to its invoker + */ + if (pMeterMetaInfo->vnodeIndex > 0 && tscProjectionQueryOnMetric(&pSql->cmd)) { + assert(pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes); + pSupporter->pState->numOfCompleted = 0; // reset the record value + + pSql->fp = joinRetrieveCallback; // continue retrieve data + pSql->cmd.command = TSDB_SQL_FETCH; + tscProcessSql(pSql); + } else { // first retrieve from vnode during the secondary stage sub-query + if (pParentSql->fp == NULL) { + tsem_wait(&pParentSql->emptyRspSem); + tsem_wait(&pParentSql->emptyRspSem); + + tsem_post(&pParentSql->rspSem); + } else { + // set the command flag must be after the semaphore been correctly set. 
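The completion gating in these callbacks follows a common pattern: each subquery callback atomically increments a shared counter, and only the callback that brings the counter up to the total carries out the final step (building the output column index or posting the semaphore). A minimal, self-contained sketch of that pattern, using hypothetical names rather than the actual TDengine structures:

```c
/*
 * Illustrative sketch of the "last subquery finalizes" pattern used above.
 * Names are hypothetical; the real code keeps the counter in
 * pSupporter->pState and uses atomic_add_fetch_32.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct SubqueryState {
  atomic_int numOfCompleted;   /* how many subqueries have finished */
  int        numOfTotal;       /* how many subqueries were launched */
} SubqueryState;

/* called once per finished subquery, possibly from different threads */
static void onSubqueryDone(SubqueryState *state, int subqueryIndex) {
  int done = atomic_fetch_add(&state->numOfCompleted, 1) + 1;
  if (done < state->numOfTotal) {
    return;  /* other subqueries are still running */
  }
  /* only the last finisher reaches this point, exactly once */
  printf("all %d subqueries completed, finalize result (last index %d)\n",
         state->numOfTotal, subqueryIndex);
}

int main(void) {
  SubqueryState state = { .numOfTotal = 3 };
  atomic_init(&state.numOfCompleted, 0);
  for (int i = 0; i < 3; ++i) {
    onSubqueryDone(&state, i);  /* sequential here; concurrent in practice */
  }
  return 0;
}
```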
+ // pPObj->cmd.command = TSDB_SQL_RETRIEVE_METRIC; + // if (pPObj->res.code == TSDB_CODE_SUCCESS) { + // (*pPObj->fp)(pPObj->param, pPObj, 0); + // } else { + // tscQueueAsyncRes(pPObj); + // } + assert(0); + } } } } @@ -738,8 +816,9 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { strncpy(pTSBuf->path, path, PATH_MAX); - pTSBuf->f = fopen(pTSBuf->path, "r"); + pTSBuf->f = fopen(pTSBuf->path, "r+"); if (pTSBuf->f == NULL) { + free(pTSBuf); return NULL; } @@ -781,7 +860,8 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { size_t infoSize = sizeof(STSVnodeBlockInfo) * pTSBuf->numOfVnodes; STSVnodeBlockInfo* buf = (STSVnodeBlockInfo*)calloc(1, infoSize); - int64_t pos = ftell(pTSBuf->f); + + //int64_t pos = ftell(pTSBuf->f); //pos not used fread(buf, infoSize, 1, pTSBuf->f); // the length value for each vnode is not kept in file, so does not set the length value @@ -797,13 +877,17 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { struct stat fileStat; fstat(fileno(pTSBuf->f), &fileStat); - pTSBuf->fileSize = (uint32_t) fileStat.st_size; + pTSBuf->fileSize = (uint32_t)fileStat.st_size; tsBufResetPos(pTSBuf); // ascending by default pTSBuf->cur.order = TSQL_SO_ASC; pTSBuf->autoDelete = autoDelete; + + tscTrace("create tsBuf from file:%s, fd:%d, size:%d, numOfVnode:%d, autoDelete:%d", pTSBuf->path, fileno(pTSBuf->f), + pTSBuf->fileSize, pTSBuf->numOfVnodes, pTSBuf->autoDelete); + return pTSBuf; } @@ -821,12 +905,22 @@ void tsBufDestory(STSBuf* pTSBuf) { fclose(pTSBuf->f); if (pTSBuf->autoDelete) { + tscTrace("tsBuf %p destroyed, delete tmp file:%s", pTSBuf, pTSBuf->path); unlink(pTSBuf->path); + } else { + tscTrace("tsBuf %p destroyed, tmp file:%s, remains", pTSBuf, pTSBuf->path); } free(pTSBuf); } +static STSVnodeBlockInfoEx* tsBufGetLastVnodeInfo(STSBuf* pTSBuf) { + int32_t last = pTSBuf->numOfVnodes - 1; + + assert(last >= 0); + return &pTSBuf->pData[last]; +} + static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { if (pTSBuf->numOfAlloc <= pTSBuf->numOfVnodes) { uint32_t newSize = (uint32_t)(pTSBuf->numOfAlloc * 1.5); @@ -843,10 +937,10 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { } if (pTSBuf->numOfVnodes > 0) { - STSVnodeBlockInfo* pPrevBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1].info; + STSVnodeBlockInfoEx* pPrevBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); // update prev vnode length info in file - TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pPrevBlockInfo); + TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pPrevBlockInfoEx->info); } // set initial value for vnode block @@ -864,9 +958,9 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { // update the header info STSBufFileHeader header = { .magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder}; - STSBufUpdateHeader(pTSBuf, &header); - return &pTSBuf->pData[pTSBuf->numOfVnodes - 1]; + STSBufUpdateHeader(pTSBuf, &header); + return tsBufGetLastVnodeInfo(pTSBuf); } static void shrinkBuffer(STSList* ptsData) { @@ -913,8 +1007,10 @@ static void writeDataToDisk(STSBuf* pTSBuf) { pTSBuf->tsData.len = 0; - pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.compLen += blockSize; - pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.numOfBlocks += 1; + STSVnodeBlockInfoEx* pVnodeBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); + + pVnodeBlockInfoEx->info.compLen += blockSize; + pVnodeBlockInfoEx->info.numOfBlocks += 1; shrinkBuffer(&pTSBuf->tsData); } @@ 
-1015,13 +1111,13 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData STSVnodeBlockInfoEx* pBlockInfo = NULL; STSList* ptsData = &pTSBuf->tsData; - if (pTSBuf->numOfVnodes == 0 || pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.vnode != vnodeId) { + if (pTSBuf->numOfVnodes == 0 || tsBufGetLastVnodeInfo(pTSBuf)->info.vnode != vnodeId) { writeDataToDisk(pTSBuf); shrinkBuffer(ptsData); pBlockInfo = addOneVnodeInfo(pTSBuf, vnodeId); } else { - pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1]; + pBlockInfo = tsBufGetLastVnodeInfo(pTSBuf); } assert(pBlockInfo->info.vnode == vnodeId); @@ -1044,6 +1140,8 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData pTSBuf->numOfTotal += len / TSDB_KEYSIZE; + // the size of raw data exceeds the size of the default prepared buffer, so + // during getBufBlock, the output buffer needs to be large enough. if (ptsData->len >= ptsData->threshold) { writeDataToDisk(pTSBuf); shrinkBuffer(ptsData); @@ -1060,10 +1158,10 @@ void tsBufFlush(STSBuf* pTSBuf) { writeDataToDisk(pTSBuf); shrinkBuffer(&pTSBuf->tsData); - STSVnodeBlockInfo* pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1].info; + STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); // update prev vnode length info in file - TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo); + TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pBlockInfoEx->info); // save the ts order into header STSBufFileHeader header = { @@ -1164,11 +1262,22 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t vnodeIndex, int32_t blockIndex } STSBlock* pBlock = &pTSBuf->block; + + size_t s = pBlock->numOfElem * TSDB_KEYSIZE; + + /* + * In order to accommodate all the qualified data, the actual buffer size for one block with identical tags value + * may exceed the maximum allowed size during *tsBufAppend* function by invoking expandBuffer function + */ + if (s > pTSBuf->tsData.allocSize) { + expandBuffer(&pTSBuf->tsData, s); + } + pTSBuf->tsData.len = tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf, pTSBuf->tsData.allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize); - assert(pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem); + assert((pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem) && (pTSBuf->tsData.allocSize >= pTSBuf->tsData.len)); pCur->vnodeIndex = vnodeIndex; pCur->blockIndex = blockIndex; @@ -1210,15 +1319,19 @@ bool tsBufNextPos(STSBuf* pTSBuf) { if (pCur->vnodeIndex == -1) { if (pCur->order == TSQL_SO_ASC) { tsBufGetBlock(pTSBuf, 0, 0); - // list is empty - if (pTSBuf->block.numOfElem == 0) { + + if (pTSBuf->block.numOfElem == 0) { // the whole list is empty, return tsBufResetPos(pTSBuf); return false; } else { return true; } - } else { + + } else { // get the last timestamp record in the last block of the last vnode + assert(pTSBuf->numOfVnodes > 0); + int32_t vnodeIndex = pTSBuf->numOfVnodes - 1; + pCur->vnodeIndex = vnodeIndex; int32_t vnodeId = pTSBuf->pData[pCur->vnodeIndex].info.vnode; STSVnodeBlockInfo* pBlockInfo = tsBufGetVnodeBlockInfo(pTSBuf, vnodeId); @@ -1253,6 +1366,10 @@ bool tsBufNextPos(STSBuf* pTSBuf) { pCur->vnodeIndex = -1; return false; } + + if (pBlockInfo == NULL) { + return false; + } int32_t blockIndex = pCur->order == TSQL_SO_ASC ? 
0 : pBlockInfo->numOfBlocks - 1; tsBufGetBlock(pTSBuf, pCur->vnodeIndex + step, blockIndex); @@ -1321,7 +1438,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { tsBufFlush(pDestBuf); // compared with the last vnode id - if (vnodeId != pDestBuf->pData[pDestBuf->numOfVnodes - 1].info.vnode) { + if (vnodeId != tsBufGetLastVnodeInfo(pDestBuf)->info.vnode) { int32_t oldSize = pDestBuf->numOfVnodes; int32_t newSize = oldSize + pSrcBuf->numOfVnodes; @@ -1348,36 +1465,49 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { pDestBuf->numOfVnodes = newSize; } else { - STSVnodeBlockInfoEx* pBlockInfoEx = &pDestBuf->pData[pDestBuf->numOfVnodes - 1]; + STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pDestBuf); + pBlockInfoEx->len += pSrcBuf->pData[0].len; pBlockInfoEx->info.numOfBlocks += pSrcBuf->pData[0].info.numOfBlocks; pBlockInfoEx->info.compLen += pSrcBuf->pData[0].info.compLen; pBlockInfoEx->info.vnode = vnodeId; } - int64_t r = fseek(pDestBuf->f, 0, SEEK_END); + int32_t r = fseek(pDestBuf->f, 0, SEEK_END); assert(r == 0); int64_t offset = getDataStartOffset(); int32_t size = pSrcBuf->fileSize - offset; #ifdef LINUX - ssize_t rc = sendfile(fileno(pDestBuf->f), fileno(pSrcBuf->f), &offset, size); + ssize_t rc = tsendfile(fileno(pDestBuf->f), fileno(pSrcBuf->f), &offset, size); #else ssize_t rc = fsendfile(pDestBuf->f, pSrcBuf->f, &offset, size); #endif + if (rc == -1) { - printf("%s\n", strerror(errno)); + tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno)); return -1; } if (rc != size) { - printf("%s\n", strerror(errno)); + tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno)); return -1; } pDestBuf->numOfTotal += pSrcBuf->numOfTotal; + int32_t oldSize = pDestBuf->fileSize; + + struct stat fileStat; + fstat(fileno(pDestBuf->f), &fileStat); + pDestBuf->fileSize = (uint32_t)fileStat.st_size; + + assert(pDestBuf->fileSize == oldSize + size); + + tscTrace("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, numOfVnode:%d, autoDelete:%d", pDestBuf, + pDestBuf->path, fileno(pDestBuf->f), pDestBuf->fileSize, pDestBuf->numOfVnodes, pDestBuf->autoDelete); + return 0; } @@ -1394,7 +1524,7 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo); fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET); - fwrite((void*) pData, 1, len, pTSBuf->f); + fwrite((void*)pData, 1, len, pTSBuf->f); pTSBuf->fileSize += len; pTSBuf->tsOrder = order; @@ -1488,7 +1618,7 @@ void tsBufDisplay(STSBuf* pTSBuf) { while (tsBufNextPos(pTSBuf)) { STSElem elem = tsBufGetElem(pTSBuf); - printf("%d-%lld-%lld\n", elem.vnode, elem.tag, elem.ts); + printf("%d-%" PRId64 "-%" PRId64 "\n", elem.vnode, *(int64_t*) elem.tag, elem.ts); } pTSBuf->cur.order = old; diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 9afb74fec201353dc5980aa572a1a7af2fe4cae9..ecd2f97e394f50c5aed16ab7dea10fa3b9138cdb 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -13,9 +13,7 @@ * along with this program. If not, see . 
*/ -#include -#include -#include +#include "os.h" #include "taosmsg.h" #include "tcache.h" @@ -28,7 +26,9 @@ #include "tschemautil.h" #include "tsocket.h" -static int32_t getToStringLength(char *pData, int32_t length, int32_t type) { +static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, size_t valueLength); + +static int32_t getToStringLength(const char *pData, int32_t length, int32_t type) { char buf[512] = {0}; int32_t len = 0; @@ -41,7 +41,7 @@ static int32_t getToStringLength(char *pData, int32_t length, int32_t type) { case TSDB_DATA_TYPE_DOUBLE: { #ifdef _TD_ARM_32_ double dv = 0; - *(int64_t*)(&dv) = *(int64_t*)pData; + *(int64_t *)(&dv) = *(int64_t *)pData; len = sprintf(buf, "%f", dv); #else len = sprintf(buf, "%lf", *(double *)pData); @@ -49,12 +49,11 @@ static int32_t getToStringLength(char *pData, int32_t length, int32_t type) { if (strncasecmp("nan", buf, 3) == 0) { len = 4; } - } - break; + } break; case TSDB_DATA_TYPE_FLOAT: { #ifdef _TD_ARM_32_ float fv = 0; - *(int32_t*)(&fv) = *(int32_t*)pData; + *(int32_t *)(&fv) = *(int32_t *)pData; len = sprintf(buf, "%f", fv); #else len = sprintf(buf, "%f", *(float *)pData); @@ -62,11 +61,10 @@ static int32_t getToStringLength(char *pData, int32_t length, int32_t type) { if (strncasecmp("nan", buf, 3) == 0) { len = 4; } - } - break; + } break; case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - len = sprintf(buf, "%lld", *(int64_t *)pData); + len = sprintf(buf, "%" PRId64 "", *(int64_t *)pData); break; case TSDB_DATA_TYPE_BOOL: len = MAX_BOOL_TYPE_LENGTH; @@ -205,23 +203,21 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { case TSDB_DATA_TYPE_FLOAT: { #ifdef _TD_ARM_32_ float fv = 0; - *(int32_t*)(&fv) = *(int32_t*)pTagValue; + *(int32_t *)(&fv) = *(int32_t *)pTagValue; sprintf(target, "%f", fv); #else sprintf(target, "%f", *(float *)pTagValue); #endif - } - break; + } break; case TSDB_DATA_TYPE_DOUBLE: { #ifdef _TD_ARM_32_ double dv = 0; - *(int64_t*)(&dv) = *(int64_t*)pTagValue; + *(int64_t *)(&dv) = *(int64_t *)pTagValue; sprintf(target, "%lf", dv); #else sprintf(target, "%lf", *(double *)pTagValue); #endif - } - break; + } break; case TSDB_DATA_TYPE_TINYINT: sprintf(target, "%d", *(int8_t *)pTagValue); break; @@ -232,7 +228,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { sprintf(target, "%d", *(int32_t *)pTagValue); break; case TSDB_DATA_TYPE_BIGINT: - sprintf(target, "%lld", *(int64_t *)pTagValue); + sprintf(target, "%" PRId64 "", *(int64_t *)pTagValue); break; case TSDB_DATA_TYPE_BOOL: { char *val = (*((int8_t *)pTagValue) == 0) ? "false" : "true"; @@ -393,6 +389,68 @@ static int tscProcessQueryTags(SSqlObj *pSql) { } } +static void tscProcessCurrentUser(SSqlObj *pSql) { + SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + tscSetLocalQueryResult(pSql, pSql->pTscObj->user, pExpr->aliasName, TSDB_USER_LEN); +} + +static void tscProcessCurrentDB(SSqlObj *pSql) { + char db[TSDB_DB_NAME_LEN + 1] = {0}; + extractDBName(pSql->pTscObj->db, db); + + // no use db is invoked before. 
+ if (strlen(db) == 0) { + setNull(db, TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN); + } + + SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + tscSetLocalQueryResult(pSql, db, pExpr->aliasName, TSDB_DB_NAME_LEN); +} + +static void tscProcessServerVer(SSqlObj *pSql) { + const char* v = pSql->pTscObj->sversion; + + SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + tscSetLocalQueryResult(pSql, v, pExpr->aliasName, tListLen(pSql->pTscObj->sversion)); +} + +static void tscProcessClientVer(SSqlObj *pSql) { + SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + tscSetLocalQueryResult(pSql, version, pExpr->aliasName, strlen(version)); +} + +static void tscProcessServStatus(SSqlObj *pSql) { + STscObj* pObj = pSql->pTscObj; + + if (pObj->pHb != NULL) { + if (pObj->pHb->res.code == TSDB_CODE_NETWORK_UNAVAIL) { + pSql->res.code = TSDB_CODE_NETWORK_UNAVAIL; + return; + } + } else { + if (pSql->res.code == TSDB_CODE_NETWORK_UNAVAIL) { + return; + } + } + + SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + tscSetLocalQueryResult(pSql, "1", pExpr->aliasName, 2); +} + +void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, size_t valueLength) { + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + pCmd->numOfCols = 1; + pCmd->order.order = TSQL_SO_ASC; + + tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, columnName, valueLength); + tscInitResObjForLocalQuery(pSql, 1, valueLength); + + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0); + strncpy(pRes->data, val, pField->bytes); +} + int tscProcessLocalCmd(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; @@ -404,13 +462,23 @@ int tscProcessLocalCmd(SSqlObj *pSql) { pSql->res.code = (uint8_t)tscProcessQueryTags(pSql); } else if (pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { /* - * pass the qhandle check, in order to call partial release function to - * free allocated resources and remove the SqlObj from linked list + * set the qhandle to be 1 in order to pass the qhandle check, and to call partial release function to + * free allocated resources and remove the SqlObj from sql query linked list */ - pSql->res.qhandle = 0x1; // pass the qhandle check + pSql->res.qhandle = 0x1; pSql->res.numOfRows = 0; } else if (pCmd->command == TSDB_SQL_RESET_CACHE) { taosClearDataCache(tscCacheHandle); + } else if (pCmd->command == TSDB_SQL_SERV_VERSION) { + tscProcessServerVer(pSql); + } else if (pCmd->command == TSDB_SQL_CLI_VERSION) { + tscProcessClientVer(pSql); + } else if (pCmd->command == TSDB_SQL_CURRENT_USER) { + tscProcessCurrentUser(pSql); + } else if (pCmd->command == TSDB_SQL_CURRENT_DB) { + tscProcessCurrentDB(pSql); + } else if (pCmd->command == TSDB_SQL_SERV_STATUS) { + tscProcessServStatus(pSql); } else { pSql->res.code = TSDB_CODE_INVALID_SQL; tscError("%p not support command:%d", pSql, pCmd->command); diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 37a32e9ebb360ed0a3bc1f9f97a5b70955782ec5..2a6c80e06be660bf41ec9d1c5f03ee7596e96b13 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -18,25 +18,8 @@ #define _XOPEN_SOURCE -#pragma GCC diagnostic ignored "-Woverflow" -#pragma GCC diagnostic ignored "-Wunused-variable" - -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "ihash.h" #include "os.h" +#include "ihash.h" #include "tscSecondaryMerge.h" #include "tscUtil.h" #include "tschemautil.h" @@ -48,18 +31,11 @@ #include "tstoken.h" #include "ttime.h" -#define 
INVALID_SQL_RET_MSG(p, ...) \ - do { \ - sprintf(p, __VA_ARGS__); \ - return TSDB_CODE_INVALID_SQL; \ - } while (0) - enum { TSDB_USE_SERVER_TS = 0, TSDB_USE_CLI_TS = 1, }; -static void setErrMsg(char *msg, const char *sql); static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize); static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) { @@ -77,6 +53,7 @@ static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) { radix = 2; } + errno = 0; *value = strtoll(pToken->z, endPtr, radix); return numType; @@ -87,13 +64,15 @@ static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) { if (TK_ILLEGAL == numType) { return numType; } + + errno = 0; *value = strtod(pToken->z, endPtr); return numType; } int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) { - char * token; - int tokenlen; + //char * token; //fang not used + //int tokenlen; //fang not used int32_t index = 0; SSQLToken sToken; int64_t interval; @@ -111,7 +90,7 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1 } else { // strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm); if (taosParseTime(pToken->z, time, pToken->n, timePrec) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; + return tscInvalidSQLErrMsg(error, "invalid timestamp format", pToken->z); } return TSDB_CODE_SUCCESS; @@ -136,18 +115,21 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1 index = 0; sToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL); pTokenEnd += index; + if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) { + index = 0; valueToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL); pTokenEnd += index; + if (valueToken.n < 2) { - strcpy(error, "value is expected"); - return TSDB_CODE_INVALID_SQL; + return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z); } if (getTimestampInUsFromStr(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } + if (timePrec == TSDB_TIME_PRECISION_MILLI) { interval /= 1000; } @@ -170,8 +152,8 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, int64_t iv; int32_t numType; char * endptr = NULL; - errno = 0; // reset global error code - + errno = 0; // clear the previous existed error information + switch (pSchema->type) { case TSDB_DATA_TYPE_BOOL: { // bool if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { @@ -182,7 +164,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, } else if (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0) { *(uint8_t *)payload = TSDB_DATA_BOOL_NULL; } else { - INVALID_SQL_RET_MSG(msg, "data is illegal"); + return tscInvalidSQLErrMsg(msg, "invalid bool data", pToken->z); } } else if (pToken->type == TK_INTEGER) { iv = strtoll(pToken->z, NULL, 10); @@ -193,7 +175,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, } else if (pToken->type == TK_NULL) { *(uint8_t *)payload = TSDB_DATA_BOOL_NULL; } else { - INVALID_SQL_RET_MSG(msg, "data is illegal"); + return tscInvalidSQLErrMsg(msg, "invalid bool data", pToken->z); } break; } @@ -205,13 +187,13 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, *((int8_t *)payload) = TSDB_DATA_TINYINT_NULL; } else { numType = tscToInteger(pToken, &iv, &endptr); - if (errno == ERANGE || iv > INT8_MAX || iv <= INT8_MIN) { - 
INVALID_SQL_RET_MSG(msg, "data is overflow"); - } else if (TK_ILLEGAL == numType) { - INVALID_SQL_RET_MSG(msg, "data is illegal"); + if (TK_ILLEGAL == numType) { + return tscInvalidSQLErrMsg(msg, "invalid tinyint data", pToken->z); + } else if (errno == ERANGE || iv > INT8_MAX || iv <= INT8_MIN) { + return tscInvalidSQLErrMsg(msg, "tinyint data overflow", pToken->z); } - *((int8_t *)payload) = (int8_t)iv; + *((int8_t *)payload) = (int8_t) iv; } break; @@ -224,10 +206,10 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, *((int16_t *)payload) = TSDB_DATA_SMALLINT_NULL; } else { numType = tscToInteger(pToken, &iv, &endptr); - if (errno == ERANGE || iv > INT16_MAX || iv <= INT16_MIN) { - INVALID_SQL_RET_MSG(msg, "data is overflow"); - } else if (TK_ILLEGAL == numType) { - INVALID_SQL_RET_MSG(msg, "data is illegal"); + if (TK_ILLEGAL == numType) { + return tscInvalidSQLErrMsg(msg, "invalid smallint data", pToken->z); + } else if (errno == ERANGE || iv > INT16_MAX || iv <= INT16_MIN) { + return tscInvalidSQLErrMsg(msg, "smallint data overflow", pToken->z); } *((int16_t *)payload) = (int16_t)iv; @@ -242,10 +224,10 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, *((int32_t *)payload) = TSDB_DATA_INT_NULL; } else { numType = tscToInteger(pToken, &iv, &endptr); - if (errno == ERANGE || iv > INT32_MAX || iv <= INT32_MIN) { - INVALID_SQL_RET_MSG(msg, "data is overflow"); - } else if (TK_ILLEGAL == numType) { - INVALID_SQL_RET_MSG(msg, "data is illegal"); + if (TK_ILLEGAL == numType) { + return tscInvalidSQLErrMsg(msg, "invalid int data", pToken->z); + } else if (errno == ERANGE || iv > INT32_MAX || iv <= INT32_MIN) { + return tscInvalidSQLErrMsg(msg, "int data overflow", pToken->z); } *((int32_t *)payload) = (int32_t)iv; @@ -261,10 +243,10 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, *((int64_t *)payload) = TSDB_DATA_BIGINT_NULL; } else { numType = tscToInteger(pToken, &iv, &endptr); - if (errno == ERANGE || iv > INT64_MAX || iv <= INT64_MIN) { - INVALID_SQL_RET_MSG(msg, "data is overflow"); - } else if (TK_ILLEGAL == numType) { - INVALID_SQL_RET_MSG(msg, "data is illegal"); + if (TK_ILLEGAL == numType) { + return tscInvalidSQLErrMsg(msg, "invalid bigint data", pToken->z); + } else if (errno == ERANGE || iv > INT64_MAX || iv <= INT64_MIN) { + return tscInvalidSQLErrMsg(msg, "bigint data overflow", pToken->z); } *((int64_t *)payload) = iv; @@ -280,12 +262,12 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, } else { double dv; if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - INVALID_SQL_RET_MSG(msg, "data is illegal"); + return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z); } float fv = (float)dv; if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || (fv > FLT_MAX || fv < -FLT_MAX)) { - INVALID_SQL_RET_MSG(msg, "data is illegal"); + return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z); } if (isinf(fv) || isnan(fv)) { @@ -305,11 +287,11 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, } else { double dv; if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - INVALID_SQL_RET_MSG(msg, "data is illegal"); + return tscInvalidSQLErrMsg(msg, "illegal double data", pToken->z); } if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || (dv > DBL_MAX || dv < -DBL_MAX)) { - INVALID_SQL_RET_MSG(msg, "data is illegal"); + return tscInvalidSQLErrMsg(msg, "illegal double data", pToken->z); } 
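The overflow handling for the integer columns follows a simple recipe: clear errno, parse with strtoll, treat a non-numeric remainder as illegal data, and report overflow when errno is ERANGE or the value falls outside the column type's range (the type's minimum is also rejected, since it doubles as the NULL marker). A minimal sketch of that recipe for the tinyint case, with hypothetical helper names:

```c
/*
 * Illustrative sketch of the errno/ERANGE range check applied to the
 * integer columns above; helper names are hypothetical.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* returns 0 on success, -1 for illegal data, -2 for tinyint overflow */
static int parseTinyint(const char *token, int8_t *out) {
  char *end = NULL;
  errno = 0;                              /* clear any previous error before strtoll */
  long long v = strtoll(token, &end, 10);
  if (end == token || *end != '\0') {
    return -1;                            /* not a valid integer literal */
  }
  /* the minimum value is reserved as the NULL marker, so it is rejected too */
  if (errno == ERANGE || v > INT8_MAX || v <= INT8_MIN) {
    return -2;                            /* parses as int64 but does not fit the column */
  }
  *out = (int8_t)v;
  return 0;
}

int main(void) {
  int8_t v = 0;
  int rc = parseTinyint("120", &v);
  printf("%d -> %d\n", rc, v);            /* 0 -> 120 */
  printf("%d\n", parseTinyint("300", &v)); /* -2: tinyint data overflow */
  printf("%d\n", parseTinyint("12x", &v)); /* -1: invalid tinyint data */
  return 0;
}
```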
if (isinf(dv) || isnan(dv)) { @@ -324,11 +306,11 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, // binary data cannot be null-terminated char string, otherwise the last char of the string is lost if (pToken->type == TK_NULL) { *payload = TSDB_DATA_BINARY_NULL; - } else { - // too long values will return invalid sql, not be truncated automatically + } else { // too long values will return invalid sql, not be truncated automatically if (pToken->n > pSchema->bytes) { - INVALID_SQL_RET_MSG(msg, "value too long"); + return tscInvalidSQLErrMsg(msg, "string data overflow", pToken->z); } + strncpy(payload, pToken->z, pToken->n); } @@ -340,8 +322,10 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, } else { // if the converted output len is over than pSchema->bytes, return error: 'Argument list too long' if (!taosMbsToUcs4(pToken->z, pToken->n, payload, pSchema->bytes)) { - sprintf(msg, "%s", strerror(errno)); - return TSDB_CODE_INVALID_SQL; + char buf[512] = {0}; + snprintf(buf, 512, "%s", strerror(errno)); + + return tscInvalidSQLErrMsg(msg, buf, pToken->z); } } break; @@ -356,8 +340,9 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, } else { int64_t temp; if (tsParseTime(pToken, &temp, str, msg, timePrec) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; + return tscInvalidSQLErrMsg(msg, "invalid timestamp", pToken->z); } + *((int64_t *)payload) = temp; } @@ -365,18 +350,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, } } - return 0; -} - -// todo merge the error msg function with tSQLParser -static void setErrMsg(char *msg, const char *sql) { - const char * msgFormat = "near \"%s\" syntax error"; - const int32_t BACKWARD_CHAR_STEP = 15; - - // only extract part of sql string,avoid too long sql string cause stack over flow - char buf[64] = {0}; - strncpy(buf, (sql - BACKWARD_CHAR_STEP), tListLen(buf) - 1); - sprintf(msg, msgFormat, buf); + return TSDB_CODE_SUCCESS; } /* @@ -399,7 +373,8 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start } } else { if (pDataBlocks->tsSource == TSDB_USE_SERVER_TS) { - return -1; + return -1; // client time/server time can not be mixed + } else if (pDataBlocks->tsSource == -1) { pDataBlocks->tsSource = TSDB_USE_CLI_TS; } @@ -414,10 +389,10 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start } int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error, - int16_t timePrec) { + int16_t timePrec, int32_t *code, char* tmpTokenBuf) { int32_t index = 0; - bool isPrevOptr; - SSQLToken sToken; + //bool isPrevOptr; //fang, never used + SSQLToken sToken = {0}; char * payload = pDataBlocks->pData + pDataBlocks->size; // 1. 
set the parsed value from sql string @@ -438,30 +413,55 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) { continue; } + strcpy(error, "client out of memory"); + *code = TSDB_CODE_CLI_OUT_OF_MEMORY; return -1; } if (((sToken.type != TK_NOW) && (sToken.type != TK_INTEGER) && (sToken.type != TK_STRING) && (sToken.type != TK_FLOAT) && (sToken.type != TK_BOOL) && (sToken.type != TK_NULL)) || (sToken.n == 0) || (sToken.type == TK_RP)) { - setErrMsg(error, *str); + tscInvalidSQLErrMsg(error, "invalid data or symbol", sToken.z); + *code = TSDB_CODE_INVALID_SQL; return -1; } // Remove quotation marks if (TK_STRING == sToken.type) { - sToken.z++; - sToken.n -= 2; + // delete escape character: \\, \', \" + char delim = sToken.z[0]; + int32_t cnt = 0; + int32_t j = 0; + for (int32_t k = 1; k < sToken.n - 1; ++k) { + if (sToken.z[k] == delim || sToken.z[k] == '\\') { + if (sToken.z[k + 1] == delim) { + cnt++; + tmpTokenBuf[j] = sToken.z[k + 1]; + j++; + k++; + continue; + } + } + + tmpTokenBuf[j] = sToken.z[k]; + j++; + } + tmpTokenBuf[j] = 0; + sToken.z = tmpTokenBuf; + sToken.n -= 2 + cnt; } bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, error, str, isPrimaryKey, timePrec); if (ret != TSDB_CODE_SUCCESS) { + *code = TSDB_CODE_INVALID_SQL; return -1; // NOTE: here 0 mean error! } if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) { + tscInvalidSQLErrMsg(error, "client time/server time can not be mixed up", sToken.z); + *code = TSDB_CODE_INVALID_TIME_STAMP; return -1; } } @@ -471,8 +471,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ char *ptr = payload; for (int32_t i = 0; i < spd->numOfCols; ++i) { - if (!spd->hasVal[i]) { - // current column do not have any value to insert, set it to null + if (!spd->hasVal[i]) { // current column do not have any value to insert, set it to null setNull(ptr, schema[i].type, schema[i].bytes); } @@ -497,7 +496,7 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) { } int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMeta, int maxRows, - SParsedDataColInfo *spd, char *error) { + SParsedDataColInfo *spd, char *error, int32_t *code, char* tmpTokenBuf) { int32_t index = 0; SSQLToken sToken; @@ -508,6 +507,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe if (spd->hasVal[0] == false) { strcpy(error, "primary timestamp column can not be null"); + *code = TSDB_CODE_INVALID_SQL; return -1; } @@ -519,16 +519,17 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe *str += index; if (numOfRows >= maxRows || pDataBlock->size + pMeterMeta->rowSize >= pDataBlock->nAllocSize) { int32_t tSize = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize); - if (0 == tSize) { + if (0 == tSize) { //TODO pass the correct error code to client strcpy(error, "client out of memory"); + *code = TSDB_CODE_CLI_OUT_OF_MEMORY; return -1; } + maxRows += tSize; } - int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision); - if (len <= 0) { - setErrMsg(error, *str); + int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf); + if (len <= 0) { // error message has been set in tsParseOneRowData return -1; } @@ -538,7 +539,8 @@ int tsParseValues(char 
**str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe sToken = tStrGetToken(*str, &index, false, 0, NULL); *str += index; if (sToken.n == 0 || sToken.type != TK_RP) { - setErrMsg(error, *str); + tscInvalidSQLErrMsg(error, ") expected", *str); + *code = TSDB_CODE_INVALID_SQL; return -1; } @@ -547,6 +549,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe if (numOfRows <= 0) { strcpy(error, "no any data points"); + *code = TSDB_CODE_INVALID_SQL; return -1; } else { return numOfRows; @@ -658,10 +661,17 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char if (0 == maxNumOfRows) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } + + int32_t code = TSDB_CODE_INVALID_SQL; + char* tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \" + if (NULL == tmpTokenBuf) { + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } - int32_t numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload); + int32_t numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload, &code, tmpTokenBuf); + free(tmpTokenBuf); if (numOfRows <= 0) { - return TSDB_CODE_INVALID_SQL; + return code; } for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) { @@ -733,8 +743,7 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { return TSDB_CODE_INVALID_SQL; } - if (sToken.type == TK_USING) { - // create table if not exists + if (sToken.type == TK_USING) { // create table if not exists index = 0; sToken = tStrGetToken(sql, &index, false, 0, NULL); sql += index; @@ -750,25 +759,87 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { } if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - strcpy(pCmd->payload, "create table only from super table is allowed"); - return TSDB_CODE_INVALID_SQL; + return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z); } - char * tagVal = pTag->data; SSchema *pTagSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); index = 0; sToken = tStrGetToken(sql, &index, false, 0, NULL); sql += index; + + SParsedDataColInfo spd = {0}; + uint8_t numOfTags = pMeterMetaInfo->pMeterMeta->numOfTags; + spd.numOfCols = numOfTags; + + // if specify some tags column + if (sToken.type != TK_LP) { + tscSetAssignedColumnInfo(&spd, pTagSchema, numOfTags); + } else { + /* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen) tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... 
vn); */ + int16_t offset[TSDB_MAX_COLUMNS] = {0}; + for (int32_t t = 1; t < numOfTags; ++t) { + offset[t] = offset[t - 1] + pTagSchema[t - 1].bytes; + } + + while (1) { + index = 0; + sToken = tStrGetToken(sql, &index, false, 0, NULL); + sql += index; + + if (TK_STRING == sToken.type) { + sToken.n = strdequote(sToken.z); + strtrim(sToken.z); + sToken.n = (uint32_t)strlen(sToken.z); + } + + if (sToken.type == TK_RP) { + break; + } + + bool findColumnIndex = false; + + // todo speedup by using hash list + for (int32_t t = 0; t < numOfTags; ++t) { + if (strncmp(sToken.z, pTagSchema[t].name, sToken.n) == 0 && strlen(pTagSchema[t].name) == sToken.n) { + SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++]; + pElem->offset = offset[t]; + pElem->colIndex = t; + + if (spd.hasVal[t] == true) { + return tscInvalidSQLErrMsg(pCmd->payload, "duplicated tag name", sToken.z); + } + + spd.hasVal[t] = true; + findColumnIndex = true; + break; + } + } + + if (!findColumnIndex) { + return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken.z); + } + } + + if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > numOfTags) { + return tscInvalidSQLErrMsg(pCmd->payload, "tag name expected", sToken.z); + } + + index = 0; + sToken = tStrGetToken(sql, &index, false, 0, NULL); + sql += index; + } + if (sToken.type != TK_TAGS) { - setErrMsg(pCmd->payload, sql); - return TSDB_CODE_INVALID_SQL; + return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z); } - int32_t numOfTagValues = 0; uint32_t ignoreTokenTypes = TK_LP; uint32_t numOfIgnoreToken = 1; - while (1) { + for (int i = 0; i < spd.numOfAssignedCols; ++i) { + char* tagVal = pTag->data + spd.elems[i].offset; + int16_t colIndex = spd.elems[i].colIndex; + index = 0; sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes); sql += index; @@ -784,31 +855,39 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { sToken.n -= 2; } - code = tsParseOneColumnData(&pTagSchema[numOfTagValues], &sToken, tagVal, pCmd->payload, &sql, false, - pMeterMetaInfo->pMeterMeta->precision); + code = tsParseOneColumnData(&pTagSchema[colIndex], &sToken, tagVal, pCmd->payload, &sql, false, pMeterMetaInfo->pMeterMeta->precision); if (code != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd->payload, sql); - return TSDB_CODE_INVALID_SQL; + return code; } - if ((pTagSchema[numOfTagValues].type == TSDB_DATA_TYPE_BINARY || - pTagSchema[numOfTagValues].type == TSDB_DATA_TYPE_NCHAR) && - sToken.n > pTagSchema[numOfTagValues].bytes) { - strcpy(pCmd->payload, "tag value too long"); - return TSDB_CODE_INVALID_SQL; + if ((pTagSchema[colIndex].type == TSDB_DATA_TYPE_BINARY || + pTagSchema[colIndex].type == TSDB_DATA_TYPE_NCHAR) && sToken.n > pTagSchema[colIndex].bytes) { + return tscInvalidSQLErrMsg(pCmd->payload, "string too long", sToken.z); } + } - tagVal += pTagSchema[numOfTagValues++].bytes; + index = 0; + sToken = tStrGetToken(sql, &index, false, 0, NULL); + sql += index; + if (sToken.n == 0 || sToken.type != TK_RP) { + return tscInvalidSQLErrMsg(pCmd->payload, ") expected", sToken.z); } - if (numOfTagValues != pMeterMetaInfo->pMeterMeta->numOfTags) { - setErrMsg(pCmd->payload, sql); - return TSDB_CODE_INVALID_SQL; + // 2. 
set the null value for the columns that do not assign values + if (spd.numOfAssignedCols < spd.numOfCols) { + char *ptr = pTag->data; + + for (int32_t i = 0; i < spd.numOfCols; ++i) { + if (!spd.hasVal[i]) { // current tag column do not have any value to insert, set it to null + setNull(ptr, pTagSchema[i].type, pTagSchema[i].bytes); + } + + ptr += pTagSchema[i].bytes; + } } if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd->payload, sql); - return TSDB_CODE_INVALID_SQL; + return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr); } int32_t ret = setMeterID(pSql, &tableToken, 0); @@ -858,27 +937,29 @@ int validateTableName(char *tblName, int len) { * @param pSql * @return */ -int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { +int doParserInsertSql(SSqlObj *pSql, char *str) { SSqlCmd *pCmd = &pSql->cmd; - - pCmd->command = TSDB_SQL_INSERT; - pCmd->isInsertFromFile = -1; - pCmd->count = 0; - - pSql->res.numOfRows = 0; + + int32_t code = TSDB_CODE_INVALID_SQL; int32_t totalNum = 0; - int code = TSDB_CODE_INVALID_SQL; - SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd); if ((code = tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) { return code; } - void *pTableHashList = taosInitIntHash(128, sizeof(void *), taosHashInt); - - pSql->cmd.pDataBlocks = tscCreateBlockArrayList(); + if ((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList)) { + pSql->pTableHashList = taosInitIntHash(128, POINTER_BYTES, taosHashInt); + pSql->cmd.pDataBlocks = tscCreateBlockArrayList(); + if (NULL == pSql->pTableHashList || NULL == pSql->cmd.pDataBlocks) { + code = TSDB_CODE_CLI_OUT_OF_MEMORY; + goto _error_clean; + } + } else { + str = pSql->asyncTblPos; + } + tscTrace("%p create data block list for submit data, %p", pSql, pSql->cmd.pDataBlocks); while (1) { @@ -897,13 +978,15 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { } } + pSql->asyncTblPos = sToken.z; + // Check if the table name available or not if (validateTableName(sToken.z, sToken.n) != TSDB_CODE_SUCCESS) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "table name is invalid"); + code = tscInvalidSQLErrMsg(pCmd->payload, "table name invalid", sToken.z); goto _error_clean; } + //TODO refactor if ((code = setMeterID(pSql, &sToken, 0)) != TSDB_CODE_SUCCESS) { goto _error_clean; } @@ -911,7 +994,8 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { void *fp = pSql->fp; if ((code = tscParseSqlForCreateTableOnDemand(&str, pSql)) != TSDB_CODE_SUCCESS) { if (fp != NULL) { - goto _clean; + //goto _clean; + return code; } else { /* * for async insert, the free data block operations, which is tscDestroyBlockArrayList, @@ -923,8 +1007,7 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { } if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "insert data into metric is not supported"); + code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL); goto _error_clean; } @@ -932,8 +1015,7 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { sToken = tStrGetToken(str, &index, false, 0, NULL); str += index; if (sToken.n == 0) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "keyword VALUES or FILE are required"); + code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z); goto _error_clean; } @@ -947,8 +1029,7 @@ int 
tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { pCmd->isInsertFromFile = 0; } else { if (pCmd->isInsertFromFile == 1) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up"); + code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z); goto _error_clean; } } @@ -957,18 +1038,16 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { * app here insert data in different vnodes, so we need to set the following * data in another submit procedure using async insert routines */ - code = doParseInsertStatement(pSql, pTableHashList, &str, &spd, &totalNum); + code = doParseInsertStatement(pSql, pSql->pTableHashList, &str, &spd, &totalNum); if (code != TSDB_CODE_SUCCESS) { goto _error_clean; } - } else if (sToken.type == TK_FILE) { if (pCmd->isInsertFromFile == -1) { pCmd->isInsertFromFile = 1; } else { if (pCmd->isInsertFromFile == 0) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up"); + code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z); goto _error_clean; } } @@ -977,8 +1056,7 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { sToken = tStrGetToken(str, &index, false, 0, NULL); str += index; if (sToken.n == 0) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "file path is required following keyword FILE"); + code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); goto _error_clean; } @@ -988,14 +1066,13 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { wordexp_t full_path; if (wordexp(fname, &full_path, 0) != 0) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "invalid filename"); + code = tscInvalidSQLErrMsg(pCmd->payload, "invalid filename", sToken.z); goto _error_clean; } strcpy(fname, full_path.we_wordv[0]); wordfree(&full_path); - STableDataBlocks *pDataBlock = tscCreateDataBlockEx(PATH_MAX, pMeterMetaInfo->pMeterMeta->rowSize, + STableDataBlocks *pDataBlock = tscCreateDataBlock(PATH_MAX, pMeterMetaInfo->pMeterMeta->rowSize, sizeof(SShellSubmitBlock), pMeterMetaInfo->name); tscAppendDataBlock(pCmd->pDataBlocks, pDataBlock); @@ -1008,8 +1085,7 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { if (pCmd->isInsertFromFile == -1) { pCmd->isInsertFromFile = 0; } else if (pCmd->isInsertFromFile == 1) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up"); + code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z); goto _error_clean; } @@ -1046,8 +1122,7 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { pElem->colIndex = t; if (spd.hasVal[t] == true) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "duplicated column name"); + code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z); goto _error_clean; } @@ -1058,15 +1133,13 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { } if (!findColumnIndex) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "invalid column name"); + code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column name", sToken.z); goto _error_clean; } } if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > pMeterMeta->numOfColumns) { - code = TSDB_CODE_INVALID_SQL; 
- sprintf(pCmd->payload, "column name expected"); + code = tscInvalidSQLErrMsg(pCmd->payload, "column name expected", sToken.z); goto _error_clean; } @@ -1075,18 +1148,16 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { str += index; if (sToken.type != TK_VALUES) { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "keyword VALUES is expected"); + code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z); goto _error_clean; } - code = doParseInsertStatement(pSql, pTableHashList, &str, &spd, &totalNum); + code = doParseInsertStatement(pSql, pSql->pTableHashList, &str, &spd, &totalNum); if (code != TSDB_CODE_SUCCESS) { goto _error_clean; } } else { - code = TSDB_CODE_INVALID_SQL; - sprintf(pCmd->payload, "keyword VALUES or FILE are required"); + code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z); goto _error_clean; } } @@ -1095,7 +1166,7 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { if (pCmd->numOfParams > 0) { goto _clean; } - + // submit to more than one vnode if (pCmd->pDataBlocks->nSize > 0) { // merge according to vgid @@ -1108,8 +1179,10 @@ int tsParseInsertStatement(SSqlObj *pSql, char *str, char *acct, char *db) { goto _error_clean; } + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + // set the next sent data vnode index in data block arraylist - pCmd->vnodeIdx = 1; + pMeterMetaInfo->vnodeIndex = 1; } else { pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); } @@ -1121,7 +1194,8 @@ _error_clean: pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); _clean: - taosCleanUpIntHash(pTableHashList); + taosCleanUpIntHash(pSql->pTableHashList); + pSql->pTableHashList = NULL; return code; } @@ -1130,29 +1204,25 @@ int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db) { return TSDB_CODE_NO_RIGHTS; } - int32_t index = 0; + int32_t index = 0; SSqlCmd *pCmd = &pSql->cmd; SSQLToken sToken = tStrGetToken(sql, &index, false, 0, NULL); - if (sToken.type == TK_IMPORT) { - pCmd->order.order = TSQL_SO_ASC; - } else if (sToken.type != TK_INSERT) { - if (sToken.n) { - sToken.z[sToken.n] = 0; - sprintf(pCmd->payload, "invalid keyword:%s", sToken.z); - } else { - strcpy(pCmd->payload, "no any keywords"); - } - return TSDB_CODE_INVALID_SQL; - } - + + assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT); + pCmd->import = (sToken.type == TK_IMPORT); + sToken = tStrGetToken(sql, &index, false, 0, NULL); if (sToken.type != TK_INTO) { - strcpy(pCmd->payload, "keyword INTO is expected"); - return TSDB_CODE_INVALID_SQL; + return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z); } - - return tsParseInsertStatement(pSql, sql + index, acct, db); + + pCmd->count = 0; + pCmd->command = TSDB_SQL_INSERT; + pCmd->isInsertFromFile = -1; + pSql->res.numOfRows = 0; + + return doParserInsertSql(pSql, sql + index); } int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) { @@ -1160,7 +1230,11 @@ int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) { // must before clean the sqlcmd object tscRemoveAllMeterMetaInfo(&pSql->cmd, false); - tscCleanSqlCmd(&pSql->cmd); + + if (NULL == pSql->asyncTblPos) { + tscTrace("continue parse sql: %s", pSql->asyncTblPos); + tscCleanSqlCmd(&pSql->cmd); + } if (tscIsInsertOrImportData(pSql->sqlstr)) { /* @@ -1225,7 +1299,7 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock return TSDB_CODE_SUCCESS; 
} -static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) { +static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp, char *tmpTokenBuf) { size_t readLen = 0; char * line = NULL; size_t n = 0; @@ -1240,8 +1314,8 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) { int32_t rowSize = pMeterMeta->rowSize; pCmd->pDataBlocks = tscCreateBlockArrayList(); - STableDataBlocks *pTableDataBlock = - tscCreateDataBlockEx(TSDB_PAYLOAD_SIZE, pMeterMeta->rowSize, sizeof(SShellSubmitBlock), pMeterMetaInfo->name); + STableDataBlocks *pTableDataBlock = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, pMeterMeta->rowSize, + sizeof(SShellSubmitBlock), pMeterMetaInfo->name); tscAppendDataBlock(pCmd->pDataBlocks, pTableDataBlock); @@ -1257,7 +1331,7 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) { while ((readLen = getline(&line, &n, fp)) != -1) { // line[--readLen] = '\0'; if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) line[--readLen] = 0; - if (readLen <= 0) continue; + if (readLen == 0) continue; //fang, <= to == char *lineptr = line; strtolower(line, line); @@ -1268,11 +1342,12 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) { maxRows += tSize; } - len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision); + len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision, &code, tmpTokenBuf); if (len <= 0 || pTableDataBlock->numOfParams > 0) { - pSql->res.code = TSDB_CODE_INVALID_SQL; - return -1; + pSql->res.code = code; + return (-code); } + pTableDataBlock->size += len; count++; @@ -1330,19 +1405,19 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) { int32_t code = TSDB_CODE_SUCCESS; /* the first block has been sent to server in processSQL function */ - assert(pCmd->isInsertFromFile != -1 && pCmd->vnodeIdx >= 1 && pCmd->pDataBlocks != NULL); + assert(pCmd->isInsertFromFile != -1 && pMeterMetaInfo->vnodeIndex >= 1 && pCmd->pDataBlocks != NULL); - if (pCmd->vnodeIdx < pCmd->pDataBlocks->nSize) { + if (pMeterMetaInfo->vnodeIndex < pCmd->pDataBlocks->nSize) { SDataBlockList *pDataBlocks = pCmd->pDataBlocks; - for (int32_t i = pCmd->vnodeIdx; i < pDataBlocks->nSize; ++i) { + for (int32_t i = pMeterMetaInfo->vnodeIndex; i < pDataBlocks->nSize; ++i) { pDataBlock = pDataBlocks->pData[i]; if (pDataBlock == NULL) { continue; } if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) { - tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx, pDataBlocks->nSize); + tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex, pDataBlocks->nSize); continue; } @@ -1399,8 +1474,16 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) { tscError("%p get meter meta failed, abort", pSql); continue; } + + char* tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \" + if (NULL == tmpTokenBuf) { + tscError("%p calloc failed", pSql); + continue; + } - int nrows = tscInsertDataFromFile(pSql, fp); + int nrows = tscInsertDataFromFile(pSql, fp, tmpTokenBuf); + free(tmpTokenBuf); + pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); if (nrows < 0) { diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index e956d6159e4a02707e15197672b59edc66466043..ef8ddfd211acbcf76ae2a9471d6aca8cc4a40c97 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -13,12 +13,9 @@ * along with this program. If not, see . 
*/ -#include -#include - #include "taos.h" #include "tsclient.h" -#include "tsql.h" +#include "tscSQLParser.h" #include "tscUtil.h" #include "ttimer.h" #include "taosmsg.h" @@ -78,7 +75,6 @@ static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_ if (isParam) { ++stmt->numParams; } - return TSDB_CODE_SUCCESS; } @@ -412,7 +408,9 @@ static int insertStmtReset(STscStmt* pStmt) { } } pCmd->batchSize = 0; - pCmd->vnodeIdx = 0; + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + pMeterMetaInfo->vnodeIndex = 0; return TSDB_CODE_SUCCESS; } @@ -425,6 +423,8 @@ static int insertStmtExecute(STscStmt* stmt) { ++pCmd->batchSize; } + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + if (pCmd->pDataBlocks->nSize > 0) { // merge according to vgid int code = tscMergeTableDataBlocks(stmt->pSql, pCmd->pDataBlocks); @@ -439,7 +439,7 @@ static int insertStmtExecute(STscStmt* stmt) { } // set the next sent data vnode index in data block arraylist - pCmd->vnodeIdx = 1; + pMeterMetaInfo->vnodeIndex = 1; } else { pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); } diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index 770e61e27827f06fc22e0d73831041a86b8f3bf8..f5925b61cd3c78895f87bc02b3e4fee2b65fb2f4 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -13,9 +13,6 @@ * along with this program. If not, see . */ -#include -#include - #include "os.h" #include "tlog.h" #include "tsclient.h" @@ -23,6 +20,27 @@ #include "ttimer.h" #include "tutil.h" +void tscSaveSlowQueryFp(void *handle, void *tmrId); +void *tscSlowQueryConn = NULL; +bool tscSlowQueryConnInitialized = false; +TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), + void *param, void **taos); + +void tscInitConnCb(void *param, TAOS_RES *result, int code) { + char *sql = param; + if (code < 0) { + tscError("taos:%p, slow query connect failed, code:%d", tscSlowQueryConn, code); + taos_close(tscSlowQueryConn); + tscSlowQueryConn = NULL; + tscSlowQueryConnInitialized = false; + free(sql); + } else { + tscTrace("taos:%p, slow query connect success, code:%d", tscSlowQueryConn, code); + tscSlowQueryConnInitialized = true; + tscSaveSlowQueryFp(sql, NULL); + } +} + void tscAddIntoSqlList(SSqlObj *pSql) { static uint32_t queryId = 1; @@ -47,36 +65,38 @@ void tscAddIntoSqlList(SSqlObj *pSql) { void tscSaveSlowQueryFpCb(void *param, TAOS_RES *result, int code) { if (code < 0) { - tscError("failed to save slowquery, code:%d", code); + tscError("failed to save slow query, code:%d", code); + } else { + tscTrace("success to save slow query, code:%d", code); } } void tscSaveSlowQueryFp(void *handle, void *tmrId) { char *sql = handle; - static void *taos = NULL; - if (taos == NULL) { - taos = taos_connect(NULL, "monitor", tsInternalPass, NULL, 0); - if (taos == NULL) { - tscError("failed to save slow query, can't connect to server"); + if (!tscSlowQueryConnInitialized) { + if (tscSlowQueryConn == NULL) { + tscTrace("start to init slow query connect"); + taos_connect_a(NULL, "monitor", tsInternalPass, "", 0, tscInitConnCb, sql, &tscSlowQueryConn); + } else { + tscError("taos:%p, slow query connect is already initialized", tscSlowQueryConn); free(sql); - return; } + } else { + tscTrace("taos:%p, save slow query:%s", tscSlowQueryConn, sql); + taos_query_a(tscSlowQueryConn, sql, tscSaveSlowQueryFpCb, NULL); + free(sql); } - - tscTrace("save slow query:sql", sql); - taos_query_a(taos, sql, 
tscSaveSlowQueryFpCb, NULL); - free(sql); } void tscSaveSlowQuery(SSqlObj *pSql) { const static int64_t SLOW_QUERY_INTERVAL = 3000000L; if (pSql->res.useconds < SLOW_QUERY_INTERVAL) return; - tscTrace("%p query time:%ld sql:%s", pSql, pSql->res.useconds, pSql->sqlstr); + tscTrace("%p query time:%" PRId64 " sql:%s", pSql, pSql->res.useconds, pSql->sqlstr); char *sql = malloc(200); - int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %lld, %lld, '", tsMonitorDbName, + int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %" PRId64 ", %" PRId64 ", '", tsMonitorDbName, pSql->pTscObj->user, pSql->stime, pSql->res.useconds); int sqlLen = snprintf(sql + len, TSDB_SHOW_SQL_LEN, "%s", pSql->sqlstr); if (sqlLen > TSDB_SHOW_SQL_LEN - 1) { @@ -177,8 +197,10 @@ void tscKillStream(STscObj *pObj, uint32_t killId) { } pthread_mutex_unlock(&pObj->mutex); - - tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId); + + if (pStream) { + tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId); + } taos_close_stream(pStream); if (pStream->callback) { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index ae9704effa601a5594c59732a526ad5375f60520..23143306e35cecc42793dd1dc7ae33d12594f646 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -20,13 +20,13 @@ #include "taos.h" #include "taosmsg.h" #include "tstoken.h" +#include "tstrbuild.h" #include "ttime.h" +#include "tscSQLParser.h" #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" -#include "tsql.h" -#pragma GCC diagnostic ignored "-Wunused-variable" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" @@ -48,40 +48,33 @@ typedef struct SColumnIdListRes { SColumnList list; } SColumnIdListRes; -static SSqlExpr* doAddProjectCol(SSqlCmd* pCmd, int32_t fieldIDInResult, int32_t colIdx, int32_t tableIndex); +static SSqlExpr* doAddProjectCol(SSqlCmd* pCmd, int32_t outputIndex, int32_t colIdx, int32_t tableIndex); static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo); +static char* getAccountId(SSqlObj* pSql); -static bool has(tFieldList* pFieldList, int32_t offset, char* name); - -static char* getAccountId(SSqlObj* pSql); - +static bool has(tFieldList* pFieldList, int32_t startIdx, const char* name); static void getCurrentDBName(SSqlObj* pSql, SSQLToken* pDBToken); static bool hasSpecifyDB(SSQLToken* pTableName); static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd); - static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSqlCmd* pCmd); static int32_t setObjFullName(char* fullName, char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* len); -static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nLen); +static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLength); static void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, char* columnName); static int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem); - -static int32_t insertResultField(SSqlCmd* pCmd, int32_t fieldIDInResult, SColumnList* pIdList, int16_t bytes, - int8_t type, char* fieldName); +static int32_t insertResultField(SSqlCmd* pCmd, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type, + char* fieldName); static int32_t changeFunctionID(int32_t optr, int16_t* functionId); - -static void setErrMsg(SSqlCmd* pCmd, const char* pzErrMsg); - static int32_t 
parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric); static bool validateIpAddress(char* ip); static bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd); static bool functionCompatibleCheck(SSqlCmd* pCmd); - static void setColumnOffsetValueInResultset(SSqlCmd* pCmd); + static int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList); static int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql); @@ -94,7 +87,6 @@ static int32_t parseFillClause(SSqlCmd* pCmd, SQuerySQL* pQuerySQL); static int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema, int32_t numOfCols); static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd); -static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField); static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd); static int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString); @@ -105,23 +97,34 @@ static int32_t validateLocalConfig(tDCLSQL* pOptions); static int32_t validateColumnName(char* name); static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); +static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField); static bool hasTimestampForPointInterpQuery(SSqlCmd* pCmd); static void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex); -static int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql); -static int32_t parseCreateDBOptions(SCreateDBInfo* pCreateDbSql, SSqlCmd* pCmd); +static int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql); +static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql); static int32_t getColumnIndexByNameEx(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex); static int32_t getTableIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex); static int32_t optrToString(tSQLExpr* pExpr, char** exprString); -static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex); static int32_t getMeterIndex(SSQLToken* pTableToken, SSqlCmd* pCmd, SColumnIndex* pIndex); static int32_t doFunctionsCompatibleCheck(SSqlObj* pSql); +static int32_t doLocalQueryProcess(SQuerySQL* pQuerySql, SSqlCmd* pCmd); +static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate); + +static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex); + +/* + * Used during parsing query sql. Since the query sql usually small in length, error position + * is not needed in the final error message. 
+ */ +static int32_t invalidSqlErrMsg(SSqlCmd* pCmd, const char* errMsg) { + return tscInvalidSQLErrMsg(pCmd->payload, errMsg, NULL); +} static int32_t tscQueryOnlyMetricTags(SSqlCmd* pCmd, bool* queryOnMetricTags) { assert(QUERY_IS_STABLE_QUERY(pCmd->type)); - // here colIdx == -1 means the special column tbname that is the name of each table *queryOnMetricTags = true; for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); @@ -146,13 +149,11 @@ static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, tVariant* pVar) { if (seg != NULL) { if (taosParseTime(pVar->pz, &time, pVar->nLen, pMeterMetaInfo->pMeterMeta->precision) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } } else { if (tVariantDump(pVar, (char*)&time, TSDB_DATA_TYPE_BIGINT)) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } } @@ -171,8 +172,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSqlCmd* pCmd = &(pSql->cmd); if (!pInfo->validSql) { - setErrMsg(pCmd, pInfo->pzErrMsg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, pInfo->pzErrMsg); } SMeterMetaInfo* pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd); @@ -190,8 +190,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSQLToken* pzName = &pInfo->pDCLInfo->a[0]; if ((pInfo->sqlType != DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } if (pInfo->sqlType == DROP_DATABASE) { @@ -202,7 +201,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { int32_t code = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pzName, NULL, NULL); if (code != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg2); + invalidSqlErrMsg(pCmd, msg2); } return code; @@ -214,13 +213,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { int32_t ret = setMeterID(pSql, pzName, 0); if (ret != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg); + invalidSqlErrMsg(pCmd, msg); } return ret; } else { if (pzName->n > TSDB_USER_LEN) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (pInfo->sqlType == DROP_USER) { @@ -232,15 +230,13 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { const int32_t MAX_IP_ADDRESS_LEGNTH = 16; if (pzName->n > MAX_IP_ADDRESS_LEGNTH) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } char str[128] = {0}; strncpy(str, pzName->z, pzName->n); if (!validateIpAddress(str)) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } } @@ -250,20 +246,17 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } case USE_DATABASE: { - const char* msg = "db name too long"; pCmd->command = TSDB_SQL_USE_DB; SSQLToken* pToken = &pInfo->pDCLInfo->a[0]; if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { - const char* msg1 = "invalid db name"; - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, "invalid db name"); } if (pToken->n > TSDB_DB_NAME_LEN) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + const char* msg = "db name too long"; + return invalidSqlErrMsg(pCmd, msg); } int32_t ret = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pToken, NULL, NULL); @@ -293,15 +286,13 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { case 
SHOW_STREAMS: case SHOW_SCORES: case SHOW_GRANTS: - case SHOW_CONFIGS: { + case SHOW_CONFIGS: + case SHOW_VNODES: { return setShowInfo(pSql, pInfo); } case ALTER_DATABASE: case CREATE_DATABASE: { - const char* msg2 = "name too long"; - const char* msg3 = "invalid db name"; - if (pInfo->sqlType == ALTER_DATABASE) { pCmd->command = TSDB_SQL_ALTER_DB; } else { @@ -311,17 +302,17 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { SCreateDBInfo* pCreateDB = &(pInfo->pDCLInfo->dbOpt); if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + const char* msg3 = "invalid db name"; + return invalidSqlErrMsg(pCmd, msg3); } int32_t ret = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname), NULL, NULL); if (ret != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg2); - return ret; + const char* msg2 = "name too long"; + return invalidSqlErrMsg(pCmd, msg2); } - if (parseCreateDBOptions(pCreateDB, pCmd) != TSDB_CODE_SUCCESS) { + if (parseCreateDBOptions(pCmd, pCreateDB) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } @@ -336,14 +327,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { char ipAddr[64] = {0}; const int32_t MAX_IP_ADDRESS_LENGTH = 16; if (pInfo->pDCLInfo->nTokens > 1 || pInfo->pDCLInfo->a[0].n > MAX_IP_ADDRESS_LENGTH) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } memcpy(ipAddr, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); if (validateIpAddress(ipAddr) == false) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } strncpy(pMeterMetaInfo->name, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); @@ -355,15 +344,9 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { pCmd->command = (pInfo->sqlType == CREATE_USER) ? 
TSDB_SQL_CREATE_USER : TSDB_SQL_CREATE_ACCT; assert(pInfo->pDCLInfo->nTokens >= 2); - const char* msg = "name or password too long"; - const char* msg1 = "password can not be empty"; - const char* msg2 = "invalid user/account name"; - const char* msg3 = "password needs single quote marks enclosed"; - const char* msg4 = "invalid state option, available options[no, r, w, all]"; - if (pInfo->pDCLInfo->a[1].type != TK_STRING) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + const char* msg3 = "password needs single quote marks enclosed"; + return invalidSqlErrMsg(pCmd, msg3); } strdequote(pInfo->pDCLInfo->a[1].z); @@ -371,18 +354,18 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { pInfo->pDCLInfo->a[1].n = strlen(pInfo->pDCLInfo->a[1].z); if (pInfo->pDCLInfo->a[1].n <= 0) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + const char* msg1 = "password can not be empty"; + return invalidSqlErrMsg(pCmd, msg1); } if (pInfo->pDCLInfo->a[0].n > TSDB_USER_LEN || pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + const char* msg = "name or password too long"; + return invalidSqlErrMsg(pCmd, msg); } if (tscValidateName(&pInfo->pDCLInfo->a[0]) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + const char* msg2 = "invalid user/account name"; + return invalidSqlErrMsg(pCmd, msg2); } strncpy(pMeterMetaInfo->name, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); // name @@ -415,8 +398,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } else if (strncmp(pAcctOpt->stat.z, "no", 2) == 0 && pAcctOpt->stat.n == 2) { pCmd->defaultVal[8] = 0; } else { - setErrMsg(pCmd, msg4); - return TSDB_CODE_INVALID_SQL; + const char* msg4 = "invalid state option, available options[no, r, w, all]"; + return invalidSqlErrMsg(pCmd, msg4); } } } @@ -428,15 +411,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { assert(num >= 1 && num <= 2); const char* msg = "password too long"; - const char* msg1 = "password can not be empty"; - const char* msg2 = "invalid user/account name"; - const char* msg3 = "password needs single quote marks enclosed"; - const char* msg4 = "invalid state option, available options[no, r, w, all]"; if (num == 2) { if (pInfo->pDCLInfo->a[1].type != TK_STRING) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + const char* msg3 = "password needs single quote marks enclosed"; + return invalidSqlErrMsg(pCmd, msg3); } strdequote(pInfo->pDCLInfo->a[1].z); @@ -444,26 +423,24 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { pInfo->pDCLInfo->a[1].n = strlen(pInfo->pDCLInfo->a[1].z); if (pInfo->pDCLInfo->a[1].n <= 0) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + const char* msg1 = "password can not be empty"; + return invalidSqlErrMsg(pCmd, msg1); } if (pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } strncpy(pCmd->payload, pInfo->pDCLInfo->a[1].z, pInfo->pDCLInfo->a[1].n); // passwd } if (pInfo->pDCLInfo->a[0].n > TSDB_USER_LEN) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (tscValidateName(&pInfo->pDCLInfo->a[0]) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + const char* msg2 = "invalid user/account name"; + return invalidSqlErrMsg(pCmd, msg2); } strncpy(pMeterMetaInfo->name, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); // name @@ -493,8 
+470,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } else if (strncmp(pAcctOpt->stat.z, "no", 2) == 0 && pAcctOpt->stat.n == 2) { pCmd->defaultVal[8] = 0; } else { - setErrMsg(pCmd, msg4); - return TSDB_CODE_INVALID_SQL; + const char* msg4 = "invalid state option, available options[no, r, w, all]"; + return invalidSqlErrMsg(pCmd, msg4); } } break; @@ -504,21 +481,18 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSQLToken* pToken = &pInfo->pDCLInfo->a[0]; const char* msg = "table name is too long"; + const char* msg1 = "invalid table name"; if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { - const char* msg1 = "invalid table name"; - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } if (pToken->n > TSDB_METER_NAME_LEN) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (setMeterID(pSql, pToken, 0) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); @@ -547,13 +521,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (pDCL->a[1].n <= 0) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } if (pDCL->a[0].n > TSDB_METER_NAME_LEN || pDCL->a[1].n > TSDB_PASSWORD_LEN) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (pCmd->command == TSDB_SQL_CFG_DNODE) { @@ -562,16 +534,14 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { /* validate the ip address */ if (!validateIpAddress(ip)) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } strcpy(pMeterMetaInfo->name, ip); /* validate the parameter names and options */ if (validateDNodeConfig(pDCL) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } strncpy(pCmd->payload, pDCL->a[1].z, pDCL->a[1].n); @@ -595,8 +565,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (pDCL->a[1].n <= 0 || pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { /* password cannot be empty string */ - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } strncpy(pCmd->payload, pDCL->a[1].z, pDCL->a[1].n); @@ -610,8 +579,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } else if (strncasecmp(pDCL->a[1].z, "write", 5) == 0 && pDCL->a[1].n == 5) { pCmd->count = 3; } else { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } } else { return TSDB_CODE_INVALID_SQL; @@ -621,19 +589,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } case ALTER_LOCAL: { pCmd->command = TSDB_SQL_CFG_LOCAL; - /* - if (pInfo->pDCLInfo->a[0].n > TSDB_METER_ID_LEN) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; - } - */ tDCLSQL* pDCL = pInfo->pDCLInfo; const char* msg = "invalid configure options or values"; // validate the parameter names and options if (validateLocalConfig(pDCL) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } strncpy(pCmd->payload, pDCL->a[0].z, pDCL->a[0].n); @@ -660,13 +621,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSQLToken* pzTableName = &(pInfo->pCreateTableInfo->name); if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, 
msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } if (setMeterID(pSql, pzTableName, 0) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (!validateTableColumnInfo(pFieldList, pCmd) || @@ -696,19 +655,18 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg = "invalid table name"; const char* msg1 = "illegal value or data overflow"; const char* msg2 = "illegal number of tags"; + const char* msg3 = "tag value too long"; // table name // metric name, create table by using dst SSQLToken* pToken = &(pInfo->pCreateTableInfo->usingInfo.metricName); if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (setMeterID(pSql, pToken, 0) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } // get meter meta from mnode @@ -723,8 +681,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (pMeterMetaInfo->pMeterMeta->numOfTags != pList->nExpr) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } // too long tag values will return invalid sql, not be truncated automatically @@ -734,24 +691,20 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { for (int32_t i = 0; i < pList->nExpr; ++i) { int32_t ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type); if (ret != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } // validate the length of binary if ((pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) && pList->a[i].pVar.nLen > pTagSchema[i].bytes) { - const char* msg3 = "tag value too long"; - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } tagVal += pTagSchema[i].bytes; } if (tscValidateName(&pInfo->pCreateTableInfo->name) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } int32_t ret = setMeterID(pSql, &pInfo->pCreateTableInfo->name, 0); @@ -769,14 +722,14 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg2 = "table name too long"; const char* msg3 = "fill only available for interval query"; const char* msg4 = "fill option not supported in stream computing"; + const char* msg5 = "sql too long"; // todo ADD support // if sql specifies db, use it, otherwise use default db SSQLToken* pzTableName = &(pInfo->pCreateTableInfo->name); SQuerySQL* pQuerySql = pInfo->pCreateTableInfo->pSelect; if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } tVariantList* pSrcMeterName = pInfo->pCreateTableInfo->pSelect->from; @@ -784,13 +737,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSQLToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING}; if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } if (setMeterID(pSql, &srcToken, 0) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } int32_t code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); @@ -824,23 +775,19 @@ int32_t tscToSQLCmd(SSqlObj* pSql, 
struct SSqlInfo* pInfo) { // set the created table[stream] name if (setMeterID(pSql, pzTableName, 0) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } // copy sql length int ret = tscAllocPayload(pCmd, pQuerySql->selectToken.n + 8); if (TSDB_CODE_SUCCESS != ret) { - const char* msg6 = "client out of memory"; - setErrMsg(pCmd, msg6); + invalidSqlErrMsg(pCmd, "client out of memory"); return ret; } strncpy(pCmd->payload, pQuerySql->selectToken.z, pQuerySql->selectToken.n); if (pQuerySql->selectToken.n > TSDB_MAX_SAVED_SQL_LEN) { - const char* msg5 = "sql too long"; // todo ADD support - setErrMsg(pCmd, msg5); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg5); } if (tsRewriteFieldNameIfNecessary(pCmd) != TSDB_CODE_SUCCESS) { @@ -859,16 +806,14 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { */ if (pQuerySql->fillType != NULL) { if (pCmd->nAggTimeInterval == 0) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } tVariantListItem* pItem = &pQuerySql->fillType->a[0]; if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) { if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) || (strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) { - setErrMsg(pCmd, msg4); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg4); } } } @@ -878,7 +823,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { case TSQL_QUERY_METER: { SQuerySQL* pQuerySql = pInfo->pQueryInfo; - assert(pQuerySql != NULL && pQuerySql->from->nExpr > 0); + assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); const char* msg0 = "invalid table name"; const char* msg1 = "table name too long"; @@ -891,15 +836,28 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg8 = "too many columns in selection clause"; const char* msg9 = "TWA query requires both the start and end time"; + int32_t code = TSDB_CODE_SUCCESS; + // too many result columns not support order by in query if (pQuerySql->pSelection->nExpr > TSDB_MAX_COLUMNS) { - setErrMsg(pCmd, msg8); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg8); + } + + /* + * handle the sql expression without from subclause + * select current_database(); + * select server_version(); + * select client_version(); + * select server_state(); + */ + if (pQuerySql->from == NULL) { + assert(pQuerySql->fillType == NULL && pQuerySql->pGroupby == NULL && pQuerySql->pWhere == NULL && + pQuerySql->pSortOrder == NULL); + return doLocalQueryProcess(pQuerySql, pCmd); } if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM) { - setErrMsg(pCmd, msg7); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg7); } // set all query tables, which are maybe more than one. 
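
The hunk above routes a SELECT statement that has no FROM clause (e.g. `select server_version()`, `select current_database()`) into a new `doLocalQueryProcess()` path instead of the normal table-query path. The standalone sketch below only illustrates that dispatch idea under assumed, simplified names; the enum values and the `classifyLocalQuery()` helper are hypothetical and are not the actual TDengine client implementation.

```c
#include <stdio.h>
#include <string.h>
#include <strings.h>   /* strcasecmp (POSIX) */

/* Hypothetical codes for queries the client can answer without reading any
 * table data; names are illustrative only, not the real TSDB_SQL_* values. */
typedef enum {
  LOCAL_QUERY_INVALID = -1,
  LOCAL_QUERY_CURRENT_DB,
  LOCAL_QUERY_SERVER_VERSION,
  LOCAL_QUERY_CLIENT_VERSION,
  LOCAL_QUERY_SERVER_STATUS,
} ELocalQuery;

/* Classify the single expression of a FROM-less SELECT statement. */
static ELocalQuery classifyLocalQuery(const char *expr) {
  if (strcasecmp(expr, "current_database()") == 0) return LOCAL_QUERY_CURRENT_DB;
  if (strcasecmp(expr, "server_version()") == 0)   return LOCAL_QUERY_SERVER_VERSION;
  if (strcasecmp(expr, "client_version()") == 0)   return LOCAL_QUERY_CLIENT_VERSION;
  if (strcasecmp(expr, "server_status()") == 0)    return LOCAL_QUERY_SERVER_STATUS;
  return LOCAL_QUERY_INVALID;  /* anything else still requires a FROM clause */
}

int main(void) {
  const char *exprs[] = { "server_version()", "count(*)" };
  for (size_t i = 0; i < sizeof(exprs) / sizeof(exprs[0]); ++i) {
    printf("%-18s -> %d\n", exprs[i], classifyLocalQuery(exprs[i]));
  }
  return 0;
}
```

In the patch itself the equivalent decision is made by checking `pQuerySql->from == NULL` before any table metadata is fetched, which is why the FROM-less case can be answered entirely on the client side.
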
@@ -907,16 +865,14 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariant* pTableItem = &pQuerySql->from->a[i].pVar; if (pTableItem->nType != TSDB_DATA_TYPE_BINARY) { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg0); } pTableItem->nLen = strdequote(pTableItem->pz); SSQLToken tableName = {.z = pTableItem->pz, .n = pTableItem->nLen, .type = TK_STRING}; if (tscValidateName(&tableName) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg0); } if (pCmd->numOfTables <= i) { @@ -925,19 +881,17 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSQLToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; if (setMeterID(pSql, &t, i) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } SMeterMetaInfo* pMeterInfo1 = tscGetMeterMetaInfo(pCmd, i); - int32_t code = tscGetMeterMeta(pSql, pMeterInfo1->name, i); + code = tscGetMeterMeta(pSql, pMeterInfo1->name, i); if (code != TSDB_CODE_SUCCESS) { return code; } } pSql->cmd.command = TSDB_SQL_SELECT; - int32_t code = TSDB_CODE_SUCCESS; // parse the group by clause in the first place if (parseGroupbyClause(pCmd, pQuerySql->pGroupby) != TSDB_CODE_SUCCESS) { @@ -964,8 +918,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { // TODO refactor pCmd->count == 1 means sql in stream function if (!tscEmbedded && pCmd->count == 0) { const char* msg = "not support sliding in query"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } getTimestampInUsFromStr(pSliding->z, pSliding->n, &pCmd->nSlidingTime); @@ -974,13 +927,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (pCmd->nSlidingTime < tsMinSlidingTime) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { - setErrMsg(pCmd, msg4); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg4); } } @@ -1011,8 +962,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { if ((pCmd->stime == 0 || pCmd->etime == INT64_MAX || (pCmd->etime == INT64_MAX / 1000 && pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI)) && tscIsTWAQuery(pCmd)) { - setErrMsg(pCmd, msg9); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg9); } // no result due to invalid query time range @@ -1022,22 +972,19 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (!hasTimestampForPointInterpQuery(pCmd)) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } if (pQuerySql->fillType != NULL) { if (pCmd->nAggTimeInterval == 0 && (!tscIsPointInterpQuery(pCmd))) { - setErrMsg(pCmd, msg5); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg5); } if (pCmd->nAggTimeInterval > 0) { int64_t timeRange = labs(pCmd->stime - pCmd->etime); // number of result is not greater than 10,000,000 if ((timeRange == 0) || (timeRange / pCmd->nAggTimeInterval) > MAX_RETRIEVE_ROWS_IN_INTERVAL_QUERY) { - setErrMsg(pCmd, msg6); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg6); } } @@ -1052,8 +999,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { int64_t timeRange = labs(pCmd->stime - pCmd->etime); if (timeRange == 0 && pCmd->stime == 0) { - setErrMsg(pCmd, msg6); - return TSDB_CODE_INVALID_SQL; + return 
invalidSqlErrMsg(pCmd, msg6); } } @@ -1070,7 +1016,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } setColumnOffsetValueInResultset(pCmd); - updateTagColumnIndex(pCmd, 0); + + for (int32_t i = 0; i < pCmd->numOfTables; ++i) { + updateTagColumnIndex(pCmd, i); + } break; } @@ -1121,7 +1070,7 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - if (pQuerySql->interval.type == 0) { + if (pQuerySql->interval.type == 0 || pQuerySql->interval.n == 0) { return TSDB_CODE_SUCCESS; } @@ -1141,8 +1090,7 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { // interval cannot be less than 10 milliseconds if (pCmd->nAggTimeInterval < tsMinIntervalTime) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } // for top/bottom + interval query, we do not add additional timestamp column in the front @@ -1151,11 +1099,10 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { } // check the invalid sql expresssion: select count(tbname)/count(tag1)/count(tag2) from super_table interval(1d); - for(int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); if (pExpr->functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } } @@ -1198,13 +1145,11 @@ int32_t setSlidingClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { } if (pCmd->nSlidingTime < tsMinSlidingTime) { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg0); } if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } } @@ -1232,7 +1177,7 @@ int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex) { } if (code != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg); + invalidSqlErrMsg(pCmd, msg); } return code; @@ -1251,13 +1196,13 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { // number of fields no less than 2 if (pFieldList->nField <= 1 || pFieldList->nField > TSDB_MAX_COLUMNS) { - setErrMsg(pCmd, msg); + invalidSqlErrMsg(pCmd, msg); return false; } // first column must be timestamp if (pFieldList->p[0].type != TSDB_DATA_TYPE_TIMESTAMP) { - setErrMsg(pCmd, msg1); + invalidSqlErrMsg(pCmd, msg1); return false; } @@ -1268,7 +1213,7 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { // max row length must be less than TSDB_MAX_BYTES_PER_ROW if (nLen > TSDB_MAX_BYTES_PER_ROW) { - setErrMsg(pCmd, msg2); + invalidSqlErrMsg(pCmd, msg2); return false; } @@ -1276,23 +1221,23 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { for (int32_t i = 0; i < pFieldList->nField; ++i) { TAOS_FIELD* pField = &pFieldList->p[i]; if (pField->type < TSDB_DATA_TYPE_BOOL || pField->type > TSDB_DATA_TYPE_NCHAR) { - setErrMsg(pCmd, msg4); + invalidSqlErrMsg(pCmd, msg4); return false; } if ((pField->type == TSDB_DATA_TYPE_BINARY && (pField->bytes <= 0 || pField->bytes > TSDB_MAX_BINARY_LEN)) || (pField->type == TSDB_DATA_TYPE_NCHAR && (pField->bytes <= 0 || pField->bytes > TSDB_MAX_NCHAR_LEN))) { - setErrMsg(pCmd, msg5); + invalidSqlErrMsg(pCmd, msg5); return false; } if (validateColumnName(pField->name) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg6); + 
invalidSqlErrMsg(pCmd, msg6); return false; } if (has(pFieldList, i + 1, pFieldList->p[i].name) == true) { - setErrMsg(pCmd, msg3); + invalidSqlErrMsg(pCmd, msg3); return false; } } @@ -1313,7 +1258,7 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq // number of fields at least 1 if (pTagsList->nField < 1 || pTagsList->nField > TSDB_MAX_TAGS) { - setErrMsg(pCmd, msg1); + invalidSqlErrMsg(pCmd, msg1); return false; } @@ -1324,14 +1269,14 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq // max tag row length must be less than TSDB_MAX_TAGS_LEN if (nLen > TSDB_MAX_TAGS_LEN) { - setErrMsg(pCmd, msg2); + invalidSqlErrMsg(pCmd, msg2); return false; } // field name must be unique for (int32_t i = 0; i < pTagsList->nField; ++i) { if (has(pFieldList, 0, pTagsList->p[i].name) == true) { - setErrMsg(pCmd, msg3); + invalidSqlErrMsg(pCmd, msg3); return false; } } @@ -1339,28 +1284,28 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq /* timestamp in tag is not allowed */ for (int32_t i = 0; i < pTagsList->nField; ++i) { if (pTagsList->p[i].type == TSDB_DATA_TYPE_TIMESTAMP) { - setErrMsg(pCmd, msg4); + invalidSqlErrMsg(pCmd, msg4); return false; } if (pTagsList->p[i].type < TSDB_DATA_TYPE_BOOL || pTagsList->p[i].type > TSDB_DATA_TYPE_NCHAR) { - setErrMsg(pCmd, msg5); + invalidSqlErrMsg(pCmd, msg5); return false; } if ((pTagsList->p[i].type == TSDB_DATA_TYPE_BINARY && pTagsList->p[i].bytes <= 0) || (pTagsList->p[i].type == TSDB_DATA_TYPE_NCHAR && pTagsList->p[i].bytes <= 0)) { - setErrMsg(pCmd, msg7); + invalidSqlErrMsg(pCmd, msg7); return false; } if (validateColumnName(pTagsList->p[i].name) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg6); + invalidSqlErrMsg(pCmd, msg6); return false; } if (has(pTagsList, i + 1, pTagsList->p[i].name) == true) { - setErrMsg(pCmd, msg3); + invalidSqlErrMsg(pCmd, msg3); return false; } } @@ -1387,18 +1332,18 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { char msg[128] = {0}; sprintf(msg, "tags no more than %d", TSDB_MAX_TAGS); - setErrMsg(pCmd, msg); + invalidSqlErrMsg(pCmd, msg); return false; } // no timestamp allowable if (pTagField->type == TSDB_DATA_TYPE_TIMESTAMP) { - setErrMsg(pCmd, msg1); + invalidSqlErrMsg(pCmd, msg1); return false; } if (pTagField->type < TSDB_DATA_TYPE_BOOL && pTagField->type > TSDB_DATA_TYPE_NCHAR) { - setErrMsg(pCmd, msg6); + invalidSqlErrMsg(pCmd, msg6); return false; } @@ -1411,19 +1356,19 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { // length less than TSDB_MAX_TASG_LEN if (nLen + pTagField->bytes > TSDB_MAX_TAGS_LEN) { - setErrMsg(pCmd, msg3); + invalidSqlErrMsg(pCmd, msg3); return false; } // tags name can not be a keyword if (validateColumnName(pTagField->name) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg4); + invalidSqlErrMsg(pCmd, msg4); return false; } // binary(val), val can not be equalled to or less than 0 if ((pTagField->type == TSDB_DATA_TYPE_BINARY || pTagField->type == TSDB_DATA_TYPE_NCHAR) && pTagField->bytes <= 0) { - setErrMsg(pCmd, msg5); + invalidSqlErrMsg(pCmd, msg5); return false; } @@ -1432,7 +1377,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { for (int32_t i = 0; i < pMeterMeta->numOfTags + pMeterMeta->numOfColumns; ++i) { if (strncasecmp(pTagField->name, pSchema[i].name, TSDB_COL_NAME_LEN) == 0) { - setErrMsg(pCmd, msg2); + invalidSqlErrMsg(pCmd, msg2); return false; } } @@ -1454,17 +1399,17 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { // no 
more max columns if (pMeterMeta->numOfColumns >= TSDB_MAX_COLUMNS || pMeterMeta->numOfTags + pMeterMeta->numOfColumns >= TSDB_MAX_COLUMNS) { - setErrMsg(pCmd, msg1); + invalidSqlErrMsg(pCmd, msg1); return false; } if (pColField->type < TSDB_DATA_TYPE_BOOL || pColField->type > TSDB_DATA_TYPE_NCHAR) { - setErrMsg(pCmd, msg4); + invalidSqlErrMsg(pCmd, msg4); return false; } if (validateColumnName(pColField->name) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg5); + invalidSqlErrMsg(pCmd, msg5); return false; } @@ -1476,20 +1421,20 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { } if (pColField->bytes <= 0) { - setErrMsg(pCmd, msg6); + invalidSqlErrMsg(pCmd, msg6); return false; } // length less than TSDB_MAX_BYTES_PER_ROW if (nLen + pColField->bytes > TSDB_MAX_BYTES_PER_ROW) { - setErrMsg(pCmd, msg3); + invalidSqlErrMsg(pCmd, msg3); return false; } // field name must be unique for (int32_t i = 0; i < pMeterMeta->numOfTags + pMeterMeta->numOfColumns; ++i) { if (strncasecmp(pColField->name, pSchema[i].name, TSDB_COL_NAME_LEN) == 0) { - setErrMsg(pCmd, msg2); + invalidSqlErrMsg(pCmd, msg2); return false; } } @@ -1498,7 +1443,7 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { } /* is contained in pFieldList or not */ -static bool has(tFieldList* pFieldList, int32_t startIdx, char* name) { +static bool has(tFieldList* pFieldList, int32_t startIdx, const char* name) { for (int32_t j = startIdx; j < pFieldList->nField; ++j) { if (strncasecmp(name, pFieldList->p[j].name, TSDB_COL_NAME_LEN) == 0) return true; } @@ -1630,8 +1575,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric int32_t ret = validateArithmeticSQLExpr(pItem->pNode, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns, &columnList); if (ret != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } char arithmeticExprStr[1024] = {0}; @@ -1660,8 +1604,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric * not support such expression * e.g., select 12+5 from table_name */ - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } if (pCmd->fieldsInfo.numOfOutputCols > TSDB_MAX_COLUMNS) { @@ -1670,8 +1613,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric } if (!functionCompatibleCheck(pCmd)) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } if (isMetric) { @@ -1849,24 +1791,21 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, tSQLExprItem* pItem) { SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (getColumnIndexByNameEx(&pItem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg0); } if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - SColumnIndex index1 = {0, TSDB_TBNAME_COLUMN_INDEX}; - SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_METER_NAME_LEN}; + SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_METER_NAME_LEN}; strcpy(colSchema.name, TSQL_TBNAME_L); pCmd->type = TSDB_QUERY_TYPE_STABLE_QUERY; - tscAddSpecialColumnForSelect(pCmd, startPos, TSDB_FUNC_TAGPRJ, &index1, &colSchema, true); + tscAddSpecialColumnForSelect(pCmd, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, true); } else { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; 
if (index.columnIndex >= pMeterMeta->numOfColumns && UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } addProjectQueryCol(pCmd, startPos, &index, pItem); @@ -1890,7 +1829,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SSchema* pSchema, int32_t if (pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BINARY || pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_NCHAR || pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BOOL) { - setErrMsg(pCmd, msg1); + invalidSqlErrMsg(pCmd, msg1); return -1; } else { type = TSDB_DATA_TYPE_DOUBLE; @@ -1925,8 +1864,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem SMeterMetaInfo* pMeterMetaInfo = NULL; int32_t optr = pItem->pNode->nSQLOptr; - int32_t numOfAddedColumn = 1; - const char* msg1 = "not support column types"; const char* msg2 = "invalid parameters"; const char* msg3 = "illegal column name"; @@ -1938,8 +1875,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem case TK_COUNT: { if (pItem->pNode->pParam != NULL && pItem->pNode->pParam->nExpr != 1) { /* more than one parameter for count() function */ - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } int16_t functionID = 0; @@ -1952,8 +1888,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem if (pItem->pNode->pParam != NULL) { SSQLToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo; if (pToken->z == NULL || pToken->n == 0) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0]; @@ -1963,8 +1898,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem SSQLToken tmpToken = pParamElem->pNode->colInfo; if (getTableIndexByName(&tmpToken, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg4); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg4); } index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; @@ -1973,8 +1907,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem } else { // count the number of meters created according to the metric if (getColumnIndexByNameEx(pToken, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); @@ -2015,20 +1948,18 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem if (pItem->pNode->pParam == NULL || (optr != TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 1) || (optr == TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 3)) { /* no parameters or more than one parameter for function */ - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]); if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pParamElem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + if ((getColumnIndexByNameEx(&pParamElem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) || + index.columnIndex == 
TSDB_TBNAME_COLUMN_INDEX) { + return invalidSqlErrMsg(pCmd, msg3); } // 2. check if sql function can be applied on this column data type @@ -2036,9 +1967,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem SSchema* pSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, index.columnIndex); int16_t colType = pSchema->type; - if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + if (colType <= TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) { + return invalidSqlErrMsg(pCmd, msg1); } char columnName[TSDB_COL_NAME_LEN] = {0}; @@ -2070,8 +2000,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem // functions can not be applied to tags if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) { - setErrMsg(pCmd, msg6); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg6); } SSqlExpr* pExpr = tscSqlExprInsert(pCmd, colIdx, functionID, &index, resultType, resultSize, resultSize); @@ -2113,8 +2042,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem if (!requireAllFields) { if (pItem->pNode->pParam->nExpr < 1) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } /* in first/last function, multiple columns can be add to resultset */ @@ -2122,8 +2050,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem for (int32_t i = 0; i < pItem->pNode->pParam->nExpr; ++i) { tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[i]); if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } SColumnIndex index = COLUMN_INDEX_INITIALIZER; @@ -2133,8 +2060,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem SSQLToken tmpToken = pParamElem->pNode->colInfo; if (getTableIndexByName(&tmpToken, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg4); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg4); } pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); @@ -2149,8 +2075,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem } else { if (getColumnIndexByNameEx(&pParamElem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); @@ -2158,8 +2083,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem // functions can not be applied to tags if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) { - setErrMsg(pCmd, msg6); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg6); } if (setExprInfoForFunctions(pCmd, pSchema, functionID, pItem->aliasName, colIdx + i, &index) != 0) { @@ -2196,14 +2120,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem // 1. 
valid the number of parameters if (pItem->pNode->pParam == NULL || pItem->pNode->pParam->nExpr != 2) { /* no parameters or more than one parameter for function */ - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]); if (pParamElem->pNode->nSQLOptr != TK_ID) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } char columnName[TSDB_COL_NAME_LEN] = {0}; @@ -2211,8 +2133,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (getColumnIndexByNameEx(&pParamElem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); @@ -2220,21 +2141,18 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem // functions can not be applied to tags if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) { - setErrMsg(pCmd, msg6); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg6); } // 2. valid the column type int16_t colType = pSchema[index.columnIndex].type; if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } // 3. valid the parameters if (pParamElem[1].pNode->nSQLOptr == TK_ID) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } tVariant* pVariant = &pParamElem[1].pNode->val; @@ -2242,14 +2160,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem int8_t resultType = pSchema[index.columnIndex].type; int16_t resultSize = pSchema[index.columnIndex].bytes; - char val[8] = {0}; + char val[8] = {0}; + int32_t numOfAddedColumn = 1; if (optr == TK_PERCENTILE || optr == TK_APERCENTILE) { tVariantDump(pVariant, val, TSDB_DATA_TYPE_DOUBLE); double dp = *((double*)val); if (dp < 0 || dp > TOP_BOTTOM_QUERY_LIMIT) { - setErrMsg(pCmd, msg5); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg5); } resultSize = sizeof(double); @@ -2272,8 +2190,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem int64_t nTop = *((int32_t*)val); if (nTop <= 0 || nTop > 100) { // todo use macro - setErrMsg(pCmd, msg5); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg5); } int16_t functionId = 0; @@ -2380,8 +2297,7 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* p if (colIndex != COLUMN_INDEX_INITIAL_VAL) { if (pIndex->columnIndex != COLUMN_INDEX_INITIAL_VAL) { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg0); } else { pIndex->tableIndex = i; pIndex->columnIndex = colIndex; @@ -2396,8 +2312,7 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* p } if (pIndex->columnIndex == COLUMN_INDEX_INITIAL_VAL) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } } @@ -2422,7 +2337,7 @@ static int32_t getMeterIndex(SSQLToken* pTableToken, SSqlCmd* pCmd, SColumnIndex for (int32_t i = 0; i < pCmd->numOfTables; ++i) { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); - extractMeterName(pMeterMetaInfo->name, tableName); + extractTableName(pMeterMetaInfo->name, tableName); if 
(strncasecmp(tableName, pTableToken->z, pTableToken->n) == 0 && strlen(tableName) == pTableToken->n) { pIndex->tableIndex = i; @@ -2584,6 +2499,9 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { case SHOW_CONFIGS: pCmd->showType = TSDB_MGMT_TABLE_CONFIGS; break; + case SHOW_VNODES: + pCmd->showType = TSDB_MGMT_TABLE_VNODES; + break; default: return TSDB_CODE_INVALID_SQL; } @@ -2598,13 +2516,11 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSQLToken* pDbPrefixToken = &pInfo->pDCLInfo->a[0]; if (pDbPrefixToken->n > TSDB_DB_NAME_LEN) { // db name is too long - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (pDbPrefixToken->n > 0 && tscValidateName(pDbPrefixToken) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } int32_t ret = 0; @@ -2624,11 +2540,26 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { pCmd->payloadLen = strdequote(pCmd->payload); if (pCmd->payloadLen > TSDB_METER_NAME_LEN) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; // wildcard is too long + return invalidSqlErrMsg(pCmd, msg2); } } } + } else if (type == SHOW_VNODES) { + if (NULL == pInfo->pDCLInfo) { + return invalidSqlErrMsg(pCmd, "No specified ip of dnode"); + } + + // show vnodes may be ip addr of dnode in payload + if (pInfo->pDCLInfo->nTokens > 0) { + SSQLToken* pDnodeIp = &pInfo->pDCLInfo->a[0]; + + if (pDnodeIp->n > TSDB_IPv4ADDR_LEN) { // ip addr is too long + return invalidSqlErrMsg(pCmd, msg); + } + + strncpy(pCmd->payload, pDnodeIp->z, pDnodeIp->n); + pCmd->payloadLen = strdequote(pCmd->payload); + } } return TSDB_CODE_SUCCESS; @@ -2666,27 +2597,20 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { memset(pCmd->payload, 0, tListLen(pCmd->payload)); const char* msg = "invalid ip address"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } - int32_t port = strtol(portStr, NULL, 10); + uint16_t port = (uint16_t)strtol(portStr, NULL, 10); if (port <= 0 || port > 65535) { memset(pCmd->payload, 0, tListLen(pCmd->payload)); const char* msg = "invalid port"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } return TSDB_CODE_SUCCESS; } -void setErrMsg(SSqlCmd* pCmd, const char* pzErrMsg) { - strncpy(pCmd->payload, pzErrMsg, pCmd->allocSize); - pCmd->payload[pCmd->allocSize - 1] = 0; -} - bool validateIpAddress(char* ip) { in_addr_t ipAddr = inet_addr(ip); return (ipAddr != 0) && (ipAddr != 0xffffffff); @@ -2749,8 +2673,7 @@ void tscRestoreSQLFunctionForMetricQuery(SSqlCmd* pCmd) { bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd) { const char* msg1 = "TWA not allowed to apply to super table directly"; - const char* msg2 = "functions not supported for super table"; - const char* msg3 = "TWA only support group by tbname for super table query"; + const char* msg2 = "TWA only support group by tbname for super table query"; // filter sql function not supported by metric query yet. 
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { @@ -2762,12 +2685,12 @@ bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd) { if (tscIsTWAQuery(pCmd)) { if (pCmd->groupbyExpr.numOfGroupCols == 0) { - setErrMsg(pCmd, msg1); + invalidSqlErrMsg(pCmd, msg1); return true; } if (pCmd->groupbyExpr.numOfGroupCols != 1 || pCmd->groupbyExpr.columnInfo[0].colIdx != TSDB_TBNAME_COLUMN_INDEX) { - setErrMsg(pCmd, msg3); + invalidSqlErrMsg(pCmd, msg2); return true; } } @@ -2776,8 +2699,6 @@ bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd) { } static bool functionCompatibleCheck(SSqlCmd* pCmd) { - const char* msg1 = "column on select clause not allowed"; - int32_t startIdx = 0; int32_t functionID = tscSqlExprGet(pCmd, startIdx)->functionId; @@ -2791,70 +2712,19 @@ static bool functionCompatibleCheck(SSqlCmd* pCmd) { // diff function cannot be executed with other function // arithmetic function can be executed with other arithmetic functions for (int32_t i = startIdx + 1; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int16_t functionId = tscSqlExprGet(pCmd, i)->functionId; - if (functionId == TSDB_FUNC_TAGPRJ || - functionId == TSDB_FUNC_TAG || - functionId == TSDB_FUNC_TS) { - continue; - } + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); - if (funcCompatDefList[functionId] != factor) { - return false; + int16_t functionId = pExpr->functionId; + if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS) { + continue; } - } - - // additional check for select aggfuntion(column), column1 from table_name group by(column1); - if ((pCmd->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) == TSDB_QUERY_TYPE_PROJECTION_QUERY) { - bool isAggFunc = false; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int16_t functionId = tscSqlExprGet(pCmd, i)->functionId; - - if (functionId == TSDB_FUNC_PRJ || - functionId == TSDB_FUNC_TAGPRJ || - functionId == TSDB_FUNC_TS || - functionId == TSDB_FUNC_ARITHM) { - continue; - } - if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) == 0) { - isAggFunc = true; - break; - } + if (functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + continue; } - // TODO change the type, the type is not correct - if (isAggFunc) { - pCmd->type &= (~TSDB_QUERY_TYPE_PROJECTION_QUERY); - - // agg function mixed up with project query without group by exists - if (pCmd->groupbyExpr.numOfGroupCols == 0) { - return false; - } - - // get the project column - int32_t numOfPrjColumn = 0; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); - if (pExpr->functionId == TSDB_FUNC_PRJ) { - numOfPrjColumn += 1; - - bool qualifiedCol = false; - for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) { - if (pExpr->colInfo.colId == pCmd->groupbyExpr.columnInfo[j].colId) { - qualifiedCol = true; - - pExpr->param[0].i64Key = 1; // limit the output to be 1 for each state value - pExpr->numOfParams = 1; - break; - } - } - - if (!qualifiedCol) { - setErrMsg(pCmd, msg1); - return false; - } - } - } + if (funcCompatDefList[functionId] != factor) { + return false; } } @@ -2864,15 +2734,20 @@ static bool functionCompatibleCheck(SSqlCmd* pCmd) { void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); - // update tags column index for group by tags - for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) { - int32_t index = 
pCmd->groupbyExpr.columnInfo[i].colIdx; - - for (int32_t j = 0; j < pMeterMetaInfo->numOfTags; ++j) { - int32_t tagColIndex = pMeterMetaInfo->tagColumnIndex[j]; - if (tagColIndex == index) { - pCmd->groupbyExpr.columnInfo[i].colIdx = j; - break; + /* + * update tags column index for group by tags + * group by columns belong to this table + */ + if (pCmd->groupbyExpr.numOfGroupCols > 0 && pCmd->groupbyExpr.tableIndex == tableIndex) { + for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) { + int32_t index = pCmd->groupbyExpr.columnInfo[i].colIdx; + + for (int32_t j = 0; j < pMeterMetaInfo->numOfTags; ++j) { + int32_t tagColIndex = pMeterMetaInfo->tagColumnIndex[j]; + if (tagColIndex == index) { + pCmd->groupbyExpr.columnInfo[i].colIdx = j; + break; + } } } } @@ -2880,10 +2755,16 @@ void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) { // update tags column index for expression for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + if (!TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { // not tags, continue continue; } + // not belongs to this table + if (pExpr->uid != pMeterMetaInfo->pMeterMeta->uid) { + continue; + } + for (int32_t j = 0; j < pMeterMetaInfo->numOfTags; ++j) { if (pExpr->colInfo.colIdx == pMeterMetaInfo->tagColumnIndex[j]) { pExpr->colInfo.colIdx = j; @@ -2891,14 +2772,37 @@ void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) { } } } + + // update join condition tag column index + SJoinInfo* pJoinInfo = &pCmd->tagCond.joinInfo; + if (!pJoinInfo->hasJoin) { // not join query + return; + } + + assert(pJoinInfo->left.uid != pJoinInfo->right.uid); + + // the join condition expression node belongs to this table(super table) + if (pMeterMetaInfo->pMeterMeta->uid == pJoinInfo->left.uid) { + for (int32_t i = 0; i < pMeterMetaInfo->numOfTags; ++i) { + if (pJoinInfo->left.tagCol == pMeterMetaInfo->tagColumnIndex[i]) { + pJoinInfo->left.tagCol = i; + } + } + } + + if (pMeterMetaInfo->pMeterMeta->uid == pJoinInfo->right.uid) { + for (int32_t i = 0; i < pMeterMetaInfo->numOfTags; ++i) { + if (pJoinInfo->right.tagCol == pMeterMetaInfo->tagColumnIndex[i]) { + pJoinInfo->right.tagCol = i; + } + } + } } int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { const char* msg1 = "too many columns in group by clause"; const char* msg2 = "invalid column name in group by clause"; - const char* msg4 = "group by only available for STable query"; - const char* msg5 = "group by columns must belong to one table"; - const char* msg6 = "only support group by one ordinary column"; + const char* msg3 = "group by columns must belong to one table"; const char* msg7 = "not support group by expression"; const char* msg8 = "not allowed column type for group by"; const char* msg9 = "tags not allowed for table query"; @@ -2912,15 +2816,13 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { pCmd->groupbyExpr.numOfGroupCols = pList->nExpr; if (pList->nExpr > TSDB_MAX_TAGS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } SMeterMeta* pMeterMeta = NULL; SSchema* pSchema = NULL; + SSchema s = tsGetTbnameColumnSchema(); - SSchema s = {0}; - int32_t numOfReqTags = 0; int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; for (int32_t i = 0; i < pList->nExpr; ++i) { @@ -2930,13 +2832,11 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (getColumnIndexByNameEx(&token, pCmd, &index) != TSDB_CODE_SUCCESS) { - 
setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } if (tableIndex != index.tableIndex && tableIndex >= 0) { - setErrMsg(pCmd, msg5); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } tableIndex = index.tableIndex; @@ -2944,22 +2844,12 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); pMeterMeta = pMeterMetaInfo->pMeterMeta; - // TODO refactor!!!!!!!!!!!!!!1 if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - s.colId = TSDB_TBNAME_COLUMN_INDEX; - s.type = TSDB_DATA_TYPE_BINARY; - s.bytes = TSDB_METER_NAME_LEN; - strcpy(s.name, TSQL_TBNAME_L); - pSchema = &s; } else { pSchema = tsGetColumnSchema(pMeterMeta, index.columnIndex); } - int16_t type = 0; - int16_t bytes = 0; - char* name = NULL; - bool groupTag = false; if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= pMeterMeta->numOfColumns) { groupTag = true; @@ -2967,8 +2857,7 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { if (groupTag) { if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - setErrMsg(pCmd, msg9); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg9); } int32_t relIndex = index.columnIndex; @@ -2982,8 +2871,7 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { } else { // check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by if (pSchema->type > TSDB_DATA_TYPE_BIGINT) { - setErrMsg(pCmd, msg8); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg8); } tscColumnBaseInfoInsert(pCmd, &index); @@ -2992,8 +2880,7 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { pCmd->groupbyExpr.orderType = TSQL_SO_ASC; if (i == 0 && pList->nExpr > 1) { - setErrMsg(pCmd, msg7); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg7); } } } @@ -3095,8 +2982,7 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SColumnFilterInfo* pColu pColumnFilter->lowerRelOptr = TSDB_RELATION_LIKE; break; default: - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } return TSDB_CODE_SUCCESS; @@ -3119,8 +3005,6 @@ typedef struct SCondExpr { static int32_t getTimeRange(int64_t* stime, int64_t* etime, tSQLExpr* pRight, int32_t optr, int16_t timePrecision); -static int32_t doParseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr, SCondExpr* condExpr); - static int32_t tSQLExprNodeToString(tSQLExpr* pExpr, char** str) { if (pExpr->nSQLOptr == TK_ID) { // column name strncpy(*str, pExpr->colInfo.z, pExpr->colInfo.n); @@ -3236,26 +3120,22 @@ static int32_t optrToString(tSQLExpr* pExpr, char** exprString) { return TSDB_CODE_SUCCESS; } -static int32_t tablenameListToString(tSQLExpr* pExpr, char* str) { +static int32_t tablenameListToString(tSQLExpr* pExpr, /*char* str*/ SStringBuilder* sb) { tSQLExprList* pList = pExpr->pParam; if (pList->nExpr <= 0) { return TSDB_CODE_INVALID_SQL; } if (pList->nExpr > 0) { - strcpy(str, QUERY_COND_REL_PREFIX_IN); - str += QUERY_COND_REL_PREFIX_IN_LEN; + taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); } - int32_t len = 0; for (int32_t i = 0; i < pList->nExpr; ++i) { tSQLExpr* pSub = pList->a[i].pNode; - strncpy(str + len, pSub->val.pz, pSub->val.nLen); - - len += pSub->val.nLen; + taosStringBuilderAppendStringLen(sb, pSub->val.pz, pSub->val.nLen); if (i < pList->nExpr - 1) { - str[len++] = TBNAME_LIST_SEP[0]; + taosStringBuilderAppendString(sb, 
TBNAME_LIST_SEP); } if (pSub->val.nLen <= 0 || pSub->val.nLen > TSDB_METER_NAME_LEN) { @@ -3266,11 +3146,9 @@ static int32_t tablenameListToString(tSQLExpr* pExpr, char* str) { return TSDB_CODE_SUCCESS; } -static int32_t tablenameCondToString(tSQLExpr* pExpr, char* str) { - strcpy(str, QUERY_COND_REL_PREFIX_LIKE); - str += strlen(QUERY_COND_REL_PREFIX_LIKE); - - strcpy(str, pExpr->val.pz); +static int32_t tablenameCondToString(tSQLExpr* pExpr, /*char* str*/ SStringBuilder* sb) { + taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN); + taosStringBuilderAppendString(sb, pExpr->val.pz); return TSDB_CODE_SUCCESS; } @@ -3290,7 +3168,6 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SColumnIndex* pIndex, tSQL const char* msg1 = "non binary column not support like operator"; const char* msg2 = "binary column not support this operator"; - const char* msg3 = "OR is not supported on different column filter"; SColumnBase* pColumn = tscColumnBaseInfoInsert(pCmd, pIndex); SColumnFilterInfo* pColFilter = NULL; @@ -3318,13 +3195,11 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SColumnIndex* pIndex, tSQL if (pColFilter->filterOnBinary) { if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE && pExpr->nSQLOptr != TK_LIKE) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } } else { if (pExpr->nSQLOptr == TK_LIKE) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } } @@ -3376,7 +3251,7 @@ static int32_t getTagCondString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) { return tSQLExprLeafToString(pExpr, true, str); } -static int32_t getTablenameCond(SSqlCmd* pCmd, tSQLExpr* pTableCond, char* str) { +static int32_t getTablenameCond(SSqlCmd* pCmd, tSQLExpr* pTableCond, /*char* str*/ SStringBuilder* sb) { const char* msg0 = "invalid table name list"; if (pTableCond == NULL) { @@ -3393,13 +3268,13 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, tSQLExpr* pTableCond, char* str) int32_t ret = TSDB_CODE_SUCCESS; if (pTableCond->nSQLOptr == TK_IN) { - ret = tablenameListToString(pRight, str); + ret = tablenameListToString(pRight, sb); } else if (pTableCond->nSQLOptr == TK_LIKE) { - ret = tablenameCondToString(pRight, str); + ret = tablenameCondToString(pRight, sb); } if (ret != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg0); + invalidSqlErrMsg(pCmd, msg0); } return ret; @@ -3437,8 +3312,7 @@ static int32_t getJoinCondInfo(SSqlObj* pSql, tSQLExpr* pExpr) { SSqlCmd* pCmd = &pSql->cmd; if (!isExprDirectParentOfLeaftNode(pExpr)) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } STagCond* pTagCond = &pCmd->tagCond; @@ -3649,14 +3523,14 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, tSQLExpr* pExpr, SColumnIndex* p } if (pExpr->nSQLOptr != TK_EQ) { - setErrMsg(pCmd, msg2); + invalidSqlErrMsg(pCmd, msg2); return false; } SColumnIndex rightIndex = COLUMN_INDEX_INITIALIZER; if (getColumnIndexByNameEx(&pRight->colInfo, pCmd, &rightIndex) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); + invalidSqlErrMsg(pCmd, msg1); return false; } @@ -3670,19 +3544,19 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, tSQLExpr* pExpr, SColumnIndex* p int16_t rightType = pRightSchema[rightIndex.columnIndex].type; if (leftType != rightType) { - setErrMsg(pCmd, msg3); + invalidSqlErrMsg(pCmd, msg3); return false; } else if (pLeftIndex->tableIndex == rightIndex.tableIndex) { - setErrMsg(pCmd, msg4); + invalidSqlErrMsg(pCmd, msg4); 
return false; } else if (leftType == TSDB_DATA_TYPE_BINARY || leftType == TSDB_DATA_TYPE_NCHAR) { - setErrMsg(pCmd, msg6); + invalidSqlErrMsg(pCmd, msg6); return false; } // table to table/ super table to super table are allowed if (UTIL_METER_IS_METRIC(pLeftMeterMeta) != UTIL_METER_IS_METRIC(pRightMeterMeta)) { - setErrMsg(pCmd, msg5); + invalidSqlErrMsg(pCmd, msg5); return false; } @@ -3704,8 +3578,7 @@ static bool validTableNameOptr(tSQLExpr* pExpr) { static int32_t setExprToCond(SSqlCmd* pCmd, tSQLExpr** parent, tSQLExpr* pExpr, const char* msg, int32_t parentOptr) { if (*parent != NULL) { if (parentOptr == TK_OR && msg != NULL) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } *parent = tSQLExprCreate((*parent), pExpr, parentOptr); @@ -3722,10 +3595,9 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* const char* msg2 = "illegal column name"; const char* msg3 = "only one query time range allowed"; const char* msg4 = "only one join condition allowed"; - const char* msg5 = "AND is allowed to filter on different ordinary columns"; - const char* msg6 = "not support ordinary column join"; - const char* msg7 = "only one query condition on tbname allowed"; - const char* msg8 = "only in/like allowed in filter table name"; + const char* msg5 = "not support ordinary column join"; + const char* msg6 = "only one query condition on tbname allowed"; + const char* msg7 = "only in/like allowed in filter table name"; tSQLExpr* pLeft = (*pExpr)->pLeft; tSQLExpr* pRight = (*pExpr)->pRight; @@ -3734,8 +3606,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (getColumnIndexByNameEx(&pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } assert(isExprDirectParentOfLeaftNode(*pExpr)); @@ -3768,8 +3639,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // query on tags // check for tag query condition if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } // check for like expression @@ -3782,16 +3652,14 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* if ((!isTablenameToken(&pLeft->colInfo)) && pSchema[index.columnIndex].type != TSDB_DATA_TYPE_BINARY && pSchema[index.columnIndex].type != TSDB_DATA_TYPE_NCHAR) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } } // in case of in operator, keep it in a seperate attribute if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { if (!validTableNameOptr(*pExpr)) { - setErrMsg(pCmd, msg8); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg7); } if (pCondExpr->pTableCond == NULL) { @@ -3799,8 +3667,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr->relType = parentOptr; pCondExpr->tableCondIndex = index.tableIndex; } else { - setErrMsg(pCmd, msg7); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg6); } *type = TSQL_EXPR_TBNAME; @@ -3812,8 +3679,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* } if (pCondExpr->pJoinExpr != NULL) { - setErrMsg(pCmd, msg4); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg4); } pCmd->type 
|= TSDB_QUERY_TYPE_JOIN_QUERY; @@ -3832,15 +3698,9 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* *type = TSQL_EXPR_COLUMN; if (pRight->nSQLOptr == TK_ID) { // other column cannot be served as the join column - setErrMsg(pCmd, msg6); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg5); } - // if (parentOptr == TK_OR) { - // setErrMsg(pCmd, msg5); - // return TSDB_CODE_INVALID_SQL; - // } - ret = setExprToCond(pCmd, &pCondExpr->pColumnCond, *pExpr, NULL, parentOptr); *pExpr = NULL; // remove it from expr tree } @@ -3882,8 +3742,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr, */ if (leftType != rightType) { if ((*pExpr)->nSQLOptr == TK_OR && (leftType + rightType != TSQL_EXPR_TBNAME + TSQL_EXPR_TAG)) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } } @@ -3978,8 +3837,7 @@ int tableNameCompar(const void* lhs, const void* rhs) { return ret > 0 ? 1 : -1; } -static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_t tableCondIndex, - char* tmpTableCondBuf) { +static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_t tableCondIndex, SStringBuilder* sb) { SSqlCmd* pCmd = &pSql->cmd; const char* msg = "meter name too long"; @@ -3992,26 +3850,25 @@ static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_ STagCond* pTagCond = &pSql->cmd.tagCond; pTagCond->tbnameCond.uid = pMeterMetaInfo->pMeterMeta->uid; - SString* pTableCond = &pCmd->tagCond.tbnameCond.cond; - SStringAlloc(pTableCond, 4096); - assert(pExpr->nSQLOptr == TK_LIKE || pExpr->nSQLOptr == TK_IN); if (pExpr->nSQLOptr == TK_LIKE) { - strcpy(pTableCond->z, tmpTableCondBuf); - pTableCond->n = strlen(pTableCond->z); + char* str = taosStringBuilderGetResult(sb, NULL); + pCmd->tagCond.tbnameCond.cond = strdup(str); return TSDB_CODE_SUCCESS; } - strcpy(pTableCond->z, QUERY_COND_REL_PREFIX_IN); - pTableCond->n += strlen(QUERY_COND_REL_PREFIX_IN); + SStringBuilder sb1 = {0}; + taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); char db[TSDB_METER_ID_LEN] = {0}; // remove the duplicated input table names int32_t num = 0; - char** segments = strsplit(tmpTableCondBuf + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num); - qsort(segments, num, sizeof(void*), tableNameCompar); + char* tableNameString = taosStringBuilderGetResult(sb, NULL); + + char** segments = strsplit(tableNameString + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num); + qsort(segments, num, POINTER_BYTES, tableNameCompar); int32_t j = 1; for (int32_t i = 1; i < num; ++i) { @@ -4025,25 +3882,30 @@ static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_ char* acc = getAccountId(pSql); for (int32_t i = 0; i < num; ++i) { - SStringEnsureRemain(pTableCond, TSDB_METER_ID_LEN); - if (i >= 1) { - pTableCond->z[pTableCond->n++] = TBNAME_LIST_SEP[0]; + taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1); } + char idBuf[TSDB_METER_ID_LEN + 1] = {0}; int32_t xlen = strlen(segments[i]); SSQLToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; - int32_t ret = setObjFullName(pTableCond->z + pTableCond->n, acc, &dbToken, &t, &xlen); + int32_t ret = setObjFullName(idBuf, acc, &dbToken, &t, &xlen); if (ret != TSDB_CODE_SUCCESS) { + taosStringBuilderDestroy(&sb1); tfree(segments); - setErrMsg(pCmd, msg); + + invalidSqlErrMsg(pCmd, msg); return ret; } - pTableCond->n += xlen; + 
taosStringBuilderAppendString(&sb1, idBuf); } + char* str = taosStringBuilderGetResult(&sb1, NULL); + pCmd->tagCond.tbnameCond.cond = strdup(str); + + taosStringBuilderDestroy(&sb1); tfree(segments); return TSDB_CODE_SUCCESS; } @@ -4082,8 +3944,7 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, tSQLExpr* pExpr) { if (!isExprDirectParentOfLeaftNode(pExpr)) { if (pExpr->nSQLOptr == TK_OR) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } getTimeRangeFromExpr(pCmd, pExpr->pLeft); @@ -4104,8 +3965,7 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, tSQLExpr* pExpr) { TSKEY etime = INT64_MAX; if (getTimeRange(&stime, &etime, pRight, pExpr->nSQLOptr, pMeterMeta->precision) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg0); } // update the timestamp query range @@ -4130,8 +3990,7 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SCondExpr* pCondExpr) { if (pCmd->numOfTables == 1) { return TSDB_CODE_SUCCESS; } else { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } } @@ -4139,14 +3998,12 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SCondExpr* pCondExpr) { if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // for stable join, tag columns // must be present for join if (pCondExpr->pJoinExpr == NULL) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } } if (!pCondExpr->tsJoin) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } return TSDB_CODE_SUCCESS; @@ -4174,129 +4031,127 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) { } } -int32_t parseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr) { - SSqlCmd* pCmd = &pSql->cmd; - - if (pExpr == NULL) { - return TSDB_CODE_SUCCESS; - } - - pCmd->stime = 0; - pCmd->etime = INT64_MAX; - - int32_t ret = TSDB_CODE_SUCCESS; - - const char* msg1 = "invalid expression"; - SCondExpr condExpr = {0}; - - if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; - } - - ret = doParseWhereClause(pSql, pExpr, &condExpr); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - +static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SCondExpr* pCondExpr) { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); if (QUERY_IS_JOIN_QUERY(pCmd->type) && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { SColumnIndex index = {0}; - getColumnIndexByNameEx(&condExpr.pJoinExpr->pLeft->colInfo, pCmd, &index); + getColumnIndexByNameEx(&pCondExpr->pJoinExpr->pLeft->colInfo, pCmd, &index); pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); int32_t columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; addRequiredTagColumn(pCmd, columnInfo, index.tableIndex); - getColumnIndexByNameEx(&condExpr.pJoinExpr->pRight->colInfo, pCmd, &index); + getColumnIndexByNameEx(&pCondExpr->pJoinExpr->pRight->colInfo, pCmd, &index); pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; addRequiredTagColumn(pCmd, columnInfo, index.tableIndex); } +} + +static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SCondExpr* pCondExpr, tSQLExpr** pExpr) { + int32_t ret = TSDB_CODE_SUCCESS; + + if (pCondExpr->pTagCond != NULL) { + for (int32_t i = 0; i < pCmd->numOfTables; ++i) { + tSQLExpr* p1 = extractExprForSTable(pExpr, pCmd, i); + + SMeterMetaInfo* pMeterMetaInfo = 
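The rework above drops the fixed-size SString buffers and assembles the tbname IN (...) condition with the taosStringBuilder API, so the condition can grow past any preset length. A rough stand-in for that pattern in portable C, with a hypothetical growable buffer in place of the real builder and a placeholder prefix/separator:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical growable buffer standing in for SStringBuilder. */
typedef struct { char *buf; size_t len, cap; } SBufSketch;

static void sbAppend(SBufSketch *sb, const char *s) {
  size_t n = strlen(s);
  if (sb->len + n + 1 > sb->cap) {
    sb->cap = (sb->cap == 0) ? 64 : sb->cap * 2;
    while (sb->cap < sb->len + n + 1) sb->cap *= 2;
    sb->buf = realloc(sb->buf, sb->cap); /* error handling omitted for brevity */
  }
  memcpy(sb->buf + sb->len, s, n + 1);
  sb->len += n;
}

int main(void) {
  const char *names[] = {"t1", "t2", "t3"};
  SBufSketch sb = {0};

  sbAppend(&sb, "IN|");            /* placeholder for QUERY_COND_REL_PREFIX_IN */
  for (int i = 0; i < 3; ++i) {
    if (i > 0) sbAppend(&sb, ","); /* placeholder for TBNAME_LIST_SEP */
    sbAppend(&sb, names[i]);
  }

  printf("%s\n", sb.buf);          /* IN|t1,t2,t3 */
  free(sb.buf);
  return 0;
}
```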
tscGetMeterMetaInfo(pCmd, i); + + char c[TSDB_MAX_TAGS_LEN] = {0}; + char* str = c; + + if ((ret = getTagCondString(pCmd, p1, &str)) != TSDB_CODE_SUCCESS) { + return ret; + } + + tsSetMetricQueryCond(&pCmd->tagCond, pMeterMetaInfo->pMeterMeta->uid, c); + + doCompactQueryExpr(pExpr); + tSQLExprDestroy(p1); + } + + pCondExpr->pTagCond = NULL; + } - cleanQueryExpr(&condExpr); return ret; } +int32_t parseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr) { + if (pExpr == NULL) { + return TSDB_CODE_SUCCESS; + } -int32_t doParseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr, SCondExpr* condExpr) { const char* msg = "invalid filter expression"; + const char* msg1 = "invalid expression"; + + int32_t ret = TSDB_CODE_SUCCESS; - int32_t type = 0; SSqlCmd* pCmd = &pSql->cmd; + pCmd->stime = 0; + pCmd->etime = INT64_MAX; - /* - * tags query condition may be larger than 512bytes, - * therefore, we need to prepare enough large space - */ - char tableNameCond[TSDB_MAX_SQL_LEN] = {0}; + // tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space + SStringBuilder sb = {0}; + SCondExpr condExpr = {0}; - int32_t ret = TSDB_CODE_SUCCESS; - if ((ret = getQueryCondExpr(pCmd, pExpr, condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) { + if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) { + return invalidSqlErrMsg(pCmd, msg1); + } + + int32_t type = 0; + if ((ret = getQueryCondExpr(pCmd, pExpr, &condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) { return ret; } doCompactQueryExpr(pExpr); // after expression compact, the expression tree is only include tag query condition - condExpr->pTagCond = (*pExpr); + condExpr.pTagCond = (*pExpr); // 1. check if it is a join query - if ((ret = validateJoinExpr(pCmd, condExpr)) != TSDB_CODE_SUCCESS) { + if ((ret = validateJoinExpr(pCmd, &condExpr)) != TSDB_CODE_SUCCESS) { return ret; } // 2. get the query time range - if ((ret = getTimeRangeFromExpr(pCmd, condExpr->pTimewindow)) != TSDB_CODE_SUCCESS) { + if ((ret = getTimeRangeFromExpr(pCmd, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { return ret; } // 3. get the tag query condition - if (condExpr->pTagCond != NULL) { - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - tSQLExpr* p1 = extractExprForSTable(pExpr, pCmd, i); - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); - - char c[TSDB_MAX_TAGS_LEN] = {0}; - char* str = c; - if ((ret = getTagCondString(pCmd, p1, &str)) != TSDB_CODE_SUCCESS) { - return ret; - } - - tsSetMetricQueryCond(&pCmd->tagCond, pMeterMetaInfo->pMeterMeta->uid, c); - - doCompactQueryExpr(pExpr); - tSQLExprDestroy(p1); - } - - condExpr->pTagCond = NULL; + if ((ret = getTagQueryCondExpr(pCmd, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) { + return ret; } // 4. get the table name query condition - if ((ret = getTablenameCond(pCmd, condExpr->pTableCond, tableNameCond)) != TSDB_CODE_SUCCESS) { + if ((ret = getTablenameCond(pCmd, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) { return ret; } // 5. other column query condition - if ((ret = getColumnQueryCondInfo(pCmd, condExpr->pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { + if ((ret = getColumnQueryCondInfo(pCmd, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { return ret; } // 6. join condition - if ((ret = getJoinCondInfo(pSql, condExpr->pJoinExpr)) != TSDB_CODE_SUCCESS) { + if ((ret = getJoinCondInfo(pSql, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) { return ret; } // 7. query condition for table name - pCmd->tagCond.relType = (condExpr->relType == TK_AND) ? 
TSDB_RELATION_AND : TSDB_RELATION_OR; - ret = setTableCondForMetricQuery(pSql, condExpr->pTableCond, condExpr->tableCondIndex, tableNameCond); + pCmd->tagCond.relType = (condExpr.relType == TK_AND) ? TSDB_RELATION_AND : TSDB_RELATION_OR; + + ret = setTableCondForMetricQuery(pSql, condExpr.pTableCond, condExpr.tableCondIndex, &sb); + taosStringBuilderDestroy(&sb); + if (!validateFilterExpr(pCmd)) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } + doAddJoinTagsColumnsIntoTagList(pCmd, &condExpr); + + cleanQueryExpr(&condExpr); return ret; } @@ -4412,8 +4267,7 @@ int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd) { for (int32_t j = i + 1; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { if (strncasecmp(fieldName, tscFieldInfoGetField(pCmd, j)->name, TSDB_COL_NAME_LEN) == 0) { const char* msg = "duplicated column name in new table"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } } } @@ -4431,8 +4285,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQuerySQL* pQuerySQL) { const char* msg2 = "invalid fill option"; if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } if (strncasecmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) { @@ -4452,8 +4305,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQuerySQL* pQuerySQL) { pCmd->interpoType = TSDB_INTERPO_SET_VALUE; if (pFillToken->nExpr == 1) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } int32_t startPos = 1; @@ -4478,8 +4330,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQuerySQL* pQuerySQL) { int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pCmd->defaultVal[i], pFields->type); if (ret != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { @@ -4501,8 +4352,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQuerySQL* pQuerySQL) { } } } else { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } return TSDB_CODE_SUCCESS; @@ -4549,13 +4399,11 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema */ if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { if (pSortorder->nExpr > 1) { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg0); } } else { if (pSortorder->nExpr > 2) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } } @@ -4572,13 +4420,11 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // metric query if (getColumnIndexByNameEx(&columnName, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } bool orderByTags = false; bool orderByTS = false; - bool orderByCol = false; if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) { int32_t relTagIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; @@ -4594,8 +4440,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema } if (!(orderByTags || orderByTS) && !isTopBottomQuery(pCmd)) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } else { assert(!(orderByTags && orderByTS)); } @@ -4611,8 
+4456,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema pExpr = tscSqlExprGet(pCmd, 1); if (pExpr->colInfo.colIdx != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } pCmd->order.order = pQuerySql->pSortOrder->a[0].sortOrder; @@ -4636,13 +4480,11 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema tVariant* pVar2 = &pSortorder->a[1].pVar; SSQLToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; if (getColumnIndexByNameEx(&cname, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } else { pCmd->order.order = pSortorder->a[1].sortOrder; pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; @@ -4651,13 +4493,11 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema } else { // meter query if (getColumnIndexByNameEx(&columnName, pCmd, &index) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pCmd)) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } if (isTopBottomQuery(pCmd)) { @@ -4667,8 +4507,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema pExpr = tscSqlExprGet(pCmd, 1); if (pExpr->colInfo.colIdx != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } pCmd->order.order = pQuerySql->pSortOrder->a[0].sortOrder; @@ -4693,14 +4532,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (tscValidateName(&(pAlterSQL->name)) != TSDB_CODE_SUCCESS) { const char* msg = "invalid table name"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (setMeterID(pSql, &(pAlterSQL->name), 0) != TSDB_CODE_SUCCESS) { const char* msg = "table name too long"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo->name, DEFAULT_TABLE_INDEX); @@ -4715,18 +4552,15 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { pInfo->sqlType == ALTER_TABLE_TAGS_CHG) { if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { const char* msg = "manipulation of tag available for metric"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } } else if ((pInfo->sqlType == ALTER_TABLE_TAGS_SET) && (UTIL_METER_IS_METRIC(pMeterMetaInfo))) { const char* msg = "set tag value only available for table"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } else if ((pInfo->sqlType == ALTER_TABLE_ADD_COLUMN || pInfo->sqlType == ALTER_TABLE_DROP_COLUMN) && UTIL_METER_IS_CREATE_FROM_METRIC(pMeterMetaInfo)) { const char* msg = "column can only be modified by metric"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (pInfo->sqlType == ALTER_TABLE_TAGS_ADD) { @@ -4735,8 +4569,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* 
pInfo) { tFieldList* pFieldList = pAlterSQL->pAddColumns; if (pFieldList->nField > 1) { const char* msg = "only support add one tag"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (!validateOneTags(pCmd, &pFieldList->p[0])) { @@ -4756,20 +4589,17 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg5 = "primary tag cannot be dropped"; if (pMeterMeta->numOfTags == 1) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } // numOfTags == 1 if (pAlterSQL->varList->nExpr > 1) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } tVariantListItem* pItem = &pAlterSQL->varList->a[0]; if (pItem->pVar.nLen > TSDB_COL_NAME_LEN) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } int32_t idx = -1; @@ -4785,11 +4615,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (idx == -1) { - setErrMsg(pCmd, msg4); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg4); } else if (idx == 0) { - setErrMsg(pCmd, msg5); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg5); } char name[128] = {0}; @@ -4812,13 +4640,11 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pDstItem = &pAlterSQL->varList->a[1]; if (pSrcItem->pVar.nLen >= TSDB_COL_NAME_LEN || pDstItem->pVar.nLen >= TSDB_COL_NAME_LEN) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } if (pSrcItem->pVar.nType != TSDB_DATA_TYPE_BINARY || pDstItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } SColumnIndex srcIndex = COLUMN_INDEX_INITIALIZER; @@ -4858,8 +4684,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariant* pTagName = &pVarList->a[0].pVar; if (pTagName->nLen > TSDB_COL_NAME_LEN) { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg0); } int32_t tagsIndex = -1; @@ -4873,20 +4698,17 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (tagsIndex == -1) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } if (tVariantDump(&pVarList->a[1].pVar, pCmd->payload, pTagsSchema[tagsIndex].type) != TSDB_CODE_SUCCESS) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } // validate the length of binary if ((pTagsSchema[tagsIndex].type == TSDB_DATA_TYPE_BINARY || pTagsSchema[tagsIndex].type == TSDB_DATA_TYPE_NCHAR) && pVarList->a[1].pVar.nLen > pTagsSchema[tagsIndex].bytes) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } char name[128] = {0}; @@ -4900,8 +4722,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tFieldList* pFieldList = pAlterSQL->pAddColumns; if (pFieldList->nField > 1) { const char* msg = "only support add one column"; - setErrMsg(pCmd, msg); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } if (!validateOneColumn(pCmd, &pFieldList->p[0])) { @@ -4920,19 +4741,16 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg5 = "primary timestamp column cannot be dropped"; if (pMeterMeta->numOfColumns == TSDB_MIN_COLUMNS) { // - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return 
invalidSqlErrMsg(pCmd, msg1); } if (pAlterSQL->varList->nExpr > 1) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } tVariantListItem* pItem = &pAlterSQL->varList->a[0]; if (pItem->pVar.nLen > TSDB_COL_NAME_LEN) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } int32_t idx = -1; @@ -4947,11 +4765,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (idx == -1) { - setErrMsg(pCmd, msg4); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg4); } else if (idx == 0) { - setErrMsg(pCmd, msg5); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg5); } char name[128] = {0}; @@ -4969,15 +4785,13 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd) { const char* msg1 = "functions not allowed in select clause"; if (pCmd->nAggTimeInterval != 0 && pCmd->nAggTimeInterval < 10) { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg0); } for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { int32_t functId = tscSqlExprGet(pCmd, i)->functionId; if (!IS_STREAM_QUERY_VALID(aAggs[functId].nStatus)) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } } @@ -4986,11 +4800,28 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd) { int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd) { bool isProjectionFunction = false; - const char* msg = "column projection is not compatible with interval"; + const char* msg1 = "column projection is not compatible with interval"; // multi-output set/ todo refactor for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { SSqlExpr* pExpr = tscSqlExprGet(pCmd, k); + + // projection query on primary timestamp, the selectivity function needs to be present. + if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + bool hasSelectivity = false; + for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { + SSqlExpr* pEx = tscSqlExprGet(pCmd, j); + if ((aAggs[pEx->functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) { + hasSelectivity = true; + break; + } + } + + if (hasSelectivity) { + continue; + } + } + if (pExpr->functionId == TSDB_FUNC_PRJ || pExpr->functionId == TSDB_FUNC_DIFF || pExpr->functionId == TSDB_FUNC_ARITHM) { isProjectionFunction = true; @@ -4998,15 +4829,15 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd) { } if (isProjectionFunction) { - setErrMsg(pCmd, msg); + invalidSqlErrMsg(pCmd, msg1); } return isProjectionFunction == true ? 
TSDB_CODE_INVALID_SQL : TSDB_CODE_SUCCESS; } typedef struct SDNodeDynConfOption { - char* name; - int32_t len; + char* name; // command name + int32_t len; // name string length } SDNodeDynConfOption; int32_t validateDNodeConfig(tDCLSQL* pOptions) { @@ -5014,7 +4845,7 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) { return TSDB_CODE_INVALID_SQL; } - SDNodeDynConfOption DNODE_DYNAMIC_CFG_OPTIONS[14] = { + const SDNodeDynConfOption DNODE_DYNAMIC_CFG_OPTIONS[14] = { {"resetLog", 8}, {"resetQueryCache", 15}, {"dDebugFlag", 10}, {"rpcDebugFlag", 12}, {"tmrDebugFlag", 12}, {"cDebugFlag", 10}, {"uDebugFlag", 10}, {"mDebugFlag", 10}, {"sdbDebugFlag", 12}, {"httpDebugFlag", 13}, {"monitorDebugFlag", 16}, {"qDebugflag", 10}, @@ -5025,7 +4856,7 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) { if (pOptions->nTokens == 2) { // reset log and reset query cache does not need value for (int32_t i = 0; i < 2; ++i) { - SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i]; + const SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i]; if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) { return TSDB_CODE_SUCCESS; } @@ -5048,7 +4879,7 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) { } for (int32_t i = 2; i < tListLen(DNODE_DYNAMIC_CFG_OPTIONS) - 1; ++i) { - SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i]; + const SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i]; if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) { /* options is valid */ @@ -5150,11 +4981,12 @@ int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { // handle the limit offset value, validate the limit pCmd->limit = pQuerySql->limit; + pCmd->globalLimit = pCmd->limit.limit; + pCmd->slimit = pQuerySql->slimit; if (pCmd->slimit.offset < 0 || pCmd->limit.offset < 0) { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg0); } if (pCmd->limit.limit == 0) { @@ -5163,9 +4995,8 @@ int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { } if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - bool queryOnTags = false; - int32_t ret = tscQueryOnlyMetricTags(pCmd, &queryOnTags); - if (ret != TSDB_CODE_SUCCESS) { + bool queryOnTags = false; + if (tscQueryOnlyMetricTags(pCmd, &queryOnTags) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } @@ -5173,8 +5004,7 @@ int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { pCmd->command = TSDB_SQL_RETRIEVE_TAGS; } else { if (tscProjectionQueryOnMetric(pCmd) && (pCmd->slimit.limit > 0 || pCmd->slimit.offset > 0)) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } } @@ -5205,16 +5035,14 @@ int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { pCmd->globalLimit = pCmd->limit.limit; } else { if (pCmd->slimit.limit != -1 || pCmd->slimit.offset != 0) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } // filter the query functions operating on "tbname" column that are not supported by normal columns. 
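validateDNodeConfig above matches each option token with strncasecmp plus an exact length comparison against a const table, so a prefix such as "dDebug" cannot accidentally match "dDebugFlag". A small standalone sketch of that lookup pattern (the option table here is hypothetical and shortened):

```c
#include <stdio.h>
#include <strings.h> /* strncasecmp */

typedef struct { const char *name; int len; } SOptSketch;

/* Return the table index of an exact, case-insensitive match, or -1. */
static int findOption(const SOptSketch *opts, int numOfOpts, const char *tok, int tokLen) {
  for (int i = 0; i < numOfOpts; ++i) {
    if (opts[i].len == tokLen && strncasecmp(opts[i].name, tok, tokLen) == 0) {
      return i;
    }
  }
  return -1;
}

int main(void) {
  const SOptSketch opts[] = {{"resetLog", 8}, {"dDebugFlag", 10}, {"monitor", 7}};
  printf("%d\n", findOption(opts, 3, "DDEBUGFLAG", 10)); /* 1: case-insensitive match  */
  printf("%d\n", findOption(opts, 3, "dDebug", 6));      /* -1: the length must match too */
  return 0;
}
```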
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); if (pExpr->colInfo.colIdx == TSDB_TBNAME_COLUMN_INDEX) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } } } @@ -5222,57 +5050,43 @@ int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { return TSDB_CODE_SUCCESS; } -static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { - pMsg->precision = TSDB_TIME_PRECISION_MILLI; // millisecond by default +static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { + const char* msg = "invalid number of options"; pMsg->daysToKeep = htonl(-1); pMsg->daysToKeep1 = htonl(-1); pMsg->daysToKeep2 = htonl(-1); - pMsg->blocksPerMeter = (pCreateDb->numOfBlocksPerTable == 0) ? htons(-1) : htons(pCreateDb->numOfBlocksPerTable); - pMsg->compression = (pCreateDb->compressionLevel == 0) ? -1 : pCreateDb->compressionLevel; - - pMsg->commitLog = (pCreateDb->commitLog == 0) ? -1 : pCreateDb->commitLog; - pMsg->commitTime = (pCreateDb->commitTime == 0) ? htonl(-1) : htonl(pCreateDb->commitTime); - pMsg->maxSessions = (pCreateDb->tablesPerVnode == 0) ? htonl(-1) : htonl(pCreateDb->tablesPerVnode); - pMsg->cacheNumOfBlocks.fraction = (pCreateDb->numOfAvgCacheBlocks == 0) ? -1 : pCreateDb->numOfAvgCacheBlocks; - pMsg->cacheBlockSize = (pCreateDb->cacheBlockSize == 0) ? htonl(-1) : htonl(pCreateDb->cacheBlockSize); - pMsg->rowsInFileBlock = (pCreateDb->rowPerFileBlock == 0) ? htonl(-1) : htonl(pCreateDb->rowPerFileBlock); - pMsg->daysPerFile = (pCreateDb->daysPerFile == 0) ? htonl(-1) : htonl(pCreateDb->daysPerFile); - pMsg->replications = (pCreateDb->replica == 0) ? -1 : pCreateDb->replica; -} - -int32_t parseCreateDBOptions(SCreateDBInfo* pCreateDbSql, SSqlCmd* pCmd) { - const char* msg0 = "invalid number of options"; - const char* msg1 = "invalid time precision"; - - SCreateDbMsg* pMsg = (SCreateDbMsg*)(pCmd->payload + tsRpcHeadSize + sizeof(SMgmtHead)); - setCreateDBOption(pMsg, pCreateDbSql); - - if (pCreateDbSql->keep != NULL) { - switch (pCreateDbSql->keep->nExpr) { + tVariantList* pKeep = pCreateDb->keep; + if (pKeep != NULL) { + switch (pKeep->nExpr) { case 1: - pMsg->daysToKeep = htonl(pCreateDbSql->keep->a[0].pVar.i64Key); + pMsg->daysToKeep = htonl(pKeep->a[0].pVar.i64Key); break; case 2: { - pMsg->daysToKeep = htonl(pCreateDbSql->keep->a[0].pVar.i64Key); - pMsg->daysToKeep1 = htonl(pCreateDbSql->keep->a[1].pVar.i64Key); + pMsg->daysToKeep = htonl(pKeep->a[0].pVar.i64Key); + pMsg->daysToKeep1 = htonl(pKeep->a[1].pVar.i64Key); break; } case 3: { - pMsg->daysToKeep = htonl(pCreateDbSql->keep->a[0].pVar.i64Key); - pMsg->daysToKeep1 = htonl(pCreateDbSql->keep->a[1].pVar.i64Key); - pMsg->daysToKeep2 = htonl(pCreateDbSql->keep->a[2].pVar.i64Key); + pMsg->daysToKeep = htonl(pKeep->a[0].pVar.i64Key); + pMsg->daysToKeep1 = htonl(pKeep->a[1].pVar.i64Key); + pMsg->daysToKeep2 = htonl(pKeep->a[2].pVar.i64Key); break; } - default: { - setErrMsg(pCmd, msg0); - return TSDB_CODE_INVALID_SQL; - } + default: { return invalidSqlErrMsg(pCmd, msg); } } } - SSQLToken* pToken = &pCreateDbSql->precision; + return TSDB_CODE_SUCCESS; +} + +static int32_t setTimePrecisionOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDbInfo) { + const char* msg = "invalid time precision"; + + pMsg->precision = TSDB_TIME_PRECISION_MILLI; // millisecond by default + + SSQLToken* pToken = &pCreateDbInfo->precision; if (pToken->n > 0) { pToken->n = strdequote(pToken->z); @@ 
-5284,14 +5098,46 @@ int32_t parseCreateDBOptions(SCreateDBInfo* pCreateDbSql, SSqlCmd* pCmd) { strlen(TSDB_TIME_PRECISION_MICRO_STR) == pToken->n) { pMsg->precision = TSDB_TIME_PRECISION_MICRO; } else { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg); } } return TSDB_CODE_SUCCESS; } +static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { + pMsg->blocksPerMeter = htons(pCreateDb->numOfBlocksPerTable); + pMsg->compression = pCreateDb->compressionLevel; + + pMsg->commitLog = (char)pCreateDb->commitLog; + pMsg->commitTime = htonl(pCreateDb->commitTime); + pMsg->maxSessions = htonl(pCreateDb->tablesPerVnode); + pMsg->cacheNumOfBlocks.fraction = pCreateDb->numOfAvgCacheBlocks; + pMsg->cacheBlockSize = htonl(pCreateDb->cacheBlockSize); + pMsg->rowsInFileBlock = htonl(pCreateDb->rowPerFileBlock); + pMsg->daysPerFile = htonl(pCreateDb->daysPerFile); + pMsg->replications = pCreateDb->replica; +} + +int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) { + SCreateDbMsg* pMsg = (SCreateDbMsg*)(pCmd->payload + tsRpcHeadSize + sizeof(SMgmtHead)); + setCreateDBOption(pMsg, pCreateDbSql); + + if (setKeepOption(pCmd, pMsg, pCreateDbSql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + if (setTimePrecisionOption(pCmd, pMsg, pCreateDbSql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + if (tscCheckCreateDbParams(pCmd, pMsg) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} + void tscAddTimestampColumn(SSqlCmd* pCmd, int16_t functionId, int16_t tableIndex) { // the first column not timestamp column, add it SSqlExpr* pExpr = NULL; @@ -5320,7 +5166,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIn if (pExpr->functionId != TSDB_FUNC_TAG) { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - int16_t columnInfo = tscGetJoinTagColIndexByUid(pCmd, pMeterMetaInfo->pMeterMeta->uid); + int16_t columnInfo = tscGetJoinTagColIndexByUid(&pCmd->tagCond, pMeterMetaInfo->pMeterMeta->uid); SColumnIndex index = {.tableIndex = 0, .columnIndex = columnInfo}; SSchema* pSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); @@ -5382,9 +5228,6 @@ static void doUpdateSqlFunctionForTagPrj(SSqlCmd* pCmd) { } } - int16_t resType = 0; - int16_t resBytes = 0; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); @@ -5398,6 +5241,26 @@ static void doUpdateSqlFunctionForTagPrj(SSqlCmd* pCmd) { } } +static void doUpdateSqlFunctionForColPrj(SSqlCmd* pCmd) { + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + if (pExpr->functionId == TSDB_FUNC_PRJ) { + bool qualifiedCol = false; + for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) { + if (pExpr->colInfo.colId == pCmd->groupbyExpr.columnInfo[j].colId) { + qualifiedCol = true; + + pExpr->param[0].i64Key = 1; // limit the output to be 1 for each state value + pExpr->numOfParams = 1; + break; + } + } + + assert(qualifiedCol); + } + } +} + static bool tagColumnInGroupby(SSqlGroupbyExpr* pGroupbyExpr, int16_t columnId) { for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) { if (columnId == pGroupbyExpr->columnInfo[j].colId && pGroupbyExpr->columnInfo[j].flag == TSDB_COL_TAG) { @@ -5461,10 +5324,9 @@ static void updateTagPrjFunction(SSqlCmd* pCmd) { */ static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) { const char* msg1 = "only one 
selectivity function allowed in presence of tags function"; - const char* msg2 = "functions not allowed"; + const char* msg3 = "aggregation function should not be mixed up with projection"; bool tagColExists = false; - int16_t numOfTimestamp = 0; // primary timestamp column int16_t numOfSelectivity = 0; int16_t numOfAggregation = 0; @@ -5477,25 +5339,25 @@ static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) { } } - if (tagColExists) { // check if the selectivity function exists - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int16_t functionId = tscSqlExprGet(pCmd, i)->functionId; - if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS) { - continue; - } + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + int16_t functionId = tscSqlExprGet(pCmd, i)->functionId; + if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS || + functionId == TSDB_FUNC_ARITHM) { + continue; + } - if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { - numOfSelectivity++; - } else { - numOfAggregation++; - } + if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { + numOfSelectivity++; + } else { + numOfAggregation++; } + } + if (tagColExists) { // check if the selectivity function exists // When the tag projection function on tag column that is not in the group by clause, aggregation function and // selectivity function exist in select clause is not allowed. - if(numOfAggregation > 0) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + if (numOfAggregation > 0) { + return invalidSqlErrMsg(pCmd, msg1); } /* @@ -5503,6 +5365,7 @@ static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) { */ if (numOfSelectivity == 1) { doUpdateSqlFunctionForTagPrj(pCmd); + doUpdateSqlFunctionForColPrj(pCmd); } else if (numOfSelectivity > 1) { /* * If more than one selectivity functions exist, all the selectivity functions must be last_row. 
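checkUpdateTagPrjFunctions now classifies every output column up front by testing the TSDB_FUNCSTATE_SELECTIVITY bit in the function's status word, and only afterwards decides whether the tag/column projections need rewriting. A self-contained sketch of that flag-counting step, with a hypothetical status table and flag value standing in for aAggs and the real macro:

```c
#include <stdint.h>
#include <stdio.h>

#define FUNCSTATE_SELECTIVITY 0x4 /* hypothetical flag, stands in for TSDB_FUNCSTATE_SELECTIVITY */

typedef struct { const char *name; uint32_t status; } SFuncSketch;

int main(void) {
  /* hypothetical function table: top/last_row carry the selectivity flag, sum does not */
  const SFuncSketch funcs[] = {
      {"top", FUNCSTATE_SELECTIVITY}, {"sum", 0}, {"last_row", FUNCSTATE_SELECTIVITY}};

  int numOfSelectivity = 0, numOfAggregation = 0;
  for (int i = 0; i < 3; ++i) {
    if ((funcs[i].status & FUNCSTATE_SELECTIVITY) != 0) {
      numOfSelectivity++;
    } else {
      numOfAggregation++;
    }
  }

  printf("selectivity=%d aggregation=%d\n", numOfSelectivity, numOfAggregation); /* 2 1 */
  return 0;
}
```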
@@ -5515,12 +5378,24 @@ static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) { } if (((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) && (functionId != TSDB_FUNC_LAST_ROW)) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } } doUpdateSqlFunctionForTagPrj(pCmd); + doUpdateSqlFunctionForColPrj(pCmd); + } + } else { + if ((pCmd->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) == TSDB_QUERY_TYPE_PROJECTION_QUERY) { + if (numOfAggregation > 0 && pCmd->groupbyExpr.numOfGroupCols == 0) { + return invalidSqlErrMsg(pCmd, msg3); + } + + if (numOfAggregation > 0 || numOfSelectivity > 0) { + // clear the projection type flag + pCmd->type &= (~TSDB_QUERY_TYPE_PROJECTION_QUERY); + doUpdateSqlFunctionForColPrj(pCmd); + } } } @@ -5568,8 +5443,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd) { } else { // if this query is "group by" normal column, interval is not allowed if (pCmd->nAggTimeInterval > 0) { - setErrMsg(pCmd, msg2); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } bool hasGroupColumn = false; @@ -5595,30 +5469,22 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd) { int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) { const char* msg1 = "functions/columns not allowed in group by query"; - const char* msg2 = "interval not allowed in group by normal column"; + const char* msg2 = "projection query on columns not allowed"; const char* msg3 = "group by not allowed on projection query"; - const char* msg4 = "tags retrieve not compatible with group by"; - const char* msg5 = "retrieve tags not compatible with group by "; + const char* msg4 = "retrieve tags not compatible with group by or interval query"; - SSqlCmd* pCmd = &pSql->cmd; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlCmd* pCmd = &pSql->cmd; // only retrieve tags, group by is not supportted if (pCmd->command == TSDB_SQL_RETRIEVE_TAGS) { - if (pCmd->groupbyExpr.numOfGroupCols > 0) { - setErrMsg(pCmd, msg5); - return TSDB_CODE_INVALID_SQL; + if (pCmd->groupbyExpr.numOfGroupCols > 0 || pCmd->nAggTimeInterval > 0) { + return invalidSqlErrMsg(pCmd, msg4); } else { return TSDB_CODE_SUCCESS; } } if (pCmd->groupbyExpr.numOfGroupCols > 0) { - SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); - int16_t bytes = 0; - int16_t type = 0; - char* name = NULL; - // check if all the tags prj columns belongs to the group by columns if (onlyTagPrjFunction(pCmd) && allTagPrjInGroupby(pCmd)) { updateTagPrjFunction(pCmd); @@ -5634,7 +5500,7 @@ int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) { * group by normal columns. 
* Check if the column projection is identical to the group by column or not */ - if (functId == TSDB_FUNC_PRJ) { + if (functId == TSDB_FUNC_PRJ && pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) { bool qualified = false; for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) { SColIndexEx* pColIndex = &pCmd->groupbyExpr.columnInfo[j]; @@ -5645,19 +5511,17 @@ int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) { } if (!qualified) { - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg2); } } if (IS_MULTIOUTPUT(aAggs[functId].nStatus) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM && - functId != TSDB_FUNC_TAGPRJ) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) { + return invalidSqlErrMsg(pCmd, msg1); } if (functId == TSDB_FUNC_COUNT && pExpr->colInfo.colIdx == TSDB_TBNAME_COLUMN_INDEX) { - setErrMsg(pCmd, msg1); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg1); } } @@ -5675,10 +5539,170 @@ int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) { // projection query on metric does not compatible with "group by" syntax if (tscProjectionQueryOnMetric(pCmd)) { - setErrMsg(pCmd, msg3); - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pCmd, msg3); } + + return TSDB_CODE_SUCCESS; } else { return checkUpdateTagPrjFunctions(pCmd); } } + +int32_t doLocalQueryProcess(SQuerySQL* pQuerySql, SSqlCmd* pCmd) { + const char* msg1 = "only one expression allowed"; + const char* msg2 = "invalid expression in select clause"; + const char* msg3 = "invalid function"; + + tSQLExprList* pExprList = pQuerySql->pSelection; + if (pExprList->nExpr != 1) { + return invalidSqlErrMsg(pCmd, msg1); + } + + tSQLExpr* pExpr = pExprList->a[0].pNode; + if (pExpr->operand.z == NULL) { + return invalidSqlErrMsg(pCmd, msg2); + } + + // TODO redefine the function + SDNodeDynConfOption functionsInfo[5] = {{"database()", 10}, + {"server_version()", 16}, + {"server_status()", 15}, + {"client_version()", 16}, + {"current_user()", 14}}; + + int32_t index = -1; + for (int32_t i = 0; i < tListLen(functionsInfo); ++i) { + if (strncasecmp(functionsInfo[i].name, pExpr->operand.z, functionsInfo[i].len) == 0 && + functionsInfo[i].len == pExpr->operand.n) { + index = i; + break; + } + } + + SSqlExpr* pExpr1 = tscSqlExprInsertEmpty(pCmd, 0, TSDB_FUNC_TAG_DUMMY); + if (pExprList->a[0].aliasName != NULL) { + strncpy(pExpr1->aliasName, pExprList->a[0].aliasName, tListLen(pExpr1->aliasName)); + } else { + strncpy(pExpr1->aliasName, functionsInfo[index].name, tListLen(pExpr1->aliasName)); + } + + switch (index) { + case 0: + pCmd->command = TSDB_SQL_CURRENT_DB; + return TSDB_CODE_SUCCESS; + case 1: + pCmd->command = TSDB_SQL_SERV_VERSION; + return TSDB_CODE_SUCCESS; + case 2: + pCmd->command = TSDB_SQL_SERV_STATUS; + return TSDB_CODE_SUCCESS; + case 3: + pCmd->command = TSDB_SQL_CLI_VERSION; + return TSDB_CODE_SUCCESS; + case 4: + pCmd->command = TSDB_SQL_CURRENT_USER; + return TSDB_CODE_SUCCESS; + default: { return invalidSqlErrMsg(pCmd, msg3); } + } +} + +// can only perform the parameters based on the macro definitation +int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) { + char msg[512] = {0}; + + if (pCreate->commitLog != -1 && (pCreate->commitLog < 0 || pCreate->commitLog > 1)) { + snprintf(msg, tListLen(msg), "invalid db option commitLog: %d, only 0 or 1 allowed", pCreate->commitLog); + return invalidSqlErrMsg(pCmd, msg); + } + + if (pCreate->replications != -1 && + (pCreate->replications < 
TSDB_REPLICA_MIN_NUM || pCreate->replications > TSDB_REPLICA_MAX_NUM)) { + snprintf(msg, tListLen(msg), "invalid db option replications: %d valid range: [%d, %d]", pCreate->replications, + TSDB_REPLICA_MIN_NUM, TSDB_REPLICA_MAX_NUM); + return invalidSqlErrMsg(pCmd, msg); + } + + int32_t val = htonl(pCreate->daysPerFile); + if (val != -1 && (val < TSDB_FILE_MIN_PARTITION_RANGE || val > TSDB_FILE_MAX_PARTITION_RANGE)) { + snprintf(msg, tListLen(msg), "invalid db option daysPerFile: %d valid range: [%d, %d]", val, + TSDB_FILE_MIN_PARTITION_RANGE, TSDB_FILE_MAX_PARTITION_RANGE); + return invalidSqlErrMsg(pCmd, msg); + } + + val = htonl(pCreate->rowsInFileBlock); + if (val != -1 && (val < TSDB_MIN_ROWS_IN_FILEBLOCK || val > TSDB_MAX_ROWS_IN_FILEBLOCK)) { + snprintf(msg, tListLen(msg), "invalid db option rowsInFileBlock: %d valid range: [%d, %d]", val, + TSDB_MIN_ROWS_IN_FILEBLOCK, TSDB_MAX_ROWS_IN_FILEBLOCK); + return invalidSqlErrMsg(pCmd, msg); + } + + val = htonl(pCreate->cacheBlockSize); + if (val != -1 && (val < TSDB_MIN_CACHE_BLOCK_SIZE || val > TSDB_MAX_CACHE_BLOCK_SIZE)) { + snprintf(msg, tListLen(msg), "invalid db option cacheBlockSize: %d valid range: [%d, %d]", val, + TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MAX_CACHE_BLOCK_SIZE); + return invalidSqlErrMsg(pCmd, msg); + } + + val = htonl(pCreate->maxSessions); + if (val != -1 && (val < TSDB_MIN_TABLES_PER_VNODE || val > TSDB_MAX_TABLES_PER_VNODE)) { + snprintf(msg, tListLen(msg), "invalid db option maxSessions: %d valid range: [%d, %d]", val, + TSDB_MIN_TABLES_PER_VNODE, TSDB_MAX_TABLES_PER_VNODE); + return invalidSqlErrMsg(pCmd, msg); + } + + if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO) { + snprintf(msg, tListLen(msg), "invalid db option timePrecision: %d valid value: [%d, %d]", pCreate->precision, + TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO); + return invalidSqlErrMsg(pCmd, msg); + } + + if (pCreate->cacheNumOfBlocks.fraction != -1 && (pCreate->cacheNumOfBlocks.fraction < TSDB_MIN_AVG_BLOCKS || + pCreate->cacheNumOfBlocks.fraction > TSDB_MAX_AVG_BLOCKS)) { + snprintf(msg, tListLen(msg), "invalid db option ablocks: %f valid value: [%d, %d]", + pCreate->cacheNumOfBlocks.fraction, TSDB_MIN_AVG_BLOCKS, TSDB_MAX_AVG_BLOCKS); + return invalidSqlErrMsg(pCmd, msg); + } + + val = htonl(pCreate->commitTime); + if (val != -1 && (val < TSDB_MIN_COMMIT_TIME_INTERVAL || val > TSDB_MAX_COMMIT_TIME_INTERVAL)) { + snprintf(msg, tListLen(msg), "invalid db option commitTime: %d valid range: [%d, %d]", val, + TSDB_MIN_COMMIT_TIME_INTERVAL, TSDB_MAX_COMMIT_TIME_INTERVAL); + return invalidSqlErrMsg(pCmd, msg); + } + + if (pCreate->compression != -1 && + (pCreate->compression < TSDB_MIN_COMPRESSION_LEVEL || pCreate->compression > TSDB_MAX_COMPRESSION_LEVEL)) { + snprintf(msg, tListLen(msg), "invalid db option compression: %d valid range: [%d, %d]", pCreate->compression, + TSDB_MIN_COMPRESSION_LEVEL, TSDB_MAX_COMPRESSION_LEVEL); + return invalidSqlErrMsg(pCmd, msg); + } + + return TSDB_CODE_SUCCESS; +} + +// for debug purpose +void tscPrintSelectClause(SSqlCmd* pCmd) { + if (pCmd == NULL || pCmd->exprsInfo.numOfExprs == 0) { + return; + } + + char* str = calloc(1, 10240); + int32_t offset = 0; + + offset += sprintf(str, "%d [", pCmd->exprsInfo.numOfExprs); + for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + + int32_t size = sprintf(str + offset, "%s(%d)", aAggs[pExpr->functionId].aName, pExpr->colInfo.colId); + offset += size; + + if (i < 
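tscCheckCreateDbParams receives a message whose multi-byte fields were already packed into network byte order by setCreateDBOption, so each check first converts the value back to host order (htonl and ntohl are the same byte swap) before comparing it to the allowed range, with -1 kept as the "use server default" sentinel. A compact illustration of that round trip, using placeholder limits rather than the real TSDB_FILE_MIN/MAX_PARTITION_RANGE values:

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bounds standing in for TSDB_FILE_MIN/MAX_PARTITION_RANGE. */
#define MIN_DAYS_PER_FILE 1
#define MAX_DAYS_PER_FILE 3650

/* The field is stored in network byte order inside the message struct. */
typedef struct { int32_t daysPerFile; } SCreateDbSketch;

static int checkDaysPerFile(const SCreateDbSketch *msg) {
  int32_t val = (int32_t)ntohl((uint32_t)msg->daysPerFile); /* back to host order */
  if (val == -1) return 0;                                   /* -1 means "use default" */
  return (val < MIN_DAYS_PER_FILE || val > MAX_DAYS_PER_FILE) ? -1 : 0;
}

int main(void) {
  SCreateDbSketch msg;
  msg.daysPerFile = (int32_t)htonl(10);           /* packed as the patch does      */
  printf("%d\n", checkDaysPerFile(&msg));         /* 0: within range               */
  msg.daysPerFile = (int32_t)htonl((uint32_t)-1); /* default sentinel survives the swap */
  printf("%d\n", checkDaysPerFile(&msg));         /* 0: treated as default         */
  return 0;
}
```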
pCmd->exprsInfo.numOfExprs - 1) { + str[offset++] = ','; + } + } + + str[offset] = ']'; + printf("%s\n", str); + + free(str); +} diff --git a/src/client/src/tscSQLParserImpl.c b/src/client/src/tscSQLParserImpl.c index 7e1b4a7cf19e75274f65f10d8344c200c7ad8fb7..cc4375fb03896b240bc57fdabcf2728ade996329 100644 --- a/src/client/src/tscSQLParserImpl.c +++ b/src/client/src/tscSQLParserImpl.c @@ -13,16 +13,11 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include - #include "os.h" +#include "taosmsg.h" #include "tglobalcfg.h" -#include "tsql.h" +#include "tlog.h" +#include "tscSQLParser.h" #include "tstoken.h" #include "ttime.h" #include "tutil.h" @@ -507,7 +502,7 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SSQLToken *type) { SQuerySQL *tSetQuerySQLElems(SSQLToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere, tVariantList *pGroupby, tVariantList *pSortOrder, SSQLToken *pInterval, SSQLToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) { - assert(pSelection != NULL && pFrom != NULL && pInterval != NULL && pLimit != NULL && pGLimit != NULL); + assert(pSelection != NULL); SQuerySQL *pQuery = calloc(1, sizeof(SQuerySQL)); pQuery->selectToken = *pSelectToken; @@ -519,13 +514,23 @@ SQuerySQL *tSetQuerySQLElems(SSQLToken *pSelectToken, tSQLExprList *pSelection, pQuery->pSortOrder = pSortOrder; pQuery->pWhere = pWhere; - pQuery->limit = *pLimit; - pQuery->slimit = *pGLimit; + if (pLimit != NULL) { + pQuery->limit = *pLimit; + } - pQuery->interval = *pInterval; - pQuery->sliding = *pSliding; - pQuery->fillType = pFill; + if (pGLimit != NULL) { + pQuery->slimit = *pGLimit; + } + + if (pInterval != NULL) { + pQuery->interval = *pInterval; + } + + if (pSliding != NULL) { + pQuery->sliding = *pSliding; + } + pQuery->fillType = pFill; return pQuery; } @@ -739,3 +744,22 @@ void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken tTokenListAppend(pInfo->pDCLInfo, pPwd); } } + +void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo) { + pDBInfo->numOfBlocksPerTable = 50; + pDBInfo->compressionLevel = -1; + + pDBInfo->commitLog = -1; + pDBInfo->commitTime = -1; + pDBInfo->tablesPerVnode = -1; + pDBInfo->numOfAvgCacheBlocks = -1; + + pDBInfo->cacheBlockSize = -1; + pDBInfo->rowPerFileBlock = -1; + pDBInfo->daysPerFile = -1; + + pDBInfo->replica = -1; + pDBInfo->keep = NULL; + + memset(&pDBInfo->precision, 0, sizeof(SSQLToken)); +} diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c index 9728811ae70286264ed0ee6d91b144a1743a1120..648c25657cf4e60549ed9a60b702aea9d8f1445a 100644 --- a/src/client/src/tscSchemaUtil.c +++ b/src/client/src/tscSchemaUtil.c @@ -13,10 +13,6 @@ * along with this program. If not, see . 
*/ -#include -#include -#include - #include "os.h" #include "taosmsg.h" #include "tschemautil.h" @@ -87,6 +83,13 @@ struct SSchema* tsGetColumnSchema(SMeterMeta* pMeta, int32_t startCol) { return (SSchema*)(((char*)pMeta + sizeof(SMeterMeta)) + startCol * sizeof(SSchema)); } +struct SSchema tsGetTbnameColumnSchema() { + struct SSchema s = {.colId = TSDB_TBNAME_COLUMN_INDEX, .type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_METER_NAME_LEN}; + strcpy(s.name, TSQL_TBNAME_L); + + return s; +} + /** * the MeterMeta data format in memory is as follows: * @@ -127,35 +130,40 @@ bool tsMeterMetaIdentical(SMeterMeta* p1, SMeterMeta* p2) { return memcmp(p1, p2, size) == 0; } -static FORCE_INLINE char* skipSegments(char* input, char delimiter, int32_t num) { +// todo refactor +static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) { for (int32_t i = 0; i < num; ++i) { - while (*input != 0 && *input++ != delimiter) { + while (*input != 0 && *input++ != delim) { }; } return input; } -static FORCE_INLINE void copySegment(char* dst, char* src, char delimiter) { +static FORCE_INLINE size_t copy(char* dst, const char* src, char delimiter) { + size_t len = 0; while (*src != delimiter && *src != 0) { *dst++ = *src++; + len++; } + + return len; } /** - * extract meter name from meterid, which the format of userid.dbname.metername + * extract table name from meterid, which the format of userid.dbname.metername * @param meterId * @return */ -void extractMeterName(char* meterId, char* name) { +void extractTableName(char* meterId, char* name) { char* r = skipSegments(meterId, TS_PATH_DELIMITER[0], 2); - copySegment(name, r, TS_PATH_DELIMITER[0]); + copy(name, r, TS_PATH_DELIMITER[0]); } SSQLToken extractDBName(char* meterId, char* name) { char* r = skipSegments(meterId, TS_PATH_DELIMITER[0], 1); - copySegment(name, r, TS_PATH_DELIMITER[0]); + size_t len = copy(name, r, TS_PATH_DELIMITER[0]); - SSQLToken token = {.z = name, .n = strlen(name), .type = TK_STRING}; + SSQLToken token = {.z = name, .n = len, .type = TK_STRING}; return token; } diff --git a/src/client/src/tscSecondaryMerge.c b/src/client/src/tscSecondaryMerge.c index ac2638f635961bcdab3593994228164d79fdb6a0..737c1342d8b8d852a9fa66fd8c5635be15b4d911 100644 --- a/src/client/src/tscSecondaryMerge.c +++ b/src/client/src/tscSecondaryMerge.c @@ -13,13 +13,7 @@ * along with this program. If not, see . 
*/ -#include -#include -#include -#include -#include - -#include "tlosertree.h" +#include "os.h" #include "tlosertree.h" #include "tscSecondaryMerge.h" #include "tscUtil.h" @@ -437,11 +431,10 @@ void tscDestroyLocalReducer(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; // there is no more result, so we release all allocated resource - SLocalReducer *pLocalReducer = - (SLocalReducer *)__sync_val_compare_and_swap_64(&pRes->pLocalReducer, pRes->pLocalReducer, 0); + SLocalReducer *pLocalReducer = (SLocalReducer*)atomic_exchange_ptr(&pRes->pLocalReducer, NULL); if (pLocalReducer != NULL) { int32_t status = 0; - while ((status = __sync_val_compare_and_swap_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, + while ((status = atomic_val_compare_exchange_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, TSC_LOCALREDUCE_TOBE_FREED)) == TSC_LOCALREDUCE_IN_PROGRESS) { taosMsleep(100); tscTrace("%p waiting for delete procedure, status: %d", pSql, status); @@ -1321,8 +1314,10 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { tscTrace("%s call the drop local reducer", __FUNCTION__); tscDestroyLocalReducer(pSql); - pRes->numOfRows = 0; - pRes->row = 0; + if (pRes) { + pRes->numOfRows = 0; + pRes->row = 0; + } return 0; } @@ -1333,7 +1328,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { // set the data merge in progress int32_t prevStatus = - __sync_val_compare_and_swap_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, TSC_LOCALREDUCE_IN_PROGRESS); + atomic_val_compare_exchange_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, TSC_LOCALREDUCE_IN_PROGRESS); if (prevStatus != TSC_LOCALREDUCE_READY || pLocalReducer == NULL) { assert(prevStatus == TSC_LOCALREDUCE_TOBE_FREED); // it is in tscDestroyLocalReducer function already return TSDB_CODE_SUCCESS; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 6edec3d4773f94bd0c6de975653dfd56f6d4f8a8..37bbd93ec610692fc538d0a6b4d5524ddfb22cd7 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -13,12 +13,6 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include - #include "os.h" #include "tcache.h" #include "trpc.h" @@ -30,7 +24,7 @@ #include "tsclient.h" #include "tscompression.h" #include "tsocket.h" -#include "tsql.h" +#include "tscSQLParser.h" #include "ttime.h" #include "ttimer.h" #include "tutil.h" @@ -65,6 +59,22 @@ void tscPrintMgmtIp() { } #endif +/* + * For each management node, try twice at least in case of poor network situation. + * If the client start to connect to a non-management node from the client, and the first retry may fail due to + * the poor network quality. And then, the second retry get the response with redirection command. + * The retry will not be executed since only *two* retry is allowed in case of single management node in the cluster. + * Therefore, we need to multiply the retry times by factor of 2 to fix this problem. 
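The __sync_* builtins are replaced by the atomic_* wrappers, but the handshake between tscDestroyLocalReducer and the merge path is unchanged: the destroyer flips READY to TOBE_FREED and waits while a merge is IN_PROGRESS. A sequential sketch of that handshake using the plain GCC builtin the wrappers are presumed to map to on this platform:

```c
#include <stdio.h>
#include <unistd.h>

enum { READY = 0, IN_PROGRESS = 1, TOBE_FREED = 2 };

/* Merge side: claim the reducer only while it is still READY. */
static int tryBeginMerge(volatile int *status) {
  return __sync_val_compare_and_swap(status, READY, IN_PROGRESS) == READY;
}

/* Destroy side: mark TOBE_FREED, waiting as long as a merge is in flight. */
static void destroyReducer(volatile int *status) {
  int prev;
  while ((prev = __sync_val_compare_and_swap(status, READY, TOBE_FREED)) == IN_PROGRESS) {
    usleep(100 * 1000);                    /* mirrors taosMsleep(100) in the patch */
  }
  printf("reducer released, previous status:%d\n", prev);
}

int main(void) {
  volatile int status = READY;
  if (tryBeginMerge(&status)) {
    /* ... merge work ... */
    __sync_val_compare_and_swap(&status, IN_PROGRESS, READY);
  }
  destroyReducer(&status);
  return 0;
}
```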
+ */ +static int32_t tscGetMgmtConnMaxRetryTimes() { + int32_t factor = 2; +#ifdef CLUSTER + return tscMgmtIpList.numOfIps * factor; +#else + return 1*factor; +#endif +} + void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { STscObj *pObj = (STscObj *)param; if (pObj == NULL) return; @@ -143,14 +153,14 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) { STscObj *pTscObj = pSql->pTscObj; #ifdef CLUSTER - if (pSql->retry < tscMgmtIpList.numOfIps) { + if (pSql->retry < tscGetMgmtConnMaxRetryTimes()) { *pCode = 0; pSql->retry++; pSql->index = pSql->index % tscMgmtIpList.numOfIps; if (pSql->cmd.command > TSDB_SQL_READ && pSql->index == 0) pSql->index = 1; void *thandle = taosGetConnFromCache(tscConnCache, tscMgmtIpList.ip[pSql->index], TSC_MGMT_VNODE, pTscObj->user); #else - if (pSql->retry < 1) { + if (pSql->retry < tscGetMgmtConnMaxRetryTimes()) { *pCode = 0; pSql->retry++; void *thandle = taosGetConnFromCache(tscConnCache, tsServerIp, TSC_MGMT_VNODE, pTscObj->user); @@ -169,10 +179,11 @@ void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) { connInit.spi = 1; connInit.encrypt = 0; connInit.secret = pSql->pTscObj->pass; + #ifdef CLUSTER connInit.peerIp = tscMgmtIpList.ipstr[pSql->index]; #else - connInit.peerIp = tsServerIpStr; + connInit.peerIp = tsServerIpStr; #endif thandle = taosOpenRpcConn(&connInit, pCode); } @@ -188,10 +199,18 @@ void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) { pSql->vnode = TSC_MGMT_VNODE; #endif } + + // the pSql->res.code is the previous error(status) code. + if (pSql->thandle == NULL && pSql->retry >= pSql->maxRetry) { + if (pSql->res.code != TSDB_CODE_SUCCESS && pSql->res.code != TSDB_CODE_ACTION_IN_PROGRESS) { + *pCode = pSql->res.code; + } + + tscError("%p reach the max retry:%d, code:%d", pSql, pSql->retry, *pCode); + } } void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { - char ipstr[40] = {0}; SVPeerDesc *pVPeersDesc = NULL; static int vidIndex = 0; STscObj * pTscObj = pSql->pTscObj; @@ -202,7 +221,7 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // multiple vnode query - SVnodeSidList *vnodeList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *vnodeList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pMeterMetaInfo->vnodeIndex); if (vnodeList != NULL) { pVPeersDesc = vnodeList->vpeerDesc; } @@ -224,11 +243,12 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { while (pSql->retry < pSql->maxRetry) { (pSql->retry)++; #ifdef CLUSTER + char ipstr[40] = {0}; if (pVPeersDesc[pSql->index].ip == 0) { (pSql->index) = (pSql->index + 1) % TSDB_VNODES_SUPPORT; continue; } - *pCode = 0; + *pCode = TSDB_CODE_SUCCESS; void *thandle = taosGetConnFromCache(tscConnCache, pVPeersDesc[pSql->index].ip, pVPeersDesc[pSql->index].vnode, pTscObj->user); @@ -254,7 +274,7 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { pSql->thandle = thandle; pSql->ip = pVPeersDesc[pSql->index].ip; pSql->vnode = pVPeersDesc[pSql->index].vnode; - tscTrace("%p vnode:%d ip:0x%x index:%d is picked up, pConn:%p", pSql, pVPeersDesc[pSql->index].vnode, + tscTrace("%p vnode:%d ip:%p index:%d is picked up, pConn:%p", pSql, pVPeersDesc[pSql->index].vnode, pVPeersDesc[pSql->index].ip, pSql->index, pSql->thandle); #else *pCode = 0; @@ -284,6 +304,15 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { break; } + + // the pSql->res.code is the 
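tscGetMgmtConnMaxRetryTimes doubles the retry budget so that a redirection received on the first attempt against a given management IP still leaves an attempt to follow it. A trivial sketch of how such a budget could drive the connect loop (constant and function names are illustrative):

```c
#include <stdio.h>

/* Each candidate management IP may need two attempts: one that fails or is
 * redirected, and a second one that follows the redirection. */
#define MGMT_RETRY_FACTOR 2

static int maxMgmtRetryTimes(int numOfMgmtIps) {
  return (numOfMgmtIps > 0 ? numOfMgmtIps : 1) * MGMT_RETRY_FACTOR;
}

int main(void) {
  int max = maxMgmtRetryTimes(3);          /* cluster build: 3 mgmt IPs -> 6 tries */
  for (int retry = 0; retry < max; ++retry) {
    printf("connect attempt %d of %d\n", retry + 1, max);
    /* ... pick the next candidate IP, break out on success ... */
  }
  return 0;
}
```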
previous error(status) code. + if (pSql->thandle == NULL && pSql->retry >= pSql->maxRetry) { + if (pSql->res.code != TSDB_CODE_SUCCESS && pSql->res.code != TSDB_CODE_ACTION_IN_PROGRESS) { + *pCode = pSql->res.code; + } + + tscError("%p reach the max retry:%d, code:%d", pSql, pSql->retry, *pCode); + } } int tscSendMsgToServer(SSqlObj *pSql) { @@ -319,11 +348,19 @@ int tscSendMsgToServer(SSqlObj *pSql) { char *pStart = taosBuildReqHeader(pSql->thandle, pSql->cmd.msgType, buf); if (pStart) { + /* + * this SQL object may be released by other thread due to the completion of this query even before the log + * is dumped to log file. So the signature needs to be kept in a local variable. + */ + uint64_t signature = (uint64_t) pSql->signature; if (tscUpdateVnodeMsg[pSql->cmd.command]) (*tscUpdateVnodeMsg[pSql->cmd.command])(pSql, buf); + int ret = taosSendMsgToPeerH(pSql->thandle, pStart, pSql->cmd.payloadLen, pSql); - - if (ret >= 0) code = 0; - tscTrace("%p send msg ret:%d code:%d sig:%p", pSql, ret, code, pSql->signature); + if (ret >= 0) { + code = 0; + } + + tscTrace("%p send msg ret:%d code:%d sig:%p", pSql, ret, code, signature); } } @@ -391,14 +428,11 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { // for single node situation, do NOT try next index #endif pSql->thandle = NULL; - // todo taos_stop_query() in async model /* * in case of - * 1. query cancelled(pRes->code != TSDB_CODE_QUERY_CANCELLED), do NOT re-issue the - * request to server. - * 2. retrieve, do NOT re-issue the retrieve request since the qhandle may - * have been released by server + * 1. query cancelled(pRes->code != TSDB_CODE_QUERY_CANCELLED), do NOT re-issue the request to server. + * 2. retrieve, do NOT re-issue the retrieve request since the qhandle may have been released by server */ if (pCmd->command != TSDB_SQL_FETCH && pCmd->command != TSDB_SQL_RETRIEVE && pCmd->command != TSDB_SQL_KILL_QUERY && pRes->code != TSDB_CODE_QUERY_CANCELLED) { @@ -424,14 +458,20 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { } } } else { + uint16_t rspCode = pMsg->content[0]; + #ifdef CLUSTER - if (pMsg->content[0] == TSDB_CODE_REDIRECT) { + + if (rspCode == TSDB_CODE_REDIRECT) { tscTrace("%p it shall be redirected!", pSql); taosAddConnIntoCache(tscConnCache, thandle, pSql->ip, pSql->vnode, pObj->user); pSql->thandle = NULL; if (pCmd->command > TSDB_SQL_MGMT) { tscProcessMgmtRedirect(pSql, pMsg->content + 1); + } else if (pCmd->command == TSDB_SQL_INSERT){ + pSql->index++; + pSql->maxRetry = TSDB_VNODES_SUPPORT * 2; } else { pSql->index++; } @@ -439,38 +479,27 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { code = tscSendMsgToServer(pSql); if (code == 0) return pSql; msg = NULL; - } else if (pMsg->content[0] == TSDB_CODE_NOT_ACTIVE_SESSION || pMsg->content[0] == TSDB_CODE_NETWORK_UNAVAIL || - pMsg->content[0] == TSDB_CODE_INVALID_SESSION_ID) { + } else if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID || + rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || rspCode == TSDB_CODE_INVALID_VNODE_ID || + rspCode == TSDB_CODE_TABLE_ID_MISMATCH || rspCode == TSDB_CODE_NETWORK_UNAVAIL) { #else - if (pMsg->content[0] == TSDB_CODE_NOT_ACTIVE_SESSION || pMsg->content[0] == TSDB_CODE_NETWORK_UNAVAIL || - pMsg->content[0] == TSDB_CODE_INVALID_SESSION_ID) { + if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID || + rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || rspCode == TSDB_CODE_INVALID_VNODE_ID || + rspCode == 
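Because the SQL object can be released by the completion path on another thread as soon as the message is handed to the RPC layer, the signature is copied into a local before taosSendMsgToPeerH and only the copy is logged. A generic sketch of the same rule, with a stub in place of the real async send:

```c
#include <inttypes.h>
#include <stdio.h>

typedef struct { void *signature; int payloadLen; } SReq;

/* Stub standing in for the real async send; in the client the completion
 * callback may free the request on another thread before this returns. */
static int asyncSend(SReq *req) { (void)req; return 0; }

static int sendRequest(SReq *req) {
  /* Copy whatever we still want to log BEFORE the request escapes to the
   * other thread -- the patch does exactly this with pSql->signature. */
  uint64_t sig = (uint64_t)(uintptr_t)req->signature;

  int ret = asyncSend(req);                /* req must not be dereferenced after this */

  printf("send msg ret:%d sig:0x%" PRIx64 "\n", ret, sig);
  return ret >= 0 ? 0 : -1;
}

int main(void) {
  SReq r = { .signature = &r, .payloadLen = 128 };
  return sendRequest(&r);
}
```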
TSDB_CODE_TABLE_ID_MISMATCH || rspCode == TSDB_CODE_NETWORK_UNAVAIL) { #endif pSql->thandle = NULL; taosAddConnIntoCache(tscConnCache, thandle, pSql->ip, pSql->vnode, pObj->user); - - if (pMeterMetaInfo != NULL && UTIL_METER_IS_METRIC(pMeterMetaInfo) && - pMsg->content[0] == TSDB_CODE_NOT_ACTIVE_SESSION) { - /* - * for metric query, in case of any meter missing during query, sub-query of metric query will failed, - * causing metric query failed, and return TSDB_CODE_METRICMETA_EXPIRED code to app - */ - tscTrace("%p invalid meters id cause metric query failed, code:%d", pSql, pMsg->content[0]); - code = TSDB_CODE_METRICMETA_EXPIRED; - } else if ((pCmd->command == TSDB_SQL_INSERT || pCmd->command == TSDB_SQL_SELECT) && - pMsg->content[0] == TSDB_CODE_INVALID_SESSION_ID) { - /* - * session id is invalid(e.g., less than 0 or larger than maximum session per - * vnode) in submit/query msg, no retry - */ - code = TSDB_CODE_INVALID_QUERY_MSG; - } else if (pCmd->command == TSDB_SQL_CONNECT) { + + if (pCmd->command == TSDB_SQL_CONNECT) { code = TSDB_CODE_NETWORK_UNAVAIL; } else if (pCmd->command == TSDB_SQL_HB) { code = TSDB_CODE_NOT_READY; } else { - tscTrace("%p it shall renew meter meta, code:%d", pSql, pMsg->content[0]); + tscTrace("%p it shall renew meter meta, code:%d", pSql, rspCode); + pSql->maxRetry = TSDB_VNODES_SUPPORT * 2; - + pSql->res.code = (uint8_t) rspCode; // keep the previous error code + code = tscRenewMeterMeta(pSql, pMeterMetaInfo->name); if (code == TSDB_CODE_ACTION_IN_PROGRESS) return pSql; @@ -482,7 +511,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { msg = NULL; } else { // for other error set and return to invoker - code = pMsg->content[0]; + code = rspCode; } } @@ -494,7 +523,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { if (pMeterMetaInfo->pMeterMeta) // it may be deleted pMeterMetaInfo->pMeterMeta->index = pSql->index; } else { - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pSql->cmd.vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pMeterMetaInfo->vnodeIndex); pVnodeSidList->index = pSql->index; } } else { @@ -568,7 +597,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { void *taosres = tscKeepConn[command] ? pSql : NULL; code = pRes->code ? 
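Stale table/vnode ids and transient network errors now share one branch: the client records the original code, enlarges the retry budget, and renews the meter meta instead of failing the statement outright. A sketch of that classification with illustrative code values (the real TSDB_CODE_* constants differ):

```c
#include <stdio.h>

/* Illustrative response codes; the real TSDB_CODE_* values are different. */
enum { RSP_NOT_ACTIVE_TABLE = 1, RSP_INVALID_TABLE_ID, RSP_NOT_ACTIVE_VNODE,
       RSP_INVALID_VNODE_ID, RSP_TABLE_ID_MISMATCH, RSP_NETWORK_UNAVAIL };

/* Codes that point to stale table/vnode metadata or a transient network
 * problem trigger a meter-meta renewal and retry; anything else is an error. */
static int shouldRenewMeterMeta(int rspCode) {
  switch (rspCode) {
    case RSP_NOT_ACTIVE_TABLE:
    case RSP_INVALID_TABLE_ID:
    case RSP_NOT_ACTIVE_VNODE:
    case RSP_INVALID_VNODE_ID:
    case RSP_TABLE_ID_MISMATCH:
    case RSP_NETWORK_UNAVAIL:
      return 1;
    default:
      return 0;
  }
}

int main(void) {
  printf("%d %d\n", shouldRenewMeterMeta(RSP_INVALID_VNODE_ID), shouldRenewMeterMeta(42));
  return 0;
}
```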
-pRes->code : pRes->numOfRows; - tscTrace("%p Async SQL result:%d taosres:%p", pSql, code, taosres); + tscTrace("%p Async SQL result:%d res:%p", pSql, code, taosres); /* * Whether to free sqlObj or not should be decided before call the user defined function, since this SqlObj @@ -605,7 +634,7 @@ static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsu static int tscLaunchMetricSubQueries(SSqlObj *pSql); // todo merge with callback -int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeIdx, SJoinSubquerySupporter *pSupporter) { +int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSubquerySupporter *pSupporter) { SSqlCmd *pCmd = &pSql->cmd; pSql->res.qhandle = 0x1; @@ -618,12 +647,13 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeId } } - SSqlObj *pNew = createSubqueryObj(pSql, vnodeIdx, tableIndex, tscJoinQueryCallback, pSupporter, NULL); + SSqlObj *pNew = createSubqueryObj(pSql, tableIndex, tscJoinQueryCallback, pSupporter, NULL); if (pNew == NULL) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } - + pSql->pSubs[pSql->numOfSubs++] = pNew; + assert(pSql->numOfSubs <= pSupporter->pState->numOfTotal); if (QUERY_IS_JOIN_QUERY(pCmd->type)) { addGroupInfoForSubquery(pSql, pNew, tableIndex); @@ -655,13 +685,11 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeId SSqlExpr *pExpr = tscSqlExprGet(&pNew->cmd, 0); SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0); - int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd, pMeterMetaInfo->pMeterMeta->uid); + int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pSupporter->tagCond, pMeterMetaInfo->pMeterMeta->uid); pExpr->param->i64Key = tagColIndex; pExpr->numOfParams = 1; - addRequiredTagColumn(pCmd, tagColIndex, 0); - // add the filter tag column for (int32_t i = 0; i < pSupporter->colList.numOfCols; ++i) { SColumnBase *pColBase = &pSupporter->colList.pColList[i]; @@ -673,7 +701,11 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeId } else { pNew->cmd.type |= TSDB_QUERY_TYPE_SUBQUERY; } - + +#ifdef _DEBUG_VIEW + tscPrintSelectClause(&pNew->cmd); +#endif + return tscProcessSql(pNew); } @@ -729,11 +761,18 @@ int tscProcessSql(SSqlObj *pSql) { #else pSql->maxRetry = 2; #endif + + // the pMeterMetaInfo cannot be NULL + if (pMeterMetaInfo == NULL) { + pSql->res.code = TSDB_CODE_OTHERS; + return pSql->res.code; + } + if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { pSql->index = pMeterMetaInfo->pMeterMeta->index; - } else { // it must be the parent SSqlObj for metric query + } else { // it must be the parent SSqlObj for super table query if ((pSql->cmd.type & TSDB_QUERY_TYPE_SUBQUERY) != 0) { - int32_t idx = pSql->cmd.vnodeIdx; + int32_t idx = pMeterMetaInfo->vnodeIndex; SVnodeSidList *pSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, idx); pSql->index = pSidList->index; } @@ -761,7 +800,7 @@ int tscProcessSql(SSqlObj *pSql) { return pSql->res.code; } - int32_t code = tscLaunchJoinSubquery(pSql, i, 0, pSupporter); + int32_t code = tscLaunchJoinSubquery(pSql, i, pSupporter); if (code != TSDB_CODE_SUCCESS) { // failed to create subquery object, quit query tscDestroyJoinSupporter(pSupporter); pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; @@ -903,7 +942,7 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) { trs->pOrderDescriptor = pDesc; trs->pState = pState; trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage)); - trs->vnodeIdx = i; + trs->subqueryIndex = i; 
trs->pParentSqlObj = pSql; trs->pFinalColModel = pModel; @@ -930,7 +969,7 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) { pNew->cmd.tsBuf = tsBufClone(pSql->cmd.tsBuf); } - tscTrace("%p sub:%p launch subquery.orderOfSub:%d", pSql, pNew, pNew->cmd.vnodeIdx); + tscTrace("%p sub:%p launch subquery.orderOfSub:%d", pSql, pNew, trs->subqueryIndex); tscProcessSql(pNew); } @@ -979,7 +1018,7 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows) { SSqlObj *pPObj = trsupport->pParentSqlObj; - int32_t idx = trsupport->vnodeIdx; + int32_t subqueryIndex = trsupport->subqueryIndex; assert(pSql != NULL); @@ -994,27 +1033,27 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq pSql->res.numOfRows = 0; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; // disable retry efforts tscTrace("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%d", trsupport->pParentSqlObj, pSql, - trsupport->vnodeIdx, trsupport->pState->code); + subqueryIndex, trsupport->pState->code); } if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query. - tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, idx); - tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pPObj, pSql, idx, - trsupport->pState->code); + tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, subqueryIndex); + tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pPObj, pSql, + subqueryIndex, trsupport->pState->code); } else { if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && trsupport->pState->code == TSDB_CODE_SUCCESS) { /* * current query failed, and the retry count is less than the available * count, retry query clear previous retrieved data, then launch a new sub query */ - tExtMemBufferClear(trsupport->pExtMemBuffer[idx]); + tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]); // clear local saved number of results trsupport->localBuffer->numOfElems = 0; pthread_mutex_unlock(&trsupport->queryMutex); tscTrace("%p sub:%p retrieve failed, code:%d, orderOfSub:%d, retry:%d", trsupport->pParentSqlObj, pSql, numOfRows, - idx, trsupport->numOfRetry); + subqueryIndex, trsupport->numOfRetry); SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql); if (pNew == NULL) { @@ -1029,13 +1068,13 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq tscProcessSql(pNew); return; } else { // reach the maximum retry count, abort - __sync_val_compare_and_swap_32(&trsupport->pState->code, TSDB_CODE_SUCCESS, numOfRows); + atomic_val_compare_exchange_32(&trsupport->pState->code, TSDB_CODE_SUCCESS, numOfRows); tscError("%p sub:%p retrieve failed,code:%d,orderOfSub:%d failed.no more retry,set global code:%d", pPObj, pSql, - numOfRows, idx, trsupport->pState->code); + numOfRows, subqueryIndex, trsupport->pState->code); } } - if (__sync_add_and_fetch_32(&trsupport->pState->numOfCompleted, 1) < trsupport->pState->numOfTotal) { + if (atomic_add_fetch_32(&trsupport->pState->numOfCompleted, 1) < trsupport->pState->numOfTotal) { return tscFreeSubSqlObj(trsupport, pSql); } @@ -1074,13 +1113,12 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { 
SRetrieveSupport *trsupport = (SRetrieveSupport *)param; - int32_t idx = trsupport->vnodeIdx; + int32_t idx = trsupport->subqueryIndex; SSqlObj * pPObj = trsupport->pParentSqlObj; tOrderDescriptor *pDesc = trsupport->pOrderDescriptor; SSqlObj *pSql = (SSqlObj *)tres; - if (pSql == NULL) { - /* sql object has been released in error process, return immediately */ + if (pSql == NULL) { // sql object has been released in error process, return immediately tscTrace("%p subquery has been released, idx:%d, abort", pPObj, idx); return; } @@ -1101,7 +1139,7 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { if (numOfRows > 0) { assert(pRes->numOfRows == numOfRows); - __sync_add_and_fetch_64(&trsupport->pState->numOfRetrievedRows, numOfRows); + atomic_add_fetch_64(&trsupport->pState->numOfRetrievedRows, numOfRows); tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%u,vid:%d,orderOfSub:%d", pPObj, pSql, pRes->numOfRows, trsupport->pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx); @@ -1131,7 +1169,7 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { } else { // all data has been retrieved to client /* data in from current vnode is stored in cache and disk */ uint32_t numOfRowsFromVnode = - trsupport->pExtMemBuffer[pCmd->vnodeIdx]->numOfAllElems + trsupport->localBuffer->numOfElems; + trsupport->pExtMemBuffer[idx]->numOfAllElems + trsupport->localBuffer->numOfElems; tscTrace("%p sub:%p all data retrieved from ip:%u,vid:%d, numOfRows:%d, orderOfSub:%d", pPObj, pSql, pSvd->ip, pSvd->vnode, numOfRowsFromVnode, idx); @@ -1160,7 +1198,7 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { return tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_CLI_NO_DISKSPACE); } - if (__sync_add_and_fetch_32(&trsupport->pState->numOfCompleted, 1) < trsupport->pState->numOfTotal) { + if (atomic_add_fetch_32(&trsupport->pState->numOfCompleted, 1) < trsupport->pState->numOfTotal) { return tscFreeSubSqlObj(trsupport, pSql); } @@ -1244,10 +1282,16 @@ void tscKillMetricQuery(SSqlObj *pSql) { static void tscRetrieveDataRes(void *param, TAOS_RES *tres, int retCode); static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj) { - SSqlObj *pNew = createSubqueryObj(pSql, trsupport->vnodeIdx, 0, tscRetrieveDataRes, trsupport, prevSqlObj); + SSqlObj *pNew = createSubqueryObj(pSql, 0, tscRetrieveDataRes, trsupport, prevSqlObj); if (pNew != NULL) { // the sub query of two-stage super table query pNew->cmd.type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY; - pSql->pSubs[trsupport->vnodeIdx] = pNew; + assert(pNew->cmd.numOfTables == 1); + + //launch subquery for each vnode, so the subquery index equals to the vnodeIndex. 
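Sub-query completion is tracked with atomic_add_fetch_32 on numOfCompleted; every finisher but the last merely frees its own objects, and only the last one proceeds to the merge stage. A sequential sketch of the pattern using the underlying GCC builtin:

```c
#include <stdio.h>

/* Shared state for N parallel sub-queries: only the last one to finish moves
 * on to the merge step, every other finisher just releases its own objects. */
typedef struct { int numOfTotal; volatile int numOfCompleted; } SSubState;

static void subqueryDone(SSubState *st, int idx) {
  int done = __sync_add_and_fetch(&st->numOfCompleted, 1);   /* wrapped as atomic_add_fetch_32 */
  if (done < st->numOfTotal) {
    printf("sub %d finished (%d/%d), free its objects only\n", idx, done, st->numOfTotal);
  } else {
    printf("sub %d is the last one, start the final merge\n", idx);
  }
}

int main(void) {
  SSubState st = { .numOfTotal = 3, .numOfCompleted = 0 };
  for (int i = 0; i < 3; ++i) subqueryDone(&st, i);
  return 0;
}
```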
+ SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0); + pMeterMetaInfo->vnodeIndex = trsupport->subqueryIndex; + + pSql->pSubs[trsupport->subqueryIndex] = pNew; } return pNew; @@ -1257,8 +1301,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { SRetrieveSupport *trsupport = (SRetrieveSupport *)param; SSqlObj * pSql = (SSqlObj *)tres; - int32_t idx = pSql->cmd.vnodeIdx; SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + int32_t idx = pMeterMetaInfo->vnodeIndex; SVnodeSidList *vnodeInfo = NULL; SVPeerDesc * pSvd = NULL; @@ -1276,7 +1320,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { code = trsupport->pState->code; } tscTrace("%p query cancelled or failed, sub:%p, orderOfSub:%d abort, code:%d", trsupport->pParentSqlObj, pSql, - trsupport->vnodeIdx, code); + trsupport->subqueryIndex, code); } /* @@ -1289,14 +1333,14 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { if (code != TSDB_CODE_SUCCESS) { if (trsupport->numOfRetry++ >= MAX_NUM_OF_SUBQUERY_RETRY) { tscTrace("%p sub:%p reach the max retry count,set global code:%d", trsupport->pParentSqlObj, pSql, code); - __sync_val_compare_and_swap_32(&trsupport->pState->code, 0, code); + atomic_val_compare_exchange_32(&trsupport->pState->code, 0, code); } else { // does not reach the maximum retry count, go on tscTrace("%p sub:%p failed code:%d, retry:%d", trsupport->pParentSqlObj, pSql, code, trsupport->numOfRetry); SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql); if (pNew == NULL) { tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vid:%d, orderOfSub:%d", - trsupport->pParentSqlObj, pSql, pSvd->vnode, trsupport->vnodeIdx); + trsupport->pParentSqlObj, pSql, pSvd->vnode, trsupport->subqueryIndex); trsupport->pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; @@ -1312,17 +1356,17 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { if (vnodeInfo != NULL) { tscTrace("%p sub:%p query failed,ip:%u,vid:%d,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql, vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, - trsupport->vnodeIdx, trsupport->pState->code); + trsupport->subqueryIndex, trsupport->pState->code); } else { tscTrace("%p sub:%p query failed,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql, - trsupport->vnodeIdx, trsupport->pState->code); + trsupport->subqueryIndex, trsupport->pState->code); } tscRetrieveFromVnodeCallBack(param, tres, trsupport->pState->code); } else { // success, proceed to retrieve data from dnode tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, - trsupport->vnodeIdx); + trsupport->subqueryIndex); taos_fetch_rows_a(tres, tscRetrieveFromVnodeCallBack, param); } @@ -1338,7 +1382,7 @@ int tscBuildRetrieveMsg(SSqlObj *pSql) { *((uint64_t *)pMsg) = pSql->res.qhandle; pMsg += sizeof(pSql->res.qhandle); - *pMsg = htons(pSql->cmd.type); + *((uint16_t*)pMsg) = htons(pSql->cmd.type); pMsg += sizeof(pSql->cmd.type); msgLen = pMsg - pStart; @@ -1359,7 +1403,7 @@ void tscUpdateVnodeInSubmitMsg(SSqlObj *pSql, char *buf) { pShellMsg = (SShellSubmitMsg *)pMsg; pShellMsg->vnode = htons(pMeterMeta->vpeerDesc[pSql->index].vnode); - tscTrace("%p update submit msg vnode:%d", pSql, htons(pShellMsg->vnode)); + 
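When a sub-query fails, atomic_val_compare_exchange_32 stores the error into pState->code only while it is still TSDB_CODE_SUCCESS, so the first failure is the one reported even if several sub-queries fail concurrently. A small sketch of that first-error rule:

```c
#include <stdio.h>

#define CODE_SUCCESS 0

/* Record only the FIRST failure: the compare-and-swap succeeds only while the
 * shared code is still "success", so later errors never overwrite the cause. */
static void setFirstError(volatile int *sharedCode, int err) {
  __sync_val_compare_and_swap(sharedCode, CODE_SUCCESS, err);
}

int main(void) {
  volatile int code = CODE_SUCCESS;
  setFirstError(&code, 5);                 /* first failure wins       */
  setFirstError(&code, 7);                 /* ignored, code stays at 5 */
  printf("global code:%d\n", code);
  return 0;
}
```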
tscTrace("%p update submit msg vnode:%s:%d", pSql, taosIpStr(pMeterMeta->vpeerDesc[pSql->index].ip), htons(pShellMsg->vnode)); } int tscBuildSubmitMsg(SSqlObj *pSql) { @@ -1374,13 +1418,13 @@ int tscBuildSubmitMsg(SSqlObj *pSql) { pMsg = pStart; pShellMsg = (SShellSubmitMsg *)pMsg; - pShellMsg->import = pSql->cmd.order.order; + pShellMsg->import = pSql->cmd.import; pShellMsg->vnode = htons(pMeterMeta->vpeerDesc[pMeterMeta->index].vnode); pShellMsg->numOfSid = htonl(pSql->cmd.count); // number of meters to be inserted // pSql->cmd.payloadLen is set during parse sql routine, so we do not use it here pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT; - tscTrace("%p update submit msg vnode:%d", pSql, htons(pShellMsg->vnode)); + tscTrace("%p update submit msg vnode:%s:%d", pSql, taosIpStr(pMeterMeta->vpeerDesc[pMeterMeta->index].ip), htons(pShellMsg->vnode)); return msgLen; } @@ -1397,7 +1441,7 @@ void tscUpdateVnodeInQueryMsg(SSqlObj *pSql, char *buf) { pQueryMsg->vnode = htons(pMeterMeta->vpeerDesc[pSql->index].vnode); } else { // query on metric SMetricMeta * pMetricMeta = pMeterMetaInfo->pMetricMeta; - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); pQueryMsg->vnode = htons(pVnodeSidList->vpeerDesc[pSql->index].vnode); } } @@ -1420,7 +1464,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd) { SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); int32_t meterInfoSize = (pMetricMeta->tagLen + sizeof(SMeterSidExtInfo)) * pVnodeSidList->numOfSids; int32_t outputColumnSize = pCmd->fieldsInfo.numOfOutputCols * sizeof(SSqlFuncExprMsg); @@ -1433,6 +1477,46 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd) { return size; } +static char* doSerializeTableInfo(SSqlObj* pSql, int32_t numOfMeters, int32_t vnodeId, char* pMsg) { + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + + SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; + SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; + + tscTrace("%p vid:%d, query on %d meters", pSql, htons(vnodeId), numOfMeters); + if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { +#ifdef _DEBUG_VIEW + tscTrace("%p sid:%d, uid:%lld", pSql, pMeterMetaInfo->pMeterMeta->sid, pMeterMetaInfo->pMeterMeta->uid); +#endif + SMeterSidExtInfo *pMeterInfo = (SMeterSidExtInfo *)pMsg; + pMeterInfo->sid = htonl(pMeterMeta->sid); + pMeterInfo->uid = htobe64(pMeterMeta->uid); + + pMsg += sizeof(SMeterSidExtInfo); + } else { + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); + + for (int32_t i = 0; i < numOfMeters; ++i) { + SMeterSidExtInfo *pMeterInfo = (SMeterSidExtInfo *)pMsg; + SMeterSidExtInfo *pQueryMeterInfo = tscGetMeterSidInfo(pVnodeSidList, i); + + pMeterInfo->sid = htonl(pQueryMeterInfo->sid); + pMeterInfo->uid = htobe64(pQueryMeterInfo->uid); + + pMsg += sizeof(SMeterSidExtInfo); + + memcpy(pMsg, pQueryMeterInfo->tags, pMetricMeta->tagLen); + pMsg += pMetricMeta->tagLen; + +#ifdef _DEBUG_VIEW + tscTrace("%p sid:%d, uid:%lld", pSql, pQueryMeterInfo->sid, pQueryMeterInfo->uid); +#endif + } + } + + return pMsg; +} + int tscBuildQueryMsg(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; @@ -1463,14 +1547,13 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->vnode = 
htons(pMeterMeta->vpeerDesc[pMeterMeta->index].vnode); pQueryMsg->uid = pMeterMeta->uid; pQueryMsg->numOfTagsCols = 0; - } else { // query on metric - SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; - if (pCmd->vnodeIdx < 0) { - tscError("%p error vnodeIdx:%d", pSql, pCmd->vnodeIdx); + } else { // query on super table + if (pMeterMetaInfo->vnodeIndex < 0) { + tscError("%p error vnodeIdx:%d", pSql, pMeterMetaInfo->vnodeIndex); return -1; } - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); uint32_t vnodeId = pVnodeSidList->vpeerDesc[pVnodeSidList->index].vnode; numOfMeters = pVnodeSidList->numOfSids; @@ -1651,34 +1734,8 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->colNameLen = htonl(len); - // set sids list - tscTrace("%p vid:%d, query on %d meters", pSql, pSql->cmd.vnodeIdx, numOfMeters); - if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { -#ifdef _DEBUG_VIEW - - tscTrace("%p %d", pSql, pMeterMetaInfo->pMeterMeta->sid); -#endif - SMeterSidExtInfo *pSMeterTagInfo = (SMeterSidExtInfo *)pMsg; - pSMeterTagInfo->sid = htonl(pMeterMeta->sid); - pMsg += sizeof(SMeterSidExtInfo); - } else { - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); - - for (int32_t i = 0; i < numOfMeters; ++i) { - SMeterSidExtInfo *pMeterTagInfo = (SMeterSidExtInfo *)pMsg; - SMeterSidExtInfo *pQueryMeterInfo = tscGetMeterSidInfo(pVnodeSidList, i); - - pMeterTagInfo->sid = htonl(pQueryMeterInfo->sid); - pMsg += sizeof(SMeterSidExtInfo); - -#ifdef _DEBUG_VIEW - tscTrace("%p %d", pSql, pQueryMeterInfo->sid); -#endif - - memcpy(pMsg, pQueryMeterInfo->tags, pMetricMeta->tagLen); - pMsg += pMetricMeta->tagLen; - } - } + // serialize the table info (sid, uid, tags) + pMsg = doSerializeTableInfo(pSql, numOfMeters, htons(pQueryMsg->vnode), pMsg); // only include the required tag column schema. If a tag is not required, it won't be sent to vnode if (pMeterMetaInfo->numOfTags > 0) { @@ -1733,7 +1790,7 @@ int tscBuildQueryMsg(SSqlObj *pSql) { int32_t numOfBlocks = 0; if (pCmd->tsBuf != NULL) { - STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pCmd->tsBuf, pCmd->vnodeIdx); + STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pCmd->tsBuf, pMeterMetaInfo->vnodeIndex); assert(QUERY_IS_JOIN_QUERY(pCmd->type) && pBlockInfo != NULL); // this query should not be sent // todo refactor @@ -1778,7 +1835,7 @@ int tscBuildCreateDbMsg(SSqlObj *pSql) { pMsg += sizeof(SMgmtHead); pCreateDbMsg = (SCreateDbMsg *)pMsg; - strcpy(pCreateDbMsg->db, pMeterMetaInfo->name); + strncpy(pCreateDbMsg->db, pMeterMetaInfo->name, tListLen(pCreateDbMsg->db)); pMsg += sizeof(SCreateDbMsg); msgLen = pMsg - pStart; @@ -2000,7 +2057,7 @@ int tscBuildDropDbMsg(SSqlObj *pSql) { pMsg += sizeof(SMgmtHead); pDropDbMsg = (SDropDbMsg *)pMsg; - strcpy(pDropDbMsg->db, pMeterMetaInfo->name); + strncpy(pDropDbMsg->db, pMeterMetaInfo->name, tListLen(pDropDbMsg->db)); pDropDbMsg->ignoreNotExists = htons(pCmd->existsCheck ? 
1 : 0); @@ -2134,7 +2191,7 @@ int tscBuildShowMsg(SSqlObj *pSql) { pShowMsg = (SShowMsg *)pMsg; pShowMsg->type = pCmd->showType; - if ((pShowMsg->type == TSDB_MGMT_TABLE_TABLE || pShowMsg->type == TSDB_MGMT_TABLE_METRIC) && pCmd->payloadLen != 0) { + if ((pShowMsg->type == TSDB_MGMT_TABLE_TABLE || pShowMsg->type == TSDB_MGMT_TABLE_METRIC || pShowMsg->type == TSDB_MGMT_TABLE_VNODES ) && pCmd->payloadLen != 0) { // only show tables support wildcard query pShowMsg->payloadLen = htons(pCmd->payloadLen); memcpy(pShowMsg->payload, payload, pCmd->payloadLen); @@ -2269,6 +2326,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql) { size = tscEstimateCreateTableMsgLength(pSql); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { tscError("%p failed to malloc for create table msg", pSql); + free(tmpData); return -1; } @@ -2466,10 +2524,10 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql) { pMsg += sizeof(SMgmtHead); - *((uint64_t *)pMsg) = pSql->res.qhandle; + *((uint64_t *) pMsg) = pSql->res.qhandle; pMsg += sizeof(pSql->res.qhandle); - *pMsg = htons(pCmd->type); + *((uint16_t*) pMsg) = htons(pCmd->type); pMsg += sizeof(pCmd->type); msgLen = pMsg - pStart; @@ -2601,6 +2659,8 @@ int tscBuildConnectMsg(SSqlObj *pSql) { db = (db == NULL) ? pObj->db : db + 1; strcpy(pConnect->db, db); + strcpy(pConnect->clientVersion, version); + pMsg += sizeof(SConnectMsg); msgLen = pMsg - pStart; @@ -2700,10 +2760,14 @@ static int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) { int32_t n = 0; for (int32_t i = 0; i < pCmd->tagCond.numOfTagCond; ++i) { - n += pCmd->tagCond.cond[i].cond.n; + n += strlen(pCmd->tagCond.cond[i].cond); } - int32_t tagLen = n * TSDB_NCHAR_SIZE + pCmd->tagCond.tbnameCond.cond.n * TSDB_NCHAR_SIZE; + int32_t tagLen = n * TSDB_NCHAR_SIZE; + if (pCmd->tagCond.tbnameCond.cond != NULL) { + tagLen += strlen(pCmd->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE; + } + int32_t joinCondLen = (TSDB_METER_ID_LEN + sizeof(int16_t)) * 2; int32_t elemSize = sizeof(SMetricMetaElemMsg) * pCmd->numOfTables; @@ -2775,8 +2839,9 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { if (pTagCond->numOfTagCond > 0) { SCond *pCond = tsGetMetricQueryCondPos(pTagCond, uid); if (pCond != NULL) { - condLen = pCond->cond.n + 1; - bool ret = taosMbsToUcs4(pCond->cond.z, pCond->cond.n, pMsg, pCond->cond.n * TSDB_NCHAR_SIZE); + condLen = strlen(pCond->cond) + 1; + + bool ret = taosMbsToUcs4(pCond->cond, condLen, pMsg, condLen * TSDB_NCHAR_SIZE); if (!ret) { tscError("%p mbs to ucs4 failed:%s", pSql, tsGetMetricQueryCondPos(pTagCond, uid)); return 0; @@ -2795,15 +2860,17 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { offset = pMsg - (char *)pMetaMsg; pElem->tableCond = htonl(offset); - pElem->tableCondLen = htonl(pTagCond->tbnameCond.cond.n); + + uint32_t len = strlen(pTagCond->tbnameCond.cond); + pElem->tableCondLen = htonl(len); - memcpy(pMsg, pTagCond->tbnameCond.cond.z, pTagCond->tbnameCond.cond.n); - pMsg += pTagCond->tbnameCond.cond.n; + memcpy(pMsg, pTagCond->tbnameCond.cond, len); + pMsg += len; } SSqlGroupbyExpr *pGroupby = &pCmd->groupbyExpr; - if (pGroupby->tableIndex != i) { + if (pGroupby->tableIndex != i && pGroupby->numOfGroupCols > 0) { pElem->orderType = 0; pElem->orderIndex = 0; pElem->numOfGroupCols = 0; @@ -2821,15 +2888,14 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { pElem->groupbyTagColumnList = htonl(offset); for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) { SColIndexEx *pCol = &pCmd->groupbyExpr.columnInfo[j]; - - *((int16_t *)pMsg) = pCol->colId; - pMsg += sizeof(pCol->colId); - - *((int16_t *)pMsg) 
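The strcpy calls for the db field are replaced with strncpy bounded by the destination field (tListLen), so an over-long name can no longer overrun the message. A sketch with an illustrative field size; note that strncpy does not guarantee NUL termination, so the demo terminates explicitly:

```c
#include <stdio.h>
#include <string.h>

/* tListLen-style macro: capacity of a fixed-size array field. */
#define FIELD_LEN(x) (sizeof(x) / sizeof((x)[0]))

typedef struct { char db[33]; } SDbMsgT;            /* illustrative field size only */

int main(void) {
  SDbMsgT     msg = {{0}};
  const char *name = "acct.some_rather_long_database_name_sent_by_a_client";

  strncpy(msg.db, name, FIELD_LEN(msg.db));         /* bounded, unlike strcpy    */
  msg.db[FIELD_LEN(msg.db) - 1] = '\0';             /* strncpy may not terminate */

  printf("db field: %s\n", msg.db);
  return 0;
}
```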
+= pCol->colIdx; - pMsg += sizeof(pCol->colIdx); - - *((int16_t *)pMsg) += pCol->flag; - pMsg += sizeof(pCol->flag); + SColIndexEx* pDestCol = (SColIndexEx*) pMsg; + + pDestCol->colIdxInBuf = 0; + pDestCol->colIdx = htons(pCol->colIdx); + pDestCol->colId = htons(pDestCol->colId); + pDestCol->flag = htons(pDestCol->flag); + + pMsg += sizeof(SColIndexEx); } } } @@ -2848,7 +2914,7 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { return msgLen; } -int tscEstimateBuildHeartBeatMsgLength(SSqlObj *pSql) { +int tscEstimateHeartBeatMsgLength(SSqlObj *pSql) { int size = 0; STscObj *pObj = pSql->pTscObj; @@ -2881,7 +2947,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql) { pthread_mutex_lock(&pObj->mutex); - size = tscEstimateBuildHeartBeatMsgLength(pSql); + size = tscEstimateHeartBeatMsgLength(pSql); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { tscError("%p failed to malloc for heartbeat msg", pSql); return -1; @@ -3171,44 +3237,47 @@ int tscProcessMetricMetaRsp(SSqlObj *pSql) { size += pMeta->numOfVnodes * sizeof(SVnodeSidList *) + pMeta->numOfMeters * sizeof(SMeterSidExtInfo *); - char *pStr = calloc(1, size); - if (pStr == NULL) { + char *pBuf = calloc(1, size); + if (pBuf == NULL) { pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; goto _error_clean; } - SMetricMeta *pNewMetricMeta = (SMetricMeta *)pStr; + SMetricMeta *pNewMetricMeta = (SMetricMeta *)pBuf; metricMetaList[k] = pNewMetricMeta; pNewMetricMeta->numOfMeters = pMeta->numOfMeters; pNewMetricMeta->numOfVnodes = pMeta->numOfVnodes; pNewMetricMeta->tagLen = pMeta->tagLen; - pStr = pStr + sizeof(SMetricMeta) + pNewMetricMeta->numOfVnodes * sizeof(SVnodeSidList *); + pBuf = pBuf + sizeof(SMetricMeta) + pNewMetricMeta->numOfVnodes * sizeof(SVnodeSidList *); for (int32_t i = 0; i < pMeta->numOfVnodes; ++i) { SVnodeSidList *pSidLists = (SVnodeSidList *)rsp; - memcpy(pStr, pSidLists, sizeof(SVnodeSidList)); + memcpy(pBuf, pSidLists, sizeof(SVnodeSidList)); - pNewMetricMeta->list[i] = pStr - (char *)pNewMetricMeta; // offset value - SVnodeSidList *pLists = (SVnodeSidList *)pStr; + pNewMetricMeta->list[i] = pBuf - (char *)pNewMetricMeta; // offset value + SVnodeSidList *pLists = (SVnodeSidList *)pBuf; tscTrace("%p metricmeta:vid:%d,numOfMeters:%d", pSql, i, pLists->numOfSids); - pStr += sizeof(SVnodeSidList) + sizeof(SMeterSidExtInfo *) * pSidLists->numOfSids; + pBuf += sizeof(SVnodeSidList) + sizeof(SMeterSidExtInfo *) * pSidLists->numOfSids; rsp += sizeof(SVnodeSidList); - size_t sidSize = sizeof(SMeterSidExtInfo) + pNewMetricMeta->tagLen; + size_t elemSize = sizeof(SMeterSidExtInfo) + pNewMetricMeta->tagLen; for (int32_t j = 0; j < pSidLists->numOfSids; ++j) { - pLists->pSidExtInfoList[j] = pStr - (char *)pLists; - memcpy(pStr, rsp, sidSize); - - rsp += sidSize; - pStr += sidSize; + pLists->pSidExtInfoList[j] = pBuf - (char *)pLists; + memcpy(pBuf, rsp, elemSize); + + ((SMeterSidExtInfo*) pBuf)->uid = htobe64(((SMeterSidExtInfo*) pBuf)->uid); + ((SMeterSidExtInfo*) pBuf)->sid = htonl(((SMeterSidExtInfo*) pBuf)->sid); + + rsp += elemSize; + pBuf += elemSize; } } - sizes[k] = pStr - (char *)pNewMetricMeta; + sizes[k] = pBuf - (char *)pNewMetricMeta; } for (int32_t i = 0; i < num; ++i) { @@ -3300,7 +3369,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { } int tscProcessConnectRsp(SSqlObj *pSql) { - char temp[TSDB_METER_ID_LEN]; + char temp[TSDB_METER_ID_LEN*2]; SConnectRsp *pConnect; STscObj *pObj = pSql->pTscObj; @@ -3308,8 +3377,11 @@ int tscProcessConnectRsp(SSqlObj *pSql) { pConnect = (SConnectRsp *)pRes->pRsp; strcpy(pObj->acctId, pConnect->acctId); 
// copy acctId from response - sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db); - strcpy(pObj->db, temp); + int32_t len =sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db); + + assert(len <= tListLen(pObj->db)); + strncpy(pObj->db, temp, tListLen(pObj->db)); + #ifdef CLUSTER SIpList * pIpList; char *rsp = pRes->pRsp + sizeof(SConnectRsp); @@ -3412,31 +3484,6 @@ int tscProcessQueryRsp(SSqlObj *pSql) { return 0; } -static void doDecompressPayload(SSqlCmd *pCmd, SSqlRes *pRes, int16_t compressed) { - if (compressed && pRes->numOfRows > 0) { - SRetrieveMeterRsp *pRetrieve = (SRetrieveMeterRsp *)pRes->pRsp; - - int32_t numOfTotalCols = pCmd->fieldsInfo.numOfOutputCols + pCmd->fieldsInfo.numOfHiddenCols; - int32_t rowSize = pCmd->fieldsInfo.pOffset[numOfTotalCols - 1] + pCmd->fieldsInfo.pFields[numOfTotalCols - 1].bytes; - - // TODO handle the OOM problem - char * buf = malloc(rowSize * pRes->numOfRows); - - int32_t payloadSize = pRes->rspLen - 1 - sizeof(SRetrieveMeterRsp); - assert(payloadSize > 0); - - int32_t decompressedSize = tsDecompressString(pRetrieve->data, payloadSize, 1, buf, rowSize * pRes->numOfRows, 0, 0, 0); - assert(decompressedSize == rowSize * pRes->numOfRows); - - pRes->pRsp = realloc(pRes->pRsp, pRes->rspLen - payloadSize + decompressedSize); - memcpy(pRes->pRsp + sizeof(SRetrieveMeterRsp), buf, decompressedSize); - - free(buf); - } - - pRes->data = ((SRetrieveMeterRsp *)pRes->pRsp)->data; -} - int tscProcessRetrieveRspFromVnode(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; @@ -3449,14 +3496,18 @@ int tscProcessRetrieveRspFromVnode(SSqlObj *pSql) { pRes->offset = htobe64(pRetrieve->offset); pRes->useconds = htobe64(pRetrieve->useconds); - pRetrieve->compress = htons(pRetrieve->compress); - - doDecompressPayload(pCmd, pRes, pRetrieve->compress); + pRes->data = pRetrieve->data; tscSetResultPointer(pCmd, pRes); pRes->row = 0; - if (pRes->numOfRows == 0 && !(tscProjectionQueryOnMetric(pCmd) && pRes->offset > 0)) { + /** + * If the query result is exhausted, or current query is to free resource at server side, + * the connection will be recycled. 
+ */ + if ((pRes->numOfRows == 0 && !(tscProjectionQueryOnMetric(pCmd) && pRes->offset > 0)) || + ((pCmd->type & TSDB_QUERY_TYPE_FREE_RESOURCE) == TSDB_QUERY_TYPE_FREE_RESOURCE)) { + tscTrace("%p no result or free resource, recycle connection", pSql); taosAddConnIntoCache(tscConnCache, pSql->thandle, pSql->ip, pSql->vnode, pObj->user); pSql->thandle = NULL; } else { @@ -3558,9 +3609,9 @@ int tscGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { * for async insert operation, release data block buffer before issue new object to get metermeta * because in metermeta callback function, the tscParse function will generate the submit data blocks */ - if (pSql->fp != NULL && pSql->pStream == NULL) { - tscFreeSqlCmdData(pCmd); - } + //if (pSql->fp != NULL && pSql->pStream == NULL) { + // tscFreeSqlCmdData(pCmd); + //} return tscDoGetMeterMeta(pSql, meterId, index); } @@ -3605,7 +3656,7 @@ int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) { */ if (pMeterMetaInfo->pMeterMeta == NULL || !tscQueryOnMetric(pCmd)) { if (pMeterMetaInfo->pMeterMeta) { - tscTrace("%p update meter meta, old: numOfTags:%d, numOfCols:%d, uid:%d, addr:%p", pSql, + tscTrace("%p update meter meta, old: numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql, pMeterMetaInfo->numOfTags, pCmd->numOfCols, pMeterMetaInfo->pMeterMeta->uid, pMeterMetaInfo->pMeterMeta); } tscWaitingForCreateTable(&pSql->cmd); @@ -3613,7 +3664,7 @@ int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) { code = tscDoGetMeterMeta(pSql, meterId, 0); // todo ?? } else { - tscTrace("%p metric query not update metric meta, numOfTags:%d, numOfCols:%d, uid:%d, addr:%p", pSql, + tscTrace("%p metric query not update metric meta, numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql, pMeterMetaInfo->pMeterMeta->numOfTags, pCmd->numOfCols, pMeterMetaInfo->pMeterMeta->uid, pMeterMetaInfo->pMeterMeta); } @@ -3770,9 +3821,16 @@ void tscInitMsgs() { tscProcessMsgRsp[TSDB_SQL_MULTI_META] = tscProcessMultiMeterMetaRsp; tscProcessMsgRsp[TSDB_SQL_SHOW] = tscProcessShowRsp; - tscProcessMsgRsp[TSDB_SQL_RETRIEVE] = tscProcessRetrieveRspFromMgmt; + tscProcessMsgRsp[TSDB_SQL_RETRIEVE] = tscProcessRetrieveRspFromVnode; // rsp handled by same function. tscProcessMsgRsp[TSDB_SQL_DESCRIBE_TABLE] = tscProcessDescribeTableRsp; + tscProcessMsgRsp[TSDB_SQL_RETRIEVE_TAGS] = tscProcessTagRetrieveRsp; + tscProcessMsgRsp[TSDB_SQL_CURRENT_DB] = tscProcessTagRetrieveRsp; + tscProcessMsgRsp[TSDB_SQL_CURRENT_USER] = tscProcessTagRetrieveRsp; + tscProcessMsgRsp[TSDB_SQL_SERV_VERSION] = tscProcessTagRetrieveRsp; + tscProcessMsgRsp[TSDB_SQL_CLI_VERSION] = tscProcessTagRetrieveRsp; + tscProcessMsgRsp[TSDB_SQL_SERV_STATUS] = tscProcessTagRetrieveRsp; + tscProcessMsgRsp[TSDB_SQL_RETRIEVE_EMPTY_RESULT] = tscProcessEmptyResultRsp; tscProcessMsgRsp[TSDB_SQL_RETRIEVE_METRIC] = tscProcessRetrieveMetricRsp; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index e6714bb4cb25747a10c695e9261d5e5dcfc47137..4b78c5ed7a07779f735089c1b029f58dda32afce 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -13,26 +13,24 @@ * along with this program. If not, see . 
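Several traces in these hunks switch from %d to the PRId64 macro so that 64-bit uids print correctly regardless of how long is sized on the target. A minimal illustration:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t uid = 98887766554433221LL;

  /* Printing a 64-bit uid with %d truncates it; PRId64 always expands to the
   * correct conversion specifier for the target platform. */
  printf("uid:%" PRId64 "\n", uid);
  return 0;
}
```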
*/ -#include -#include - #include "os.h" #include "tcache.h" #include "tlog.h" +#include "tnote.h" #include "trpc.h" #include "tscJoinProcess.h" #include "tscProfile.h" +#include "tscSQLParser.h" #include "tscSecondaryMerge.h" #include "tscUtil.h" #include "tsclient.h" #include "tscompression.h" #include "tsocket.h" -#include "tsql.h" #include "ttimer.h" #include "tutil.h" -TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, int port, void (*fp)(void *, TAOS_RES *, int), - void *param, void **taos) { +TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port, + void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) { STscObj *pObj; taos_init(); @@ -66,10 +64,6 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const #ifdef CLUSTER if (ip && ip[0]) { - tscMgmtIpList.numOfIps = 2; - strcpy(tscMgmtIpList.ipstr[0], ip); - tscMgmtIpList.ip[0] = inet_addr(ip); - strcpy(tscMgmtIpList.ipstr[1], ip); tscMgmtIpList.ip[1] = inet_addr(ip); } @@ -87,7 +81,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; return NULL; } - + memset(pObj, 0, sizeof(STscObj)); pObj->signature = pObj; @@ -119,7 +113,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const free(pObj); return NULL; } - + memset(pSql, 0, sizeof(SSqlObj)); pSql->pTscObj = pObj; pSql->signature = pSql; @@ -156,10 +150,10 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const return pObj; } -TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, int port) { - if (ip != NULL && (strcmp("127.0.0.1", ip) == 0 || strcasecmp("localhost", ip) == 0)) { +TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) { + if (ip == NULL || (ip != NULL && (strcmp("127.0.0.1", ip) == 0 || strcasecmp("localhost", ip) == 0))) { #ifdef CLUSTER - ip = tsPrivateIp; + ip = tsMasterIp; #else ip = tsServerIpStr; #endif @@ -168,47 +162,21 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha void *taos = taos_connect_imp(ip, user, pass, db, port, NULL, NULL, NULL); if (taos != NULL) { - STscObj* pObj = (STscObj*) taos; + STscObj *pObj = (STscObj *)taos; // version compare only requires the first 3 segments of the version string - int32_t comparedSegments = 3; - char client_version[64] = {0}; - char server_version[64] = {0}; - int clientVersionNumber[4] = {0}; - int serverVersionNumber[4] = {0}; - - strcpy(client_version, version); - strcpy(server_version, taos_get_server_info(taos)); - - if (!taosGetVersionNumber(client_version, clientVersionNumber)) { - tscError("taos:%p, invalid client version:%s", taos, client_version); - pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION; + int code = taosCheckVersion(version, taos_get_server_info(taos), 3); + if (code != 0) { + pObj->pSql->res.code = code; taos_close(taos); return NULL; } - - if (!taosGetVersionNumber(server_version, serverVersionNumber)) { - tscError("taos:%p, invalid server version:%s", taos, server_version); - pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION; - taos_close(taos); - return NULL; - } - - for(int32_t i = 0; i < comparedSegments; ++i) { - if (clientVersionNumber[i] != serverVersionNumber[i]) { - tscError("taos:%p, the %d-th number of server version:%s not matched with client version:%s, close connection", - taos, i, server_version, 
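Client/server compatibility checking is now delegated to taosCheckVersion, which compares only the first three dot-separated segments of the two version strings. A standalone sketch of that comparison (not the library function itself):

```c
#include <stdio.h>
#include <stdlib.h>

/* Compare only the first n dot-separated segments of two version strings,
 * e.g. "1.6.0.5" and "1.6.0.9" are compatible when n == 3. */
static int versionsCompatible(const char *client, const char *server, int n) {
  char *c = (char *)client, *s = (char *)server;
  while (n-- > 0) {
    if (strtol(c, &c, 10) != strtol(s, &s, 10)) return 0;
    if (*c == '.') c++;
    if (*s == '.') s++;
  }
  return 1;
}

int main(void) {
  printf("%d\n", versionsCompatible("1.6.0.5", "1.6.0.9", 3));   /* 1: compatible    */
  printf("%d\n", versionsCompatible("1.6.0.5", "1.5.9.9", 3));   /* 0: minor differs */
  return 0;
}
```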
version); - pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION; - taos_close(taos); - return NULL; - } - } } return taos; } -TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, int port, void (*fp)(void *, TAOS_RES *, int), +TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) { #ifndef CLUSTER if (ip == NULL) { @@ -231,11 +199,17 @@ void taos_close(TAOS *taos) { } } -int taos_query_imp(STscObj* pObj, SSqlObj* pSql) { +int taos_query_imp(STscObj *pObj, SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; pRes->numOfRows = 1; pRes->numOfTotal = 0; + pSql->asyncTblPos = NULL; + if (NULL != pSql->pTableHashList) { + taosCleanUpIntHash(pSql->pTableHashList); + pSql->pTableHashList = NULL; + } + tscTrace("%p SQL: %s pObj:%p", pSql, pSql->sqlstr, pObj); pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false); @@ -248,11 +222,16 @@ int taos_query_imp(STscObj* pObj, SSqlObj* pSql) { pRes->qhandle = 0; pSql->thandle = NULL; - if (pRes->code != TSDB_CODE_SUCCESS) return pRes->code; + if (pRes->code == TSDB_CODE_SUCCESS) { + tscDoQuery(pSql); + } - tscDoQuery(pSql); + if (pRes->code == TSDB_CODE_SUCCESS) { + tscTrace("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pObj), pObj); + } else { + tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pObj), pObj); + } - tscTrace("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pObj), pObj); if (pRes->code != TSDB_CODE_SUCCESS) { tscFreeSqlObjPartial(pSql); } @@ -271,17 +250,22 @@ int taos_query(TAOS *taos, const char *sqlstr) { SSqlRes *pRes = &pSql->res; size_t sqlLen = strlen(sqlstr); - if (sqlLen > TSDB_MAX_SQL_LEN) { - tscError("%p sql too long", pSql); - pRes->code = TSDB_CODE_INVALID_SQL; + if (sqlLen > tsMaxSQLStringLen) { + pRes->code = + tscInvalidSQLErrMsg(pSql->cmd.payload, "sql too long", NULL); // set the additional error msg for invalid sql + tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); + return pRes->code; } + taosNotePrintTsc(sqlstr); + void *sql = realloc(pSql->sqlstr, sqlLen + 1); if (sql == NULL) { pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; - tscError("%p failed to malloc sql string buffer", pSql); - tscTrace("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); + tscError("%p failed to malloc sql string buffer, reason:%s", pSql, strerror(errno)); + + tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); return pRes->code; } @@ -448,25 +432,56 @@ static void **getOneRowFromBuf(SSqlObj *pSql) { return pRes->tsrow; } -static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { +static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) { + bool hasData = true; SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; - while (1) { - bool hasData = true; + if (tscProjectionQueryOnMetric(pCmd)) { + bool allSubqueryExhausted = true; for (int32_t i = 0; i < pSql->numOfSubs; ++i) { SSqlRes *pRes1 = &pSql->pSubs[i]->res; + SSqlCmd *pCmd1 = &pSql->pSubs[i]->cmd; - // in case inner join, if any subquery exhausted, query completed - if (pRes1->numOfRows == 0) { + SMeterMetaInfo *pMetaInfo = tscGetMeterMetaInfo(pCmd1, 0); + assert(pCmd1->numOfTables == 1); + + /* + * if the global limitation is not reached, and current result has not exhausted, or next more vnodes are + * available, go on + */ + if (pMetaInfo->vnodeIndex < pMetaInfo->pMetricMeta->numOfVnodes && pRes1->row < pRes1->numOfRows && + 
(!tscHasReachLimitation(pSql->pSubs[i]))) { + allSubqueryExhausted = false; + break; + } + } + + hasData = !allSubqueryExhausted; + } else { // otherwise, in case inner join, if any subquery exhausted, query completed. + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlRes *pRes1 = &pSql->pSubs[i]->res; + + if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pSql->pSubs[i]) && + tscProjectionQueryOnTable(&pSql->pSubs[i]->cmd)) || + (pRes1->numOfRows == 0)) { + hasData = false; break; } } + } + + return hasData; +} + +static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; - if (!hasData) { // free all sub sqlobj - tscTrace("%p one subquery exhausted, free other %d subquery", pSql, pSql->numOfSubs - 1); + while (1) { + if (!tscHashRemainDataInSubqueryResultSet(pSql)) { // free all sub sqlobj + tscTrace("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1); SSubqueryState *pState = NULL; @@ -484,41 +499,32 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { } if (pRes->tsrow == NULL) { - pRes->tsrow = malloc(sizeof(void *) * pCmd->exprsInfo.numOfExprs); + pRes->tsrow = malloc(POINTER_BYTES * pCmd->exprsInfo.numOfExprs); } bool success = false; - if (pSql->numOfSubs >= 2) { - // do merge result + if (pSql->numOfSubs >= 2) { // do merge result SSqlRes *pRes1 = &pSql->pSubs[0]->res; SSqlRes *pRes2 = &pSql->pSubs[1]->res; - while (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) { + if (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) { doSetResultRowData(pSql->pSubs[0]); doSetResultRowData(pSql->pSubs[1]); - - TSKEY key1 = *(TSKEY *)pRes1->tsrow[0]; - TSKEY key2 = *(TSKEY *)pRes2->tsrow[0]; - - if (key1 == key2) { - success = true; - pRes1->row++; - pRes2->row++; - break; - } else if (key1 < key2) { - pRes1->row++; - } else if (key1 > key2) { - pRes2->row++; - } + // TSKEY key1 = *(TSKEY *)pRes1->tsrow[0]; + // TSKEY key2 = *(TSKEY *)pRes2->tsrow[0]; + // printf("first:%" PRId64 ", second:%" PRId64 "\n", key1, key2); + success = true; + pRes1->row++; + pRes2->row++; } - } else { + } else { // only one subquery SSqlRes *pRes1 = &pSql->pSubs[0]->res; doSetResultRowData(pSql->pSubs[0]); success = (pRes1->row++ < pRes1->numOfRows); } - if (success) { + if (success) { // current row of final output has been built, return to app for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { int32_t tableIndex = pRes->pColumnIndex[i].tableIndex; int32_t columnIndex = pRes->pColumnIndex[i].columnIndex; @@ -528,7 +534,7 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { } break; - } else { + } else { // continue retrieve data from vnode tscFetchDatablockFromSubquery(pSql); if (pRes->code != TSDB_CODE_SUCCESS) { return NULL; @@ -550,9 +556,12 @@ TAOS_ROW taos_fetch_row_impl(TAOS_RES *res) { if (pCmd->command == TSDB_SQL_METRIC_JOIN_RETRIEVE) { tscFetchDatablockFromSubquery(pSql); + if (pRes->code == TSDB_CODE_SUCCESS) { + tscTrace("%p data from all subqueries have been retrieved to client", pSql); return tscJoinResultsetFromBuf(pSql); } else { + tscTrace("%p retrieve data from subquery failed, code:%d", pSql, pRes->code); return NULL; } @@ -593,7 +602,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); // reach the maximum number of output rows, abort - if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { + if (tscHasReachLimitation(pSql)) { return NULL; } @@ -606,7 +615,15 @@ 
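tscHashRemainDataInSubqueryResultSet decides whether the join can still produce rows: for a plain inner join the result set is complete as soon as any sub-query is exhausted, while projection queries on a super table also consider remaining vnodes and the global limit. A simplified sketch of the inner-join branch only:

```c
#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for one sub-query's result cursor. */
typedef struct { int row, numOfRows; } SSubRes;

/* Inner-join rule: as soon as any sub-query has no unread rows left, the
 * joined result set is complete (vnode iteration and limits are ignored here). */
static bool hasRemainData(const SSubRes *subs, int n) {
  for (int i = 0; i < n; ++i) {
    if (subs[i].row >= subs[i].numOfRows) return false;
  }
  return true;
}

int main(void) {
  SSubRes subs[2] = { { .row = 3, .numOfRows = 10 }, { .row = 5, .numOfRows = 5 } };
  printf("has remaining data: %d\n", hasRemainData(subs, 2));    /* 0: second sub is done */
  return 0;
}
```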
TAOS_ROW taos_fetch_row(TAOS_RES *res) { assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0)); - if ((++pCmd->vnodeIdx) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + /* + * For project query with super table join, the numOfSub is equalled to the number of all subqueries, so + * we need to reset the value of numOfSubs to be 0. + * + * For super table join with projection query, if anyone of the subquery is exhausted, the query completed. + */ + pSql->numOfSubs = 0; + + if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { pCmd->command = TSDB_SQL_SELECT; assert(pSql->fp == NULL); tscProcessSql(pSql); @@ -614,7 +631,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { } // check!!! - if (rows != NULL || pCmd->vnodeIdx >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { + if (rows != NULL || pMeterMetaInfo->vnodeIndex >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { break; } } @@ -640,7 +657,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { nRows = taos_fetch_block_impl(res, rows); while (*rows == NULL && tscProjectionQueryOnMetric(pCmd)) { /* reach the maximum number of output rows, abort */ - if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { + if (tscHasReachLimitation(pSql)) { return 0; } @@ -650,11 +667,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { pCmd->limit.limit = pSql->cmd.globalLimit - pRes->numOfTotal; pCmd->limit.offset = pRes->offset; -#ifdef CLUSTER - if ((++pSql->cmd.vnodeIdx) <= pMeterMetaInfo->pMetricMeta->numOfVnodes) { -#else - if ((++pSql->cmd.vnodeIdx) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { -#endif + if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { pSql->cmd.command = TSDB_SQL_SELECT; assert(pSql->fp == NULL); tscProcessSql(pSql); @@ -662,7 +675,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { } // check!!! 
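taos_fetch_row now advances pMeterMetaInfo->vnodeIndex (instead of pCmd->vnodeIdx) and re-issues the SELECT against the next vnode whenever the current one is exhausted, until a row is produced or all vnodes have been visited. A schematic version of that loop with a stubbed per-vnode fetch:

```c
#include <stdio.h>

typedef struct { int vnodeIndex, numOfVnodes; } SCursor;

/* Stub: number of rows the current vnode still has (first two are empty). */
static int fetchFromVnode(int vnode) {
  static const int rows[] = { 0, 0, 2 };
  return rows[vnode];
}

/* When the current vnode is exhausted, advance vnodeIndex and query the next
 * vnode, until a row is produced or every vnode has been visited. */
static int fetchRow(SCursor *cur) {
  int rows;
  while ((rows = fetchFromVnode(cur->vnodeIndex)) == 0) {
    if (++cur->vnodeIndex >= cur->numOfVnodes) return 0;   /* all vnodes done */
    /* in the client, the SELECT is re-issued here for the new vnode */
  }
  return rows;
}

int main(void) {
  SCursor cur = { .vnodeIndex = 0, .numOfVnodes = 3 };
  int rows = fetchRow(&cur);
  printf("rows:%d from vnode:%d\n", rows, cur.vnodeIndex);   /* rows:2 from vnode:2 */
  return 0;
}
```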
- if (*rows != NULL || pCmd->vnodeIdx >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { + if (*rows != NULL || pMeterMetaInfo->vnodeIndex >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { break; } } @@ -784,9 +797,8 @@ int taos_errno(TAOS *taos) { } char *taos_errstr(TAOS *taos) { - STscObj * pObj = (STscObj *)taos; - unsigned char code; - char temp[256] = {0}; + STscObj *pObj = (STscObj *)taos; + uint8_t code; if (pObj == NULL || pObj->signature != pObj) return tsError[globalCode]; @@ -795,12 +807,15 @@ char *taos_errstr(TAOS *taos) { else code = pObj->pSql->res.code; + // for invalid sql, additional information is attached to explain why the sql is invalid if (code == TSDB_CODE_INVALID_SQL) { - snprintf(temp, tListLen(temp), "invalid SQL: %s", pObj->pSql->cmd.payload); - strcpy(pObj->pSql->cmd.payload, temp); return pObj->pSql->cmd.payload; } else { - return tsError[code]; + if (code < 0 || code > TSDB_CODE_MAX_ERROR_CODE) { + return tsError[TSDB_CODE_SUCCESS]; + } else { + return tsError[code]; + } } } @@ -868,7 +883,7 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) break; case TSDB_DATA_TYPE_BIGINT: - len += sprintf(str + len, "%lld ", *((int64_t *)row[i])); + len += sprintf(str + len, "%" PRId64 " ", *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: @@ -886,15 +901,14 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) size_t xlen = strlen(row[i]); size_t trueLen = MIN(xlen, fields[i].bytes); - memcpy(str + len, (char*) row[i], trueLen); + memcpy(str + len, (char *)row[i], trueLen); str[len + trueLen] = ' '; len += (trueLen + 1); - } - break; + } break; case TSDB_DATA_TYPE_TIMESTAMP: - len += sprintf(str + len, "%lld ", *((int64_t *)row[i])); + len += sprintf(str + len, "%" PRId64 " ", *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_BOOL: @@ -923,7 +937,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { tscTrace("%p Valid SQL: %s pObj:%p", pSql, sql, pObj); int32_t sqlLen = strlen(sql); - if (sqlLen > TSDB_MAX_SQL_LEN) { + if (sqlLen > tsMaxSQLStringLen) { tscError("%p sql too long", pSql); pRes->code = TSDB_CODE_INVALID_SQL; return pRes->code; @@ -939,6 +953,12 @@ int taos_validate_sql(TAOS *taos, const char *sql) { strtolower(pSql->sqlstr, sql); + pSql->asyncTblPos = NULL; + if (NULL != pSql->pTableHashList) { + taosCleanUpIntHash(pSql->pTableHashList); + pSql->pTableHashList = NULL; + } + pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false); int code = pRes->code; @@ -948,7 +968,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { return code; } -static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t tblListLen) { +static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) { // must before clean the sqlcmd object tscRemoveAllMeterMetaInfo(&pSql->cmd, false); tscCleanSqlCmd(&pSql->cmd); @@ -959,11 +979,11 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t pCmd->count = 0; int code = TSDB_CODE_INVALID_METER_ID; - char *str = (char*) tblNameList; + char *str = (char *)tblNameList; SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd); - if ((code = tscAllocPayload(pCmd, tblListLen+16)) != TSDB_CODE_SUCCESS) { + if ((code = tscAllocPayload(pCmd, tblListLen + 16)) != TSDB_CODE_SUCCESS) { return code; } @@ -985,7 +1005,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t strtrim(tblName); len = (uint32_t)strlen(tblName); - + SSQLToken sToken = {.n = len, .type = 
TK_ID, .z = tblName}; tSQLGetToken(tblName, &sToken.type); @@ -1029,7 +1049,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t } int taos_load_table_info(TAOS *taos, const char *tableNameList) { - const int32_t MAX_TABLE_NAME_LENGTH = 12*1024*1024; // 12MB list + const int32_t MAX_TABLE_NAME_LENGTH = 12 * 1024 * 1024; // 12MB list STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) { @@ -1053,7 +1073,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { return pRes->code; } - char* str = calloc(1, tblListLen + 1); + char *str = calloc(1, tblListLen + 1); if (str == NULL) { pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; tscError("%p failed to malloc sql string buffer", pSql); @@ -1061,7 +1081,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { } strtolower(str, tableNameList); - pRes->code = (uint8_t) tscParseTblNameList(pSql, str, tblListLen); + pRes->code = (uint8_t)tscParseTblNameList(pSql, str, tblListLen); /* * set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query. diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index ec8233c6f9519e27ce4c702742bc82b058421183..31af78f6188477b8a1866cd20dfe35c4c12018cd 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -15,7 +15,7 @@ #include "os.h" #include "tlog.h" -#include "tsql.h" +#include "tscSQLParser.h" #include "ttime.h" #include "ttimer.h" #include "tutil.h" @@ -85,7 +85,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) { // failed to get meter/metric meta, retry in 10sec. if (code != TSDB_CODE_SUCCESS) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p,get metermeta failed, retry in %lldms", pStream->pSql, pStream, retryDelayTime); + tscError("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime); tscSetRetryTimer(pStream, pSql, retryDelayTime); return; @@ -136,7 +136,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf SSqlStream *pStream = (SSqlStream *)param; if (tres == NULL || numOfRows < 0) { int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p, query data failed, code:%d, retry in %lldms", pStream->pSql, pStream, numOfRows, + tscError("%p stream:%p, query data failed, code:%d, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows, retryDelay); SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0); @@ -158,7 +158,7 @@ static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) { if (timestamp != actualTimestamp) { // reset the timestamp of each agg point by using start time of each interval *((int64_t *)pRes->data) = actualTimestamp; - tscWarn("%p stream:%p, timestamp of points is:%lld, reset to %lld", pSql, pStream, timestamp, actualTimestamp); + tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64 "", pSql, pStream, timestamp, actualTimestamp); } } @@ -169,7 +169,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf if (pSql == NULL || numOfRows < 0) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p, retrieve data failed, code:%d, retry in %lldms", pSql, pStream, numOfRows, retryDelayTime); + tscError("%p stream:%p, retrieve data failed, code:%d, retry in %" PRId64 "ms", 
pSql, pStream, numOfRows, retryDelayTime); tscClearMeterMetaInfo(pMeterMetaInfo, true); tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime); @@ -235,7 +235,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf /* no resuls in the query range, retry */ // todo set retry dynamic time int32_t retry = tsProjectExecInterval; - tscError("%p stream:%p, retrieve no data, code:%d, retry in %lldms", pSql, pStream, numOfRows, retry); + tscError("%p stream:%p, retrieve no data, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retry); tscClearSqlMetaInfoForce(&(pStream->pSql->cmd)); tscSetRetryTimer(pStream, pStream->pSql, retry); @@ -265,7 +265,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) /* * current time window will be closed, since it too early to exceed the maxRetentWindow value */ - tscTrace("%p stream:%p, etime:%lld is too old, exceeds the max retention time window:%lld, stop the stream", + tscTrace("%p stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here taos_close_stream(pStream); @@ -276,10 +276,10 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) return; } - tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", pStream->pSql, pStream, + tscTrace("%p stream:%p, next query start at %" PRId64 ", in %" PRId64 "ms. query range %" PRId64 "-%" PRId64 "", pStream->pSql, pStream, now + timer, timer, pStream->stime, etime); } else { - tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", pStream->pSql, pStream, + tscTrace("%p stream:%p, next query start at %" PRId64 ", in %" PRId64 "ms. query range %" PRId64 "-%" PRId64 "", pStream->pSql, pStream, pStream->stime, timer, pStream->stime - pStream->interval, pStream->stime - 1); } @@ -299,7 +299,7 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { */ timer = pStream->slidingTime; if (pStream->stime > pStream->etime) { - tscTrace("%p stream:%p, stime:%lld is larger than end time: %lld, stop the stream", pStream->pSql, pStream, + tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here taos_close_stream(pStream); @@ -353,7 +353,7 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { int64_t minIntervalTime = (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime; if (pCmd->nAggTimeInterval < minIntervalTime) { - tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%lld", pSql, pStream, + tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64 "", pSql, pStream, pCmd->nAggTimeInterval, minIntervalTime); pCmd->nAggTimeInterval = minIntervalTime; } @@ -368,14 +368,14 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? 
tsMinSlidingTime * 1000L : tsMinSlidingTime; if (pCmd->nSlidingTime < minSlidingTime) { - tscWarn("%p stream:%p, original sliding value:%lld too small, reset to:%lld", pSql, pStream, pCmd->nSlidingTime, + tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream, pCmd->nSlidingTime, minSlidingTime); pCmd->nSlidingTime = minSlidingTime; } if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { - tscWarn("%p stream:%p, sliding value:%lld can not be larger than interval range, reset to:%lld", pSql, pStream, + tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64 "", pSql, pStream, pCmd->nSlidingTime, pCmd->nAggTimeInterval); pCmd->nSlidingTime = pCmd->nAggTimeInterval; @@ -401,11 +401,11 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in } else { // timewindow based aggregation stream if (stime == 0) { // no data in meter till now stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval; - tscWarn("%p stream:%p, last timestamp:0, reset to:%lld", pSql, pStream, stime); + tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64 "", pSql, pStream, stime); } else { int64_t newStime = (stime / pStream->interval) * pStream->interval; if (newStime != stime) { - tscWarn("%p stream:%p, last timestamp:%lld, reset to:%lld", pSql, pStream, stime, newStime); + tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64 "", pSql, pStream, stime, newStime); stime = newStime; } } @@ -447,7 +447,10 @@ static void setErrorInfo(STscObj* pObj, int32_t code, char* info) { SSqlCmd* pCmd = &pObj->pSql->cmd; pObj->pSql->res.code = code; - strncpy(pCmd->payload, info, pCmd->payloadLen); + + if (info != NULL) { + strncpy(pCmd->payload, info, pCmd->payloadLen); + } } TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), @@ -537,7 +540,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p int64_t starttime = tscGetLaunchTimestamp(pStream); taosTmrReset(tscProcessStreamTimer, starttime, pStream, tscTmr, &pStream->pTimer); - tscTrace("%p stream:%p is opened, query on:%s, interval:%lld, sliding:%lld, first launched in:%lld, sql:%s", pSql, + tscTrace("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql, pStream, pMeterMetaInfo->name, pStream->interval, pStream->slidingTime, starttime, sqlstr); return pStream; @@ -546,7 +549,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p void taos_close_stream(TAOS_STREAM *handle) { SSqlStream *pStream = (SSqlStream *)handle; - SSqlObj *pSql = (SSqlObj *)__sync_val_compare_and_swap_64(&pStream->pSql, pStream->pSql, 0); + SSqlObj *pSql = (SSqlObj *)atomic_exchange_ptr(&pStream->pSql, 0); if (pSql == NULL) { return; } diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index dee8f02118c0fd2ef22428d67393a801073c23c8..f2e9395c68b0dfc5a057b331cf00d38dbd9cb311 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -13,7 +13,7 @@ * along with this program. If not, see . 
*/ -#include +#include "os.h" #include "shash.h" #include "taos.h" @@ -56,7 +56,7 @@ TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, c if (pSub->taos == NULL) { tfree(pSub); } else { - char qstr[128]; + char qstr[256] = {0}; sprintf(qstr, "use %s", db); int res = taos_query(pSub->taos, qstr); if (res != 0) { @@ -64,7 +64,7 @@ TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, c taos_close(pSub->taos); tfree(pSub); } else { - sprintf(qstr, "select * from %s where _c0 > now+1000d", pSub->name); + snprintf(qstr, tListLen(qstr), "select * from %s where _c0 > now+1000d", pSub->name); if (taos_query(pSub->taos, qstr)) { tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos)); taos_close(pSub->taos); @@ -106,7 +106,7 @@ TAOS_ROW taos_consume(TAOS_SUB *tsub) { pSub->stime = taosGetTimestampMs(); - sprintf(qstr, "select * from %s where _c0 > %lld order by _c0 asc", pSub->name, pSub->lastKey); + sprintf(qstr, "select * from %s where _c0 > %" PRId64 " order by _c0 asc", pSub->name, pSub->lastKey); if (taos_query(pSub->taos, qstr)) { tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos)); return NULL; diff --git a/src/client/src/tscSyntaxtreefunction.c b/src/client/src/tscSyntaxtreefunction.c index 00781919e466c5049de84650eacd1d0badf58b72..914053f2f17461e0f5e7ffbd56691e7fd206cd49 100644 --- a/src/client/src/tscSyntaxtreefunction.c +++ b/src/client/src/tscSyntaxtreefunction.c @@ -13,13 +13,10 @@ * along with this program. If not, see . */ -#include -#include -#include -#include +#include "os.h" #include "tscSyntaxtreefunction.h" -#include "tsql.h" +#include "tscSQLParser.h" #include "ttypes.h" #include "tutil.h" diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 7ebd43cd19d2b6b1f94ec21651c0fa3c2578c84f..6efe3447196d384548a97442419f0aa91c1b3c16 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -13,15 +13,6 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "taosmsg.h" #include "tcache.h" @@ -54,6 +45,10 @@ extern int tscEmbedded; int tscNumOfThreads; static pthread_once_t tscinit = PTHREAD_ONCE_INIT; +extern int tsTscEnableRecordSql; +extern int tsNumOfLogLines; +void taosInitNote(int numOfNoteLines, int maxNotes, char* lable); + void tscCheckDiskUsage(void *para, void *unused) { taosGetDisk(); taosTmrReset(tscCheckDiskUsage, 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); @@ -92,6 +87,12 @@ void taos_init_imp() { tscTrace("Local IP address is:%s", tsLocalIp); } + taosSetCoreDump(); + + if (tsTscEnableRecordSql != 0) { + taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note"); + } + #ifdef CLUSTER tscMgmtIpList.numOfIps = 2; strcpy(tscMgmtIpList.ipstr[0], tsMasterIp); @@ -185,57 +186,57 @@ void taos_init_imp() { tscConnCache = taosOpenConnCache(tsMaxMeterConnections * 2, taosCloseRpcConn, tscTmr, tsShellActivityTimer * 1000); initialized = 1; - tscTrace("taos client is initialized successfully"); + tscTrace("client is initialized successfully"); tsInsertHeadSize = tsRpcHeadSize + sizeof(SShellSubmitMsg); } void taos_init() { pthread_once(&tscinit, taos_init_imp); } -int taos_options(TSDB_OPTION option, const void *arg, ...) 
{ - char * pStr = NULL; - SGlobalConfig *cfg_configDir = tsGetConfigOption("configDir"); - SGlobalConfig *cfg_activetimer = tsGetConfigOption("shellActivityTimer"); - SGlobalConfig *cfg_locale = tsGetConfigOption("locale"); - SGlobalConfig *cfg_charset = tsGetConfigOption("charset"); - SGlobalConfig *cfg_timezone = tsGetConfigOption("timezone"); - SGlobalConfig *cfg_socket = tsGetConfigOption("sockettype"); +static int taos_options_imp(TSDB_OPTION option, const char *pStr) { + SGlobalConfig *cfg = NULL; switch (option) { case TSDB_OPTION_CONFIGDIR: - pStr = (char *)arg; - if (cfg_configDir && cfg_configDir->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + cfg = tsGetConfigOption("configDir"); + assert(cfg != NULL); + + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { strncpy(configDir, pStr, TSDB_FILENAME_LEN); - cfg_configDir->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION; tscPrint("set config file directory:%s", pStr); } else { - tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg_configDir->option, pStr, - tsCfgStatusStr[cfg_configDir->cfgStatus], (char *)cfg_configDir->ptr); + tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, pStr, + tsCfgStatusStr[cfg->cfgStatus], (char *)cfg->ptr); } break; case TSDB_OPTION_SHELL_ACTIVITY_TIMER: - if (cfg_activetimer && cfg_activetimer->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { - tsShellActivityTimer = atoi((char *)arg); + cfg = tsGetConfigOption("shellActivityTimer"); + assert(cfg != NULL); + + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + tsShellActivityTimer = atoi(pStr); if (tsShellActivityTimer < 1) tsShellActivityTimer = 1; if (tsShellActivityTimer > 3600) tsShellActivityTimer = 3600; - cfg_activetimer->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION; tscPrint("set shellActivityTimer:%d", tsShellActivityTimer); } else { - tscWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg_activetimer->option, pStr, - tsCfgStatusStr[cfg_activetimer->cfgStatus], (int32_t *)cfg_activetimer->ptr); + tscWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, pStr, + tsCfgStatusStr[cfg->cfgStatus], (int32_t *)cfg->ptr); } break; case TSDB_OPTION_LOCALE: { // set locale - pStr = (char *)arg; - + cfg = tsGetConfigOption("locale"); + assert(cfg != NULL); + size_t len = strlen(pStr); if (len == 0 || len > TSDB_LOCALE_LEN) { tscPrint("Invalid locale:%s, use default", pStr); return -1; } - if (cfg_locale && cfg_charset && cfg_locale->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { char sep = '.'; if (strlen(tsLocale) == 0) { // locale does not set yet @@ -248,7 +249,7 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) { if (locale != NULL) { tscPrint("locale set, prev locale:%s, new locale:%s", tsLocale, locale); - cfg_locale->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION; } else { // set the user-specified localed failed, use default LC_CTYPE as current locale locale = setlocale(LC_CTYPE, tsLocale); tscPrint("failed to set locale:%s, current locale:%s", pStr, tsLocale); @@ -270,7 +271,7 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) 
{ } strncpy(tsCharset, charset, tListLen(tsCharset)); - cfg_charset->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION; } else { tscPrint("charset:%s is not valid in locale, charset remains:%s", charset, tsCharset); @@ -281,23 +282,24 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) { tscPrint("charset remains:%s", tsCharset); } } else { - tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg_locale->option, pStr, - tsCfgStatusStr[cfg_locale->cfgStatus], (char *)cfg_locale->ptr); + tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, pStr, + tsCfgStatusStr[cfg->cfgStatus], (char *)cfg->ptr); } break; } case TSDB_OPTION_CHARSET: { /* set charset will override the value of charset, assigned during system locale changed */ - pStr = (char *)arg; - + cfg = tsGetConfigOption("charset"); + assert(cfg != NULL); + size_t len = strlen(pStr); if (len == 0 || len > TSDB_LOCALE_LEN) { tscPrint("failed to set charset:%s", pStr); return -1; } - if (cfg_charset && cfg_charset->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { if (taosValidateEncodec(pStr)) { if (strlen(tsCharset) == 0) { tscPrint("charset is set:%s", pStr); @@ -306,48 +308,71 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) { } strncpy(tsCharset, pStr, tListLen(tsCharset)); - cfg_charset->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION; } else { tscPrint("charset:%s not valid", pStr); } } else { - tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg_charset->option, pStr, - tsCfgStatusStr[cfg_charset->cfgStatus], (char *)cfg_charset->ptr); + tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, pStr, + tsCfgStatusStr[cfg->cfgStatus], (char *)cfg->ptr); } break; } case TSDB_OPTION_TIMEZONE: - pStr = (char *)arg; - if (cfg_timezone && cfg_timezone->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + cfg = tsGetConfigOption("timezone"); + assert(cfg != NULL); + + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { strcpy(tsTimezone, pStr); tsSetTimeZone(); - cfg_timezone->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION; tscTrace("timezone set:%s, input:%s by taos_options", tsTimezone, pStr); } else { - tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg_timezone->option, pStr, - tsCfgStatusStr[cfg_timezone->cfgStatus], (char *)cfg_timezone->ptr); + tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, pStr, + tsCfgStatusStr[cfg->cfgStatus], (char *)cfg->ptr); } break; case TSDB_OPTION_SOCKET_TYPE: - if (cfg_socket && cfg_socket->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { - if (strcasecmp(arg, TAOS_SOCKET_TYPE_NAME_UDP) != 0 && strcasecmp(arg, TAOS_SOCKET_TYPE_NAME_TCP) != 0) { + cfg = tsGetConfigOption("sockettype"); + assert(cfg != NULL); + + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + if (strcasecmp(pStr, TAOS_SOCKET_TYPE_NAME_UDP) != 0 && strcasecmp(pStr, TAOS_SOCKET_TYPE_NAME_TCP) != 0) { tscError("only 'tcp' or 'udp' allowed for configuring the socket type"); return -1; } - strncpy(tsSocketType, arg, tListLen(tsSocketType)); - cfg_socket->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + strncpy(tsSocketType, pStr, tListLen(tsSocketType)); + cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION; tscPrint("socket type is set:%s", tsSocketType); } break; default: + // TODO return the correct error code to client in the format for taos_errstr() 
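Each option handler above applies the same precedence guard: a value coming through taos_options is only written when the option's current cfgStatus is not owned by a higher-priority source; otherwise the existing value is kept and a warning names where it came from. A self-contained toy version of that rule follows, with an illustrative priority ladder and helper rather than the real SGlobalConfig machinery.

```c
#include <stdio.h>
#include <string.h>

/* Illustrative priority ladder: larger value = higher-priority source. */
typedef enum {
  CFG_STATUS_DEFAULT = 0,  /* built-in default            */
  CFG_STATUS_FILE    = 1,  /* set from the config file    */
  CFG_STATUS_OPTION  = 2,  /* set via an options API call */
  CFG_STATUS_ARG     = 3   /* set on the command line     */
} EConfigStatus;

typedef struct {
  const char   *name;
  char          value[64];
  EConfigStatus status;
} SToyConfig;

/* Apply a value coming from the options API only if nothing of higher
 * priority has configured the option already. */
static int applyOption(SToyConfig *cfg, const char *value) {
  if (cfg->status <= CFG_STATUS_OPTION) {
    strncpy(cfg->value, value, sizeof(cfg->value) - 1);
    cfg->status = CFG_STATUS_OPTION;
    printf("set %s to %s\n", cfg->name, cfg->value);
    return 0;
  }
  printf("%s already configured by a higher-priority source, keeping %s\n",
         cfg->name, cfg->value);
  return -1;
}

int main(void) {
  SToyConfig tz  = {.name = "timezone",  .value = "UTC",       .status = CFG_STATUS_ARG};
  SToyConfig dir = {.name = "configDir", .value = "/etc/taos", .status = CFG_STATUS_DEFAULT};
  applyOption(&tz, "UTC-8");       /* rejected: command line wins      */
  applyOption(&dir, "/tmp/taos");  /* accepted: nothing stronger set it */
  return 0;
}
```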
tscError("Invalid option %d", option); return -1; } return 0; } + + +int taos_options(TSDB_OPTION option, const void *arg, ...) { + static int32_t lock = 0; + + for (int i = 1; atomic_val_compare_exchange_32(&lock, 0, 1) != 0; ++i) { + if (i % 1000 == 0) { + tscPrint("haven't acquire lock after spin %d times.", i); + sched_yield(); + } + } + + int ret = taos_options_imp(option, (const char*)arg); + + atomic_store_32(&lock, 0); + return ret; +} diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ec5c9806129197929667bfa30f2e55ce1f7446d7..4b0df767848b3117f0d4e382c5cb88b6843fc8ef 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -13,10 +13,6 @@ * along with this program. If not, see . */ -#include -#include -#include - #include "os.h" #include "ihash.h" #include "taosmsg.h" @@ -55,7 +51,6 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) { assert(len < tListLen(tagIdBuf)); const int32_t maxKeySize = TSDB_MAX_TAGS_LEN; // allowed max key size - char* tmp = calloc(1, TSDB_MAX_SQL_LEN); SCond* cond = tsGetMetricQueryCondPos(pTagCond, uid); @@ -64,12 +59,24 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) { sprintf(join, "%s,%s", pTagCond->joinInfo.left.meterId, pTagCond->joinInfo.right.meterId); } - int32_t keyLen = - snprintf(tmp, TSDB_MAX_SQL_LEN, "%s,%s,%s,%d,%s,[%s],%d", pMeterMetaInfo->name, - (cond != NULL ? cond->cond.z : NULL), pTagCond->tbnameCond.cond.n > 0 ? pTagCond->tbnameCond.cond.z : NULL, + // estimate the buffer size + size_t tbnameCondLen = pTagCond->tbnameCond.cond != NULL? strlen(pTagCond->tbnameCond.cond):0; + size_t redundantLen = 20; + + size_t bufSize = strlen(pMeterMetaInfo->name) + tbnameCondLen + strlen(join) + strlen(tagIdBuf); + if (cond != NULL) { + bufSize += strlen(cond->cond); + } + + bufSize = (size_t) ((bufSize + redundantLen) * 1.5); + char* tmp = calloc(1, bufSize); + + int32_t keyLen = snprintf(tmp, bufSize, "%s,%s,%s,%d,%s,[%s],%d", pMeterMetaInfo->name, + (cond != NULL ? cond->cond : NULL), + (tbnameCondLen > 0 ? 
pTagCond->tbnameCond.cond : NULL), pTagCond->relType, join, tagIdBuf, pCmd->groupbyExpr.orderType); - assert(keyLen <= TSDB_MAX_SQL_LEN); + assert(keyLen <= bufSize); if (keyLen < maxKeySize) { strcpy(str, tmp); @@ -103,7 +110,7 @@ void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str) { SCond* pDest = &pTagCond->cond[pTagCond->numOfTagCond]; pDest->uid = uid; - pDest->cond = SStringCreate(str); + pDest->cond = strdup(str); pTagCond->numOfTagCond += 1; } @@ -146,7 +153,6 @@ bool tscIsSelectivityWithTagQuery(SSqlCmd* pCmd) { return false; } - void tscGetDBInfoFromMeterId(char* meterId, char* db) { char* st = strstr(meterId, TS_PATH_DELIMITER); if (st != NULL) { @@ -238,10 +244,9 @@ bool tscProjectionQueryOnMetric(SSqlCmd* pCmd) { //for project query, only the following two function is allowed for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); - int32_t functionId = pExpr->functionId; + int32_t functionId = tscSqlExprGet(pCmd, i)->functionId; if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && - functionId != TSDB_FUNC_TAG && functionId != TSDB_FUNC_TS) { + functionId != TSDB_FUNC_TAG && functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) { return false; } } @@ -249,6 +254,17 @@ bool tscProjectionQueryOnMetric(SSqlCmd* pCmd) { return true; } +bool tscProjectionQueryOnTable(SSqlCmd* pCmd) { + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + int32_t functionId = tscSqlExprGet(pCmd, i)->functionId; + if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TS) { + return false; + } + } + + return true; +} + bool tscIsPointInterpQuery(SSqlCmd* pCmd) { for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); @@ -269,7 +285,7 @@ bool tscIsPointInterpQuery(SSqlCmd* pCmd) { } bool tscIsTWAQuery(SSqlCmd* pCmd) { - for(int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { + for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); if (pExpr == NULL) { continue; @@ -435,15 +451,6 @@ void tscFreeSqlObj(SSqlObj* pSql) { free(pSql); } -STableDataBlocks* tscCreateDataBlock(int32_t size) { - STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks)); - dataBuf->nAllocSize = (uint32_t)size; - dataBuf->pData = calloc(1, dataBuf->nAllocSize); - dataBuf->ordered = true; - dataBuf->prevTS = INT64_MIN; - return dataBuf; -} - void tscDestroyDataBlock(STableDataBlocks* pDataBlock) { if (pDataBlock == NULL) { return; @@ -451,10 +458,14 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) { tfree(pDataBlock->pData); tfree(pDataBlock->params); + + // free the refcount for metermeta + taosRemoveDataFromCache(tscCacheHandle, (void**) &(pDataBlock->pMeterMeta), false); tfree(pDataBlock); } -SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, uint32_t offset) { +SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, + uint32_t offset) { uint32_t needed = pDataBlock->numOfParams + 1; if (needed > pDataBlock->numOfAllocedParams) { needed *= 2; @@ -494,13 +505,13 @@ SDataBlockList* tscCreateBlockArrayList() { return pDataBlockArrayList; } -void tscAppendDataBlock(SDataBlockList *pList, STableDataBlocks *pBlocks) { +void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks) { if (pList->nSize >= pList->nAlloc) { - pList->nAlloc = pList->nAlloc << 1; - pList->pData = 
realloc(pList->pData, sizeof(void *) * (size_t)pList->nAlloc); + pList->nAlloc = (pList->nAlloc) << 1U; + pList->pData = realloc(pList->pData, POINTER_BYTES * (size_t)pList->nAlloc); // reset allocated memory - memset(pList->pData + pList->nSize, 0, sizeof(void *) * (pList->nAlloc - pList->nSize)); + memset(pList->pData + pList->nSize, 0, POINTER_BYTES * (pList->nAlloc - pList->nSize)); } pList->pData[pList->nSize++] = pBlocks; @@ -522,29 +533,43 @@ void* tscDestroyBlockArrayList(SDataBlockList* pList) { } int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) { - SSqlCmd* pCmd = &pSql->cmd; - + SSqlCmd *pCmd = &pSql->cmd; + assert(pDataBlock->pMeterMeta != NULL); + pCmd->count = pDataBlock->numOfMeters; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - strcpy(pMeterMetaInfo->name, pDataBlock->meterId); - + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + + //set the correct metermeta object, the metermeta has been locked in pDataBlocks, so it must be in the cache + if (pMeterMetaInfo->pMeterMeta != pDataBlock->pMeterMeta) { + strcpy(pMeterMetaInfo->name, pDataBlock->meterId); + taosRemoveDataFromCache(tscCacheHandle, (void**) &(pMeterMetaInfo->pMeterMeta), false); + + pMeterMetaInfo->pMeterMeta = pDataBlock->pMeterMeta; + pDataBlock->pMeterMeta = NULL; // delegate the ownership of metermeta to pMeterMetaInfo + } else { + assert(strncmp(pMeterMetaInfo->name, pDataBlock->meterId, tListLen(pDataBlock->meterId)) == 0); + } + /* * the submit message consists of : [RPC header|message body|digest] * the dataBlock only includes the RPC Header buffer and actual submit messsage body, space for digest needs * additional space. */ int ret = tscAllocPayload(pCmd, pDataBlock->nAllocSize + sizeof(STaosDigest)); - if (TSDB_CODE_SUCCESS != ret) return ret; + if (TSDB_CODE_SUCCESS != ret) { + return ret; + } + memcpy(pCmd->payload, pDataBlock->pData, pDataBlock->nAllocSize); - + /* * the payloadLen should be actual message body size * the old value of payloadLen is the allocated payload size */ pCmd->payloadLen = pDataBlock->nAllocSize - tsRpcHeadSize; - + assert(pCmd->allocSize >= pCmd->payloadLen + tsRpcHeadSize + sizeof(STaosDigest)); - return tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); + return TSDB_CODE_SUCCESS; } void tscFreeUnusedDataBlocks(SDataBlockList* pList) { @@ -556,19 +581,38 @@ void tscFreeUnusedDataBlocks(SDataBlockList* pList) { } } -STableDataBlocks* tscCreateDataBlockEx(size_t size, int32_t rowSize, int32_t startOffset, char* name) { - STableDataBlocks *dataBuf = tscCreateDataBlock(size); +/** + * create the in-memory buffer for each table to keep the submitted data block + * @param initialSize + * @param rowSize + * @param startOffset + * @param name + * @param pMeterMeta the ownership of pMeterMeta should be transfer to STableDataBlocks + * @return + */ +STableDataBlocks* tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name) { + + STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks)); + dataBuf->nAllocSize = (uint32_t) initialSize; + dataBuf->pData = calloc(1, dataBuf->nAllocSize); + dataBuf->ordered = true; + dataBuf->prevTS = INT64_MIN; dataBuf->rowSize = rowSize; dataBuf->size = startOffset; dataBuf->tsSource = -1; strncpy(dataBuf->meterId, name, TSDB_METER_ID_LEN); + + // sure that the metermeta must be in the local client cache + dataBuf->pMeterMeta = taosGetDataFromCache(tscCacheHandle, dataBuf->meterId); + assert(dataBuf->pMeterMeta != NULL && 
initialSize > 0); + return dataBuf; } STableDataBlocks* tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size, - int32_t startOffset, int32_t rowSize, char* tableId) { + int32_t startOffset, int32_t rowSize, const char* tableId) { STableDataBlocks* dataBuf = NULL; STableDataBlocks** t1 = (STableDataBlocks**)taosGetIntHashData(pHashList, id); @@ -577,7 +621,7 @@ STableDataBlocks* tscGetDataBlockFromList(void* pHashList, SDataBlockList* pData } if (dataBuf == NULL) { - dataBuf = tscCreateDataBlockEx((size_t) size, rowSize, startOffset, tableId); + dataBuf = tscCreateDataBlock((size_t)size, rowSize, startOffset, tableId); dataBuf = *(STableDataBlocks**)taosAddIntHash(pHashList, id, (char*)&dataBuf); tscAppendDataBlock(pDataBlockList, dataBuf); } @@ -608,7 +652,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockLi if (tmp != NULL) { dataBuf->pData = tmp; memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size); - } else { // failed to allocate memory, free already allocated memory and return error code + } else { // failed to allocate memory, free already allocated memory and return error code tscError("%p failed to allocate memory for merging submit block, size:%d", pSql, dataBuf->nAllocSize); taosCleanUpIntHash(pVnodeDataBlockHashList); @@ -677,7 +721,7 @@ int tscAllocPayload(SSqlCmd* pCmd, int size) { pCmd->allocSize = size; } else { if (pCmd->allocSize < size) { - char* b = realloc(pCmd->payload, size); + char* b = realloc(pCmd->payload, size); if (b == NULL) return TSDB_CODE_CLI_OUT_OF_MEMORY; pCmd->payload = b; pCmd->allocSize = size; @@ -724,7 +768,7 @@ static void evic(SFieldInfo* pFieldInfo, int32_t index) { } } -static void setValueImpl(TAOS_FIELD* pField, int8_t type, char* name, int16_t bytes) { +static void setValueImpl(TAOS_FIELD* pField, int8_t type, const char* name, int16_t bytes) { pField->type = type; strncpy(pField->name, name, TSDB_COL_NAME_LEN); pField->bytes = bytes; @@ -768,7 +812,7 @@ void tscFieldInfoUpdateVisible(SFieldInfo* pFieldInfo, int32_t index, bool visib } } -void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, char* name, int16_t bytes) { +void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, const char* name, int16_t bytes) { ensureSpace(pFieldInfo, pFieldInfo->numOfOutputCols + 1); evic(pFieldInfo, index); @@ -873,11 +917,11 @@ void tscClearFieldInfo(SFieldInfo* pFieldInfo) { static void _exprCheckSpace(SSqlExprInfo* pExprInfo, int32_t size) { if (size > pExprInfo->numOfAlloc) { - int32_t oldSize = pExprInfo->numOfAlloc; + uint32_t oldSize = pExprInfo->numOfAlloc; - int32_t newSize = (oldSize <= 0) ? 8 : (oldSize << 1); + uint32_t newSize = (oldSize <= 0) ? 
8 : (oldSize << 1U); while (newSize < size) { - newSize = (newSize << 1); + newSize = (newSize << 1U); } if (newSize > TSDB_MAX_COLUMNS) { @@ -900,6 +944,19 @@ static void _exprEvic(SSqlExprInfo* pExprInfo, int32_t index) { } } +SSqlExpr* tscSqlExprInsertEmpty(SSqlCmd* pCmd, int32_t index, int16_t functionId) { + SSqlExprInfo* pExprInfo = &pCmd->exprsInfo; + + _exprCheckSpace(pExprInfo, pExprInfo->numOfExprs + 1); + _exprEvic(pExprInfo, index); + + SSqlExpr* pExpr = &pExprInfo->pExprs[index]; + pExpr->functionId = functionId; + + pExprInfo->numOfExprs++; + return pExpr; +} + SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t interSize) { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pColIndex->tableIndex); @@ -1108,7 +1165,8 @@ void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* sr *dst = *src; if (dst->filterOnBinary) { size_t len = (size_t) dst->len + 1; - dst->pz = calloc(1, len); + char* pTmp = calloc(1, len); + dst->pz = (int64_t) pTmp; memcpy((char*) dst->pz, (char*) src->pz, (size_t) len); } } @@ -1165,14 +1223,15 @@ void tscColumnBaseInfoDestroy(SColumnBaseInfo* pColumnBaseInfo) { assert(pColumnBaseInfo->numOfCols <= TSDB_MAX_COLUMNS); for (int32_t i = 0; i < pColumnBaseInfo->numOfCols; ++i) { - SColumnBase *pColBase = &(pColumnBaseInfo->pColList[i]); + SColumnBase* pColBase = &(pColumnBaseInfo->pColList[i]); if (pColBase->numOfFilters > 0) { for (int32_t j = 0; j < pColBase->numOfFilters; ++j) { assert(pColBase->filterInfo[j].filterOnBinary == 0 || pColBase->filterInfo[j].filterOnBinary == 1); if (pColBase->filterInfo[j].filterOnBinary) { - tfree(pColBase->filterInfo[j].pz); + free((char*) pColBase->filterInfo[j].pz); + pColBase->filterInfo[j].pz = 0; } } } @@ -1183,8 +1242,9 @@ void tscColumnBaseInfoDestroy(SColumnBaseInfo* pColumnBaseInfo) { tfree(pColumnBaseInfo->pColList); } - -void tscColumnBaseInfoReserve(SColumnBaseInfo* pColumnBaseInfo, int32_t size) { _cf_ensureSpace(pColumnBaseInfo, size); } +void tscColumnBaseInfoReserve(SColumnBaseInfo* pColumnBaseInfo, int32_t size) { + _cf_ensureSpace(pColumnBaseInfo, size); +} /* * 1. 
normal name, not a keyword or number @@ -1232,16 +1292,16 @@ int32_t tscValidateName(SSQLToken* pToken) { int len = tSQLGetToken(pToken->z, &pToken->type); // single token, validate it - if (len == pToken->n){ + if (len == pToken->n) { return validateQuoteToken(pToken); } else { - sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true); - if (sep == NULL) { - return TSDB_CODE_INVALID_SQL; - } + sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true); + if (sep == NULL) { + return TSDB_CODE_INVALID_SQL; + } return tscValidateName(pToken); - } + } } else { if (isNumber(pToken)) { return TSDB_CODE_INVALID_SQL; @@ -1284,8 +1344,7 @@ int32_t tscValidateName(SSQLToken* pToken) { // re-build the whole name string if (pStr[firstPartLen] == TS_PATH_DELIMITER[0]) { - // first part do not have quote - // do nothing + // first part do not have quote do nothing } else { pStr[firstPartLen] = TS_PATH_DELIMITER[0]; memmove(&pStr[firstPartLen + 1], pToken->z, pToken->n); @@ -1331,14 +1390,20 @@ bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId) { void tscTagCondCopy(STagCond* dest, const STagCond* src) { memset(dest, 0, sizeof(STagCond)); + + if (src->tbnameCond.cond != NULL) { + dest->tbnameCond.cond = strdup(src->tbnameCond.cond); + } - SStringCopy(&dest->tbnameCond.cond, &src->tbnameCond.cond); dest->tbnameCond.uid = src->tbnameCond.uid; memcpy(&dest->joinInfo, &src->joinInfo, sizeof(SJoinInfo)); for (int32_t i = 0; i < src->numOfTagCond; ++i) { - SStringCopy(&dest->cond[i].cond, &src->cond[i].cond); + if (src->cond[i].cond != NULL) { + dest->cond[i].cond = strdup(src->cond[i].cond); + } + dest->cond[i].uid = src->cond[i].uid; } @@ -1347,10 +1412,9 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) { } void tscTagCondRelease(STagCond* pCond) { - SStringFree(&pCond->tbnameCond.cond); - + free(pCond->tbnameCond.cond); for (int32_t i = 0; i < pCond->numOfTagCond; ++i) { - SStringFree(&pCond->cond[i].cond); + free(pCond->cond[i].cond); } memset(pCond, 0, sizeof(STagCond)); @@ -1449,7 +1513,11 @@ bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) { * data blocks have been submit to vnode. 
*/ SDataBlockList* pDataBlocks = pCmd->pDataBlocks; - if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) { + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + assert(pSql->cmd.numOfTables == 1); + + if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) { tscTrace("%p object should be release since all data blocks have been submit", pSql); return true; } else { @@ -1462,10 +1530,11 @@ bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) { } SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index) { - if (pCmd == NULL || index >= pCmd->numOfTables || index < 0) { + if (pCmd == NULL || pCmd->numOfTables == 0) { return NULL; } + assert(index >= 0 && index <= pCmd->numOfTables && pCmd->pMeterInfo != NULL); return pCmd->pMeterInfo[index]; } @@ -1508,7 +1577,7 @@ SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMetaInfo->numOfTags = numOfTags; if (tags != NULL) { - memcpy(pMeterMetaInfo->tagColumnIndex, tags, sizeof(int16_t) * numOfTags); + memcpy(pMeterMetaInfo->tagColumnIndex, tags, sizeof(pMeterMetaInfo->tagColumnIndex[0]) * numOfTags); } pCmd->numOfTables += 1; @@ -1562,130 +1631,13 @@ void tscResetForNextRetrieve(SSqlRes* pRes) { pRes->numOfRows = 0; } -SString SStringCreate(const char* str) { - size_t len = strlen(str); - - SString dest = {.n = len, .alloc = len + 1}; - dest.z = calloc(1, dest.alloc); - strcpy(dest.z, str); - - return dest; -} - -void SStringCopy(SString* pDest, const SString* pSrc) { - if (pSrc->n > 0) { - pDest->n = pSrc->n; - pDest->alloc = pDest->n + 1; // one additional space for null terminate - - pDest->z = calloc(1, pDest->alloc); - - memcpy(pDest->z, pSrc->z, pDest->n); - } else { - memset(pDest, 0, sizeof(SString)); - } -} - -void SStringFree(SString* pStr) { - if (pStr->alloc > 0) { - tfree(pStr->z); - pStr->alloc = 0; - } -} - -void SStringShrink(SString* pStr) { - if (pStr->alloc > (pStr->n + 1) && pStr->alloc > (pStr->n * 2)) { - pStr->z = realloc(pStr->z, pStr->n + 1); - assert(pStr->z != NULL); - - pStr->alloc = pStr->n + 1; - } -} - -int32_t SStringAlloc(SString* pStr, int32_t size) { - if (pStr->alloc >= size) { - return TSDB_CODE_SUCCESS; - } - - size = ALIGN8(size); - - char* tmp = NULL; - if (pStr->z != NULL) { - tmp = realloc(pStr->z, size); - memset(pStr->z + pStr->n, 0, size - pStr->n); - } else { - tmp = calloc(1, size); - } - - if (tmp == NULL) { -#ifdef WINDOWS - LPVOID lpMsgBuf; - FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, - GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language - (LPTSTR)&lpMsgBuf, 0, NULL); - tscTrace("failed to allocate memory, reason:%s", lpMsgBuf); - LocalFree(lpMsgBuf); -#else - char errmsg[256] = {0}; - strerror_r(errno, errmsg, tListLen(errmsg)); - tscTrace("failed to allocate memory, reason:%s", errmsg); -#endif - return TSDB_CODE_CLI_OUT_OF_MEMORY; - } - - pStr->z = tmp; - pStr->alloc = size; - - return TSDB_CODE_SUCCESS; -} - -#define MIN_ALLOC_SIZE 8 - -int32_t SStringEnsureRemain(SString* pStr, int32_t size) { - if (pStr->alloc - pStr->n > size) { - return TSDB_CODE_SUCCESS; - } - - // remain space is insufficient, allocate more spaces - int32_t inc = (size < MIN_ALLOC_SIZE) ? 
size : MIN_ALLOC_SIZE; - if (inc < (pStr->alloc >> 1)) { - inc = (pStr->alloc >> 1); - } - - // get the new size - int32_t newsize = pStr->alloc + inc; - - char* tmp = realloc(pStr->z, newsize); - if (tmp == NULL) { - -#ifdef WINDOWS - LPVOID lpMsgBuf; - FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, - GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language - (LPTSTR)&lpMsgBuf, 0, NULL); - tscTrace("failed to allocate memory, reason:%s", lpMsgBuf); - LocalFree(lpMsgBuf); -#else - char errmsg[256] = {0}; - strerror_r(errno, errmsg, tListLen(errmsg)); - tscTrace("failed to allocate memory, reason:%s", errmsg); -#endif - - return TSDB_CODE_CLI_OUT_OF_MEMORY; - } - - memset(tmp + pStr->n, 0, inc); - pStr->z = tmp; - - return TSDB_CODE_SUCCESS; -} - -SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param, - SSqlObj* pPrevSql) { +SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql) { SSqlCmd* pCmd = &pSql->cmd; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); if (pNew == NULL) { - tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex); + tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex); return NULL; } @@ -1694,7 +1646,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex pNew->sqlstr = strdup(pSql->sqlstr); if (pNew->sqlstr == NULL) { - tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex); + tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex); free(pNew); return NULL; @@ -1719,20 +1671,18 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex tscTagCondCopy(&pNew->cmd.tagCond, &pCmd->tagCond); if (tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) { - tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex); + tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex); tscFreeSqlObj(pNew); return NULL; } tscColumnBaseInfoCopy(&pNew->cmd.colList, &pCmd->colList, (int16_t)tableIndex); - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); - + // set the correct query type if (pPrevSql != NULL) { pNew->cmd.type = pPrevSql->cmd.type; } else { - pNew->cmd.type |= TSDB_QUERY_TYPE_SUBQUERY; // it must be the subquery + pNew->cmd.type |= TSDB_QUERY_TYPE_SUBQUERY; // it must be the subquery } uint64_t uid = pMeterMetaInfo->pMeterMeta->uid; @@ -1756,15 +1706,16 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex } pNew->fp = fp; - pNew->param = param; - pNew->cmd.vnodeIdx = vnodeIndex; - SMeterMetaInfo* pMetermetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); - + char key[TSDB_MAX_TAGS_LEN + 1] = {0}; - tscGetMetricMetaCacheKey(pCmd, key, pMetermetaInfo->pMeterMeta->uid); - - char* name = pMeterMetaInfo->name; + tscGetMetricMetaCacheKey(pCmd, key, uid); + +#ifdef _DEBUG_VIEW + printf("the metricmeta key is:%s\n", key); +#endif + + char* name = pMeterMetaInfo->name; SMeterMetaInfo* pFinalInfo = NULL; if (pPrevSql == NULL) { @@ -1772,28 +1723,29 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t 
tableIndex SMetricMeta* pMetricMeta = taosGetDataFromCache(tscCacheHandle, key); pFinalInfo = tscAddMeterMetaInfo(&pNew->cmd, name, pMeterMeta, pMetricMeta, pMeterMetaInfo->numOfTags, - pMeterMetaInfo->tagColumnIndex); + pMeterMetaInfo->tagColumnIndex); } else { SMeterMetaInfo* pPrevInfo = tscGetMeterMetaInfo(&pPrevSql->cmd, 0); - pFinalInfo = tscAddMeterMetaInfo(&pNew->cmd, name, pPrevInfo->pMeterMeta, pPrevInfo->pMetricMeta, pMeterMetaInfo->numOfTags, - pMeterMetaInfo->tagColumnIndex); + pFinalInfo = tscAddMeterMetaInfo(&pNew->cmd, name, pPrevInfo->pMeterMeta, pPrevInfo->pMetricMeta, + pMeterMetaInfo->numOfTags, pMeterMetaInfo->tagColumnIndex); pPrevInfo->pMeterMeta = NULL; pPrevInfo->pMetricMeta = NULL; } assert(pFinalInfo->pMeterMeta != NULL); - if (UTIL_METER_IS_METRIC(pMetermetaInfo)) { + if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { assert(pFinalInfo->pMetricMeta != NULL); } - tscTrace("%p new subquery %p, vnodeIdx:%d, tableIndex:%d, type:%d", pSql, pNew, vnodeIndex, tableIndex, pNew->cmd.type); + tscTrace("%p new subquery %p, tableIndex:%d, vnodeIdx:%d, type:%d", pSql, pNew, tableIndex, + pMeterMetaInfo->vnodeIndex, pNew->cmd.type); return pNew; } void tscDoQuery(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; - void* fp = pSql->fp; + void* fp = pSql->fp; if (pCmd->command > TSDB_SQL_LOCAL) { tscProcessLocalCmd(pSql); @@ -1812,12 +1764,56 @@ void tscDoQuery(SSqlObj* pSql) { } } -int16_t tscGetJoinTagColIndexByUid(SSqlCmd* pCmd, uint64_t uid) { - STagCond* pTagCond = &pCmd->tagCond; - +int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid) { if (pTagCond->joinInfo.left.uid == uid) { return pTagCond->joinInfo.left.tagCol; } else { return pTagCond->joinInfo.right.tagCol; } } + +bool tscIsUpdateQuery(STscObj* pObj) { + if (pObj == NULL || pObj->signature != pObj) { + globalCode = TSDB_CODE_DISCONNECTED; + return TSDB_CODE_DISCONNECTED; + } + + SSqlCmd* pCmd = &pObj->pSql->cmd; + return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || + TSDB_SQL_USE_DB == pCmd->command) ? 
1 : 0; +} + +int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql) { + const char *msgFormat1 = "invalid SQL: %s"; + const char *msgFormat2 = "invalid SQL: syntax error near \"%s\" (%s)"; + const char *msgFormat3 = "invalid SQL: syntax error near \"%s\""; + + const int32_t BACKWARD_CHAR_STEP = 0; + + if (sql == NULL) { + assert(additionalInfo != NULL); + sprintf(msg, msgFormat1, additionalInfo); + return TSDB_CODE_INVALID_SQL; + } + + char buf[64] = {0}; // only extract part of sql string + strncpy(buf, (sql - BACKWARD_CHAR_STEP), tListLen(buf) - 1); + + if (additionalInfo != NULL) { + sprintf(msg, msgFormat2, buf, additionalInfo); + } else { + sprintf(msg, msgFormat3, buf); // no additional information for invalid sql error + } + + return TSDB_CODE_INVALID_SQL; +} + +bool tscHasReachLimitation(SSqlObj* pSql) { + assert(pSql != NULL && pSql->cmd.globalLimit != 0); + + SSqlCmd* pCmd = &pSql->cmd; + SSqlRes* pRes = &pSql->res; + + return (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit); +} + diff --git a/src/connector/go/src/taosSql/rows.go b/src/connector/go/src/taosSql/rows.go index 6407fc334e929f3ba636519deb959fd9f0c40d10..5040dca06d205865379e21d45e5d5dd3e2b793db 100755 --- a/src/connector/go/src/taosSql/rows.go +++ b/src/connector/go/src/taosSql/rows.go @@ -118,14 +118,15 @@ func (rows *taosSqlRows) ColumnTypeScanType(i int) reflect.Type { return rows.rs.columns[i].scanType() } -func (rows *taosSqlRows) Close() (err error) { - mc := rows.mc - if mc == nil { - return nil +func (rows *taosSqlRows) Close() error { + if rows.mc != nil { + result := C.taos_use_result(rows.mc.taos) + if result != nil { + C.taos_free_result(result) + } + rows.mc = nil } - - rows.mc = nil - return err + return nil } func (rows *taosSqlRows) HasNextResultSet() (b bool) { diff --git a/src/connector/go/src/taosSql/taosSqlCgo.go b/src/connector/go/src/taosSql/taosSqlCgo.go index e240ac44ec23116c332b250499848e42143066f4..cc3aaa1658813cdfe667c5402c029525860fbf01 100755 --- a/src/connector/go/src/taosSql/taosSqlCgo.go +++ b/src/connector/go/src/taosSql/taosSqlCgo.go @@ -29,46 +29,47 @@ import ( "unsafe" ) -func (mc *taosConn) taosConnect(ip, user, pass, db string, port int) (taos unsafe.Pointer, err error){ +func (mc *taosConn) taosConnect(ip, user, pass, db string, port int) (taos unsafe.Pointer, err error) { cuser := C.CString(user) cpass := C.CString(pass) - cip := C.CString(ip) - cdb := C.CString(db) + cip := C.CString(ip) + cdb := C.CString(db) defer C.free(unsafe.Pointer(cip)) defer C.free(unsafe.Pointer(cuser)) defer C.free(unsafe.Pointer(cpass)) defer C.free(unsafe.Pointer(cdb)) - taosObj := C.taos_connect(cip, cuser, cpass, cdb, (C.int)(port)) - if taosObj == nil { - return nil, errors.New("taos_connect() fail!") - } + taosObj := C.taos_connect(cip, cuser, cpass, cdb, (C.ushort)(port)) + if taosObj == nil { + return nil, errors.New("taos_connect() fail!") + } - return (unsafe.Pointer)(taosObj), nil -} + return (unsafe.Pointer)(taosObj), nil +} func (mc *taosConn) taosQuery(sqlstr string) (int, error) { - taosLog.Printf("taosQuery() input sql:%s\n", sqlstr) + //taosLog.Printf("taosQuery() input sql:%s\n", sqlstr) - csqlstr := C.CString(sqlstr) + csqlstr := C.CString(sqlstr) defer C.free(unsafe.Pointer(csqlstr)) - code := int(C.taos_query(mc.taos, csqlstr)) + code := int(C.taos_query(mc.taos, csqlstr)) - if 0 != code { - mc.taos_error() - errStr := C.GoString(C.taos_errstr(mc.taos)) - taosLog.Println("taos_query() failed:", errStr) - return 0, errors.New(errStr) - } 
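The Go wrapper reworked in this hunk mirrors the usual flow of the synchronous C API: run the statement with taos_query, report taos_errstr on a non-zero code, and otherwise use taos_field_count to tell a SELECT/SHOW-style statement (fields available) apart from a modify statement, where only taos_affected_rows is meaningful. A rough C equivalent of that flow, assuming a valid TAOS* connection obtained elsewhere and the taos.h client header on the include path:

```c
#include <stdio.h>
#include "taos.h"  /* TDengine client header, assumed available */

/* Execute one statement and print either the field count or the affected
 * rows, mirroring what the Go taosQuery() wrapper does around taos_query(). */
static int runStatement(TAOS *taos, const char *sql) {
  int code = taos_query(taos, sql);
  if (code != 0) {
    fprintf(stderr, "taos_query failed: %s\n", taos_errstr(taos));
    return code;
  }

  int numOfFields = taos_field_count(taos);
  if (numOfFields == 0) {
    /* not a SELECT/SHOW: only the affected-row count is meaningful */
    printf("statement ok, %d row(s) affected\n", taos_affected_rows(taos));
  } else {
    printf("statement ok, result set has %d column(s)\n", numOfFields);
  }
  return 0;
}
```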
+ if 0 != code { + mc.taos_error() + errStr := C.GoString(C.taos_errstr(mc.taos)) + taosLog.Println("taos_query() failed:", errStr) + taosLog.Printf("taosQuery() input sql:%s\n", sqlstr) + return 0, errors.New(errStr) + } - // read result and save into mc struct - num_fields := int(C.taos_field_count(mc.taos)) - if 0 == num_fields { // there are no select and show kinds of commands - mc.affectedRows = int(C.taos_affected_rows(mc.taos)) - mc.insertId = 0 - } + // read result and save into mc struct + num_fields := int(C.taos_field_count(mc.taos)) + if 0 == num_fields { // there are no select and show kinds of commands + mc.affectedRows = int(C.taos_affected_rows(mc.taos)) + mc.insertId = 0 + } - return num_fields, nil + return num_fields, nil } func (mc *taosConn) taos_close() { @@ -76,8 +77,8 @@ func (mc *taosConn) taos_close() { } func (mc *taosConn) taos_error() { - // free local resouce: allocated memory/metric-meta refcnt - //var pRes unsafe.Pointer - pRes := C.taos_use_result(mc.taos) - C.taos_free_result(pRes) + // free local resouce: allocated memory/metric-meta refcnt + //var pRes unsafe.Pointer + pRes := C.taos_use_result(mc.taos) + C.taos_free_result(pRes) } diff --git a/src/connector/go/src/taosSql/utils.go b/src/connector/go/src/taosSql/utils.go index a5a90059b50f57fab7b2f4e8749947a0af840265..a104322fcc9012ea0370bdbea9b89e6674daabf3 100755 --- a/src/connector/go/src/taosSql/utils.go +++ b/src/connector/go/src/taosSql/utils.go @@ -12,15 +12,25 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ - package taosSql +/* +#cgo CFLAGS : -I/usr/include +#include +#cgo LDFLAGS: -L/usr/lib -ltaos +void taosSetAllocMode(int mode, const char* path, _Bool autoDump); +void taosDumpMemoryLeak(); +*/ +import "C" + + import ( "database/sql/driver" "errors" "fmt" "sync/atomic" "time" + "unsafe" ) // Returns the bool value of the input. 
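The rows.Close() and taos_error() changes above revolve around one C-level contract: the result handle returned by taos_use_result must eventually be passed to taos_free_result so that the buffered rows and the metric-meta reference count are released. A hedged C illustration of that pairing, again assuming a connected TAOS* handle and the taos.h header:

```c
#include <stdio.h>
#include "taos.h"  /* assumed TDengine client header */

/* Run a SELECT, walk the rows, and always release the result handle,
 * which is what the Go connector's Close()/taos_error() paths ensure. */
static void selectAndFree(TAOS *taos, const char *sql) {
  if (taos_query(taos, sql) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(taos));
    return;
  }

  TAOS_RES *result = taos_use_result(taos);
  if (result == NULL) {
    return;  /* nothing to release */
  }

  TAOS_ROW row;
  int numOfRows = 0;
  while ((row = taos_fetch_row(result)) != NULL) {
    numOfRows++;  /* real code would format the row here */
  }
  printf("fetched %d row(s)\n", numOfRows);

  taos_free_result(result);  /* releases buffered rows and the meta refcount */
}
```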
@@ -398,3 +408,15 @@ func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
 }
 
+/******************************************************************************
+* Utils for C memory issues debugging *
+******************************************************************************/
+func SetAllocMode(mode int32, path string) {
+	cpath := C.CString(path)
+	defer C.free(unsafe.Pointer(cpath))
+	C.taosSetAllocMode(C.int(mode), cpath, false)
+}
+
+func DumpMemoryLeak() {
+	C.taosDumpMemoryLeak()
+}
diff --git a/src/connector/jdbc/readme.md b/src/connector/jdbc/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..e81f078c153046265cbe9a856f7b48e26fc071fc
--- /dev/null
+++ b/src/connector/jdbc/readme.md
@@ -0,0 +1,329 @@
+
+## TAOS-JDBCDriver Overview
+
+To make TDengine easy to use from Java applications, a `taos-jdbcdriver` implementation that follows the JDBC standard (3.0) API specification is provided. It can currently be searched for and downloaded from the [Sonatype Repository][1].
+
+Since TDengine is developed in C, the taos-jdbcdriver package depends on the native library of the corresponding operating system.
+
+* libtaos.so
+  After TDengine is successfully installed on Linux, the required native library libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on the default Linux library search path, so it does not need to be specified separately.
+
+* taos.dll
+  After the client is installed on Windows, the taos.dll file required by the driver is automatically copied to the default system search path C:/Windows/System32 and likewise needs no separate configuration.
+
+> Note: when developing on Windows you need to install the matching Windows client of TDengine; since no standalone Linux client is provided yet, TDengine itself must be installed on Linux.
+
+The TDengine JDBC driver stays as close as possible to relational database drivers, but the differences between a time-series database and a relational database in the objects and technical characteristics they serve mean that taos-jdbcdriver does not fully implement the JDBC standard. Please keep the following points in mind:
+
+* TDengine does not provide delete or update operations on individual records, so the driver does not support the corresponding methods.
+* Because delete and update are not supported, transactions are not supported either.
+* Union operations across tables are not supported at present.
+* Nested queries are not supported at present. `Each Connection instance can have at most one open ResultSet; if a new query is executed while a ResultSet is still open, TSDBJDBCDriver automatically closes the previous ResultSet`.
+
+
+## TAOS-JDBCDriver Versions and Supported TDengine and JDK Versions
+
+| taos-jdbcdriver version | TDengine version | JDK version |
+| --- | --- | --- |
+| 1.0.3 | 1.6.1.x and above | 1.8.x |
+| 1.0.2 | 1.6.1.x and above | 1.8.x |
+| 1.0.1 | 1.6.1.x and above | 1.8.x |
+
+## TDengine DataType and Java DataType
+
+TDengine currently supports timestamp, numeric, character and boolean types, which map to Java types as follows:
+
+| TDengine DataType | Java DataType |
+| --- | --- |
+| TIMESTAMP | java.sql.Timestamp |
+| INT | java.lang.Integer |
+| BIGINT | java.lang.Long |
+| FLOAT | java.lang.Float |
+| DOUBLE | java.lang.Double |
+| SMALLINT, TINYINT |java.lang.Short |
+| BOOL | java.lang.Boolean |
+| BINARY, NCHAR | java.lang.String |
+
+## How to Obtain TAOS-JDBCDriver
+
+### Maven repository
+
+taos-jdbcdriver has been published to the [Sonatype Repository][1], and the major repositories are all synchronized.
+* [sonatype][8]
+* [mvnrepository][9]
+* [maven.aliyun][10]
+
+In a Maven project, add the following to pom.xml:
+
+```xml
+<dependency>
+  <groupId>com.taosdata.jdbc</groupId>
+  <artifactId>taos-jdbcdriver</artifactId>
+  <version>1.0.3</version>
+</dependency>
+```
+
+### Building from source
+
+After downloading the [TDengine][3] source code, change into the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package` to generate the jar package.
+
+
+## Usage
+
+### Getting a connection
+
+A TDengine Connection can be obtained with the following configuration:
+```java
+Class.forName("com.taosdata.jdbc.TSDBDriver");
+String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
+Connection conn = DriverManager.getConnection(jdbcUrl);
+```
+> Port 6030 is the default connection port, and `log` in the JDBC URL is the system's own monitoring database.
+
+The canonical format of a TDengine JDBC URL is:
+`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
+
+Items in `{}` are required and items in `[]` are optional. The parameters are:
+
+* user: user name for logging in to TDengine, default root.
+* password: login password, default taosdata.
+* charset: character set used by the client, default is the system character set.
+* cfgdir: directory of the client configuration file, default /etc/taos on Linux and C:/TDengine/cfg on Windows.
+* locale: client locale, default is the current system locale.
+* timezone: time zone used by the client, default is the current system time zone.
+
+These parameters can be configured in 3 places, `in descending order of priority`:
+1. JDBC URL parameters
+   As described above, they can be specified directly in the JDBC URL.
+2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
+```java
+public Connection getConn() throws Exception{
+  Class.forName("com.taosdata.jdbc.TSDBDriver");
+  String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
+  Properties connProps = new Properties();
+  connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
+  connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
+  connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
+  connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+  connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+  connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+  Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
+  return conn;
+}
+```
+
+3. The client configuration file taos.cfg
+
+   The default configuration file is /var/lib/taos/taos.cfg on Linux and C:\TDengine\cfg\taos.cfg on Windows.
+```properties
+# client default username
+# defaultUser root
+
+# client default password
+# defaultPass taosdata
+
+# default system charset
+# charset UTF-8
+
+# system locale
+# locale en_US.UTF-8
+```
+> For more details see the [client configuration][13] documentation.
+
+### Creating a database and table
+
+```java
+Statement stmt = conn.createStatement();
+
+// create database
+stmt.executeUpdate("create database if not exists db");
+
+// use database
+stmt.executeUpdate("use db");
+
+// create table
+stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
+```
+> Note: if the database is not selected with `use db`, every subsequent table operation must prefix the table name with the database name, e.g. db.tb.
+
+### Inserting data
+
+```java
+// insert data
+int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
+
+System.out.println("insert " + affectedRows + " rows.");
+```
+> now is a built-in function whose default value is the current server time.
+> `now + 1s` means the current server time plus 1 second; the letter after the number is the time unit: a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), y (year).
+
+### Querying data
+
+```java
+// query data
+ResultSet resultSet = stmt.executeQuery("select * from tb");
+
+Timestamp ts = null;
+int temperature = 0;
+float humidity = 0;
+while(resultSet.next()){
+
+  ts = resultSet.getTimestamp(1);
+  temperature = resultSet.getInt(2);
+  humidity = resultSet.getFloat("humidity");
+
+  System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
+}
+```
+> Querying works the same way as with a relational database; when retrieving returned fields by index the index starts at 1, but retrieving by field name is recommended.
+
+
+### Releasing resources
+
+```java
+resultSet.close();
+stmt.close();
+conn.close();
+```
+> `Be sure to close the connection`, otherwise connections will leak.
+
+## Using with connection pools
+
+**HikariCP**
+
+* Add the HikariCP Maven dependency:
+```xml
+<dependency>
+  <groupId>com.zaxxer</groupId>
+  <artifactId>HikariCP</artifactId>
+  <version>3.4.1</version>
+</dependency>
+```
+
+* Usage example:
+```java
+ public static void main(String[] args) throws SQLException {
+    HikariConfig config = new HikariConfig();
+    config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
+    config.setUsername("root");
+    config.setPassword("taosdata");
+
+    config.setMinimumIdle(3);           //minimum number of idle connection
+    config.setMaximumPoolSize(10);      //maximum number of connection in the pool
+    config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
+    config.setIdleTimeout(60000);       // max idle time for recycle idle connection
+    config.setConnectionTestQuery("describe log.dn"); //validation query
+    config.setValidationTimeout(3000);  //validation query timeout
+
+    HikariDataSource ds = new HikariDataSource(config); //create datasource
+
+    Connection connection = ds.getConnection(); // get connection
+    Statement statement = connection.createStatement(); // get statement
+
+    //query or insert
+    // ...
+
+    connection.close(); // put back to connection pool
+}
+```
+> After obtaining a connection via HikariDataSource.getConnection(), call close() when you are done; it does not actually close the connection, it only returns it to the pool.
+> For more questions about using HikariCP, see the [official documentation][5].
+
+**Druid**
+
+* Add the Druid Maven dependency:
+
+```xml
+<dependency>
+  <groupId>com.alibaba</groupId>
+  <artifactId>druid</artifactId>
+  <version>1.1.20</version>
+</dependency>
+```
+
+* Usage example:
+```java
+public static void main(String[] args) throws Exception {
+    Properties properties = new Properties();
+    properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
+    properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
+    properties.put("username","root");
+    properties.put("password","taosdata");
+
+    properties.put("maxActive","10"); //maximum number of connection in the pool
+    properties.put("initialSize","3");//initial number of connection
+    properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
+    properties.put("minIdle","3");//minimum number of connection in the pool
+
+    properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
+
+    properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
+    properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
+
+    properties.put("validationQuery","describe log.dn"); //validation query
+    properties.put("testWhileIdle","true"); // test connection while idle
+    properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
+    properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
+
+    //create druid datasource
+    DataSource ds = DruidDataSourceFactory.createDataSource(properties);
+    Connection connection = ds.getConnection(); // get connection
+    Statement statement = connection.createStatement(); // get statement
+
+    //query or insert
+    // ...
+ + connection.close(); // put back to conneciton pool +} +``` +> 更多 druid 使用问题请查看[官方说明][6] + +**注意事项** +* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。 + +如下所示,`select server_status()` 执行成功会返回 `1`。 +```shell +taos> select server_status(); +server_status()| +================ +1 | +Query OK, 1 row(s) in set (0.000141s) +``` + +## 与框架使用 + +* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11] +* Springboot + Mybatis 中使用,可参考 [springbootdemo][12] + +## 常见问题 + +* java.lang.UnsatisfiedLinkError: no taos in java.library.path + + **原因**:程序没有找到依赖的本地函数库 taos。 + + **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 + +* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform + + **原因**:目前 TDengine 只支持 64 位 JDK。 + + **解决方法**:重新安装 64 位 JDK。 + +* 其它问题请参考 [Issues][7] + + + +[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[3]: https://github.com/taosdata/TDengine +[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/ +[5]: https://github.com/brettwooldridge/HikariCP +[6]: https://github.com/alibaba/druid +[7]: https://github.com/taosdata/TDengine/issues +[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[10]: https://maven.aliyun.com/mvn/search +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo +[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 7881c06b6a3e6e61685abb9329b9ca36af803ee5..c297fb67b063355b4499b1020499104e1e10f627 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -273,6 +273,9 @@ public class TSDBDriver implements java.sql.Driver { String user = ""; for (String queryStr : queryStrings) { String[] kvPair = queryStr.trim().split("="); + if (kvPair.length < 2){ + continue; + } switch (kvPair[0].toLowerCase()) { case PROPERTY_KEY_USER: urlProps.setProperty(PROPERTY_KEY_USER, kvPair[1]); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index c66a8322c12ccda32d502685d00b98357aeb4d80..3adb601822567a6a7c515fa405801024e99a4609 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -134,7 +134,7 @@ public class TSDBJNIConnector { } } - // Try retrieving result set for the executed SQLusing the current connection pointer. If the executed + // Try retrieving result set for the executed SQL using the current connection pointer. If the executed // SQL is a DML/DDL which doesn't return a result set, then taosResultSetPointer should be 0L. Otherwise, // taosResultSetPointer should be a non-zero value. 
taosResultSetPointer = this.getResultSetImp(this.taos); diff --git a/src/inc/sdb.h b/src/inc/sdb.h index 4a1969b700bef0b0fbce3e7058c7f33146174bdf..a0e0a1b2f2e8815f5425a65f6f793fad4b0bc847 100644 --- a/src/inc/sdb.h +++ b/src/inc/sdb.h @@ -23,10 +23,10 @@ extern "C" { #include "taosmsg.h" #include "tsdb.h" -extern short sdbPeerPort; -extern short sdbSyncPort; +extern uint16_t tsMgmtMgmtPort; +extern uint16_t tsMgmtSyncPort; extern int sdbMaxNodes; -extern int sdbHbTimer; // seconds +extern int tsMgmtPeerHBTimer; // seconds extern char sdbZone[]; extern char sdbMasterIp[]; extern char sdbPrivateIp[]; @@ -105,7 +105,7 @@ extern SSdbPeer *sdbPeer[]; #endif -void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, char keyType, char *directory, +void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, uint8_t keyType, char *directory, void *(*appTool)(char, void *, char *, int, int *)); void *sdbGetRow(void *handle, void *key); diff --git a/src/inc/sql.y b/src/inc/sql.y index e04324c5def478509210d91de299b2219f84f5b8..3d0ded56ebd245dd7591378f1f5b643f532f2a33 100644 --- a/src/inc/sql.y +++ b/src/inc/sql.y @@ -26,8 +26,7 @@ #include #include #include - -#include "tsql.h" +#include "tscSQLParser.h" #include "tutil.h" } @@ -74,6 +73,9 @@ cmd ::= SHOW CONFIGS. { setDCLSQLElems(pInfo, SHOW_CONFIGS, 0); } cmd ::= SHOW SCORES. { setDCLSQLElems(pInfo, SHOW_SCORES, 0); } cmd ::= SHOW GRANTS. { setDCLSQLElems(pInfo, SHOW_GRANTS, 0); } +cmd ::= SHOW VNODES. { setDCLSQLElems(pInfo, SHOW_VNODES, 0); } +cmd ::= SHOW VNODES IPTOKEN(X). { setDCLSQLElems(pInfo, SHOW_VNODES, 1, &X); } + %type dbPrefix {SSQLToken} dbPrefix(A) ::=. {A.n = 0;} dbPrefix(A) ::= ids(X) DOT. {A = X; } @@ -113,7 +115,7 @@ cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). { } cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDCLSQLElems(pInfo, DROP_DATABASE, 2, &X, &Y); } -cmd ::= DROP DNODE IP(X). { setDCLSQLElems(pInfo, DROP_DNODE, 1, &X); } +cmd ::= DROP DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, DROP_DNODE, 1, &X); } cmd ::= DROP USER ids(X). { setDCLSQLElems(pInfo, DROP_USER, 1, &X); } cmd ::= DROP ACCOUNT ids(X). { setDCLSQLElems(pInfo, DROP_ACCOUNT, 1, &X); } @@ -129,8 +131,8 @@ cmd ::= DESCRIBE ids(X) cpxName(Y). { /////////////////////////////////THE ALTER STATEMENT//////////////////////////////////////// cmd ::= ALTER USER ids(X) PASS ids(Y). { setDCLSQLElems(pInfo, ALTER_USER_PASSWD, 2, &X, &Y); } cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setDCLSQLElems(pInfo, ALTER_USER_PRIVILEGES, 2, &X, &Y);} -cmd ::= ALTER DNODE IP(X) ids(Y). { setDCLSQLElems(pInfo, ALTER_DNODE, 2, &X, &Y); } -cmd ::= ALTER DNODE IP(X) ids(Y) ids(Z). { setDCLSQLElems(pInfo, ALTER_DNODE, 3, &X, &Y, &Z); } +cmd ::= ALTER DNODE IPTOKEN(X) ids(Y). { setDCLSQLElems(pInfo, ALTER_DNODE, 2, &X, &Y); } +cmd ::= ALTER DNODE IPTOKEN(X) ids(Y) ids(Z). { setDCLSQLElems(pInfo, ALTER_DNODE, 3, &X, &Y, &Z); } cmd ::= ALTER LOCAL ids(X). { setDCLSQLElems(pInfo, ALTER_LOCAL, 1, &X); } cmd ::= ALTER LOCAL ids(X) ids(Y). { setDCLSQLElems(pInfo, ALTER_LOCAL, 2, &X, &Y); } cmd ::= ALTER DATABASE ids(X) alter_db_optr(Y). { SSQLToken t = {0}; setCreateDBSQL(pInfo, ALTER_DATABASE, &X, &Y, &t);} @@ -155,7 +157,7 @@ ifnotexists(X) ::= . {X.n = 0;} /////////////////////////////////THE CREATE STATEMENT/////////////////////////////////////// //create option for dnode/db/user/account -cmd ::= CREATE DNODE IP(X). { setDCLSQLElems(pInfo, CREATE_DNODE, 1, &X);} +cmd ::= CREATE DNODE IPTOKEN(X). 
{ setDCLSQLElems(pInfo, CREATE_DNODE, 1, &X);} cmd ::= CREATE ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSQL(pInfo, CREATE_ACCOUNT, &X, &Y, &Z);} cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDBSQL(pInfo, CREATE_DATABASE, &X, &Y, &Z);} @@ -219,7 +221,8 @@ comp(Y) ::= COMP INTEGER(X). { Y = X; } prec(Y) ::= PRECISION STRING(X). { Y = X; } %type db_optr {SCreateDBInfo} -db_optr ::= . {} +db_optr(Y) ::= . {setDefaultCreateDbOption(&Y);} + db_optr(Y) ::= db_optr(Z) tables(X). { Y = Z; Y.tablesPerVnode = strtol(X.z, NULL, 10); } db_optr(Y) ::= db_optr(Z) cache(X). { Y = Z; Y.cacheBlockSize = strtol(X.z, NULL, 10); } db_optr(Y) ::= db_optr(Z) replica(X). { Y = Z; Y.replica = strtol(X.z, NULL, 10); } @@ -234,7 +237,7 @@ db_optr(Y) ::= db_optr(Z) prec(X). { Y = Z; Y.precision = X; } db_optr(Y) ::= db_optr(Z) keep(X). { Y = Z; Y.keep = X; } %type alter_db_optr {SCreateDBInfo} -alter_db_optr(Y) ::= . { memset(&Y, 0, sizeof(SCreateDBInfo));} +alter_db_optr(Y) ::= . { setDefaultCreateDbOption(&Y);} alter_db_optr(Y) ::= alter_db_optr(Z) replica(X). { Y = Z; Y.replica = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) tables(X). { Y = Z; Y.tablesPerVnode = strtol(X.z, NULL, 10); } @@ -350,6 +353,14 @@ select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) fill_ A = tSetQuerySQLElems(&T, W, X, Y, P, Z, &K, &S, F, &L, &G); } +// Support for the SQL exprssion without from & where subclauses, e.g., +// select current_database(), +// select server_version(), select client_version(), +// select server_state(); +select(A) ::= SELECT(T) selcollist(W). { + A = tSetQuerySQLElems(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); +} + // selcollist is a list of expressions that are to become the return // values of the SELECT statement. The "*" in statements like // "SELECT * FROM ..." is encoded as a special expression with an opcode of TK_ALL. @@ -392,7 +403,7 @@ tmvar(A) ::= VARIABLE(X). {A = X;} %type interval_opt {SSQLToken} interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N = E; } -interval_opt(N) ::= . {N.n = 0; } +interval_opt(N) ::= . {N.n = 0; N.z = NULL; N.type = 0; } %type fill_opt {tVariantList*} %destructor fill_opt {tVariantListDestroy($$);} @@ -413,7 +424,7 @@ fill_opt(N) ::= FILL LP ID(Y) RP. { %type sliding_opt {SSQLToken} sliding_opt(K) ::= SLIDING LP tmvar(E) RP. {K = E; } -sliding_opt(K) ::= . {K.n = 0; } +sliding_opt(K) ::= . {K.n = 0; K.z = NULL; K.type = 0; } %type orderby_opt {tVariantList*} %destructor orderby_opt {tVariantListDestroy($$);} @@ -642,12 +653,12 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { } ////////////////////////////////////////kill statement/////////////////////////////////////// -cmd ::= KILL CONNECTION IP(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setDCLSQLElems(pInfo, KILL_CONNECTION, 1, &X);} -cmd ::= KILL STREAM IP(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_STREAM, 1, &X);} -cmd ::= KILL QUERY IP(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_QUERY, 1, &X);} +cmd ::= KILL CONNECTION IPTOKEN(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setDCLSQLElems(pInfo, KILL_CONNECTION, 1, &X);} +cmd ::= KILL STREAM IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_STREAM, 1, &X);} +cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). 
{X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_QUERY, 1, &X);} %fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL COUNT SUM AVG MIN MAX FIRST LAST TOP BOTTOM STDDEV PERCENTILE APERCENTILE LEASTSQUARES HISTOGRAM DIFF - SPREAD TWA INTERP LAST_ROW NOW IP SEMI NONE PREV LINEAR IMPORT METRIC TBNAME JOIN METRICS STABLE. \ No newline at end of file + SPREAD TWA INTERP LAST_ROW NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT METRIC TBNAME JOIN METRICS STABLE NULL. diff --git a/src/inc/taos.h b/src/inc/taos.h index 94e99d582c6844473d351a7525c564f54f3c2ab5..2fd6d8be927a310e0131b62a8f0ecf55ae943ef2 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -59,7 +59,7 @@ typedef struct taosField { void taos_init(); int taos_options(TSDB_OPTION option, const void *arg, ...); -TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, int port); +TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); void taos_close(TAOS *taos); typedef struct TAOS_BIND { @@ -122,9 +122,6 @@ void taos_close_stream(TAOS_STREAM *tstr); int taos_load_table_info(TAOS *taos, const char* tableNameList); -// TODO: `configDir` should not be declared here -extern char configDir[]; // the path to global configuration - #ifdef __cplusplus } #endif diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h new file mode 100644 index 0000000000000000000000000000000000000000..2bee153955cc523595152092dd869c2b661e7c6f --- /dev/null +++ b/src/inc/taoserror.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TAOSERROR_H +#define TDENGINE_TAOSERROR_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define TSDB_CODE_SUCCESS 0 +#define TSDB_CODE_ACTION_IN_PROGRESS 1 + +#define TSDB_CODE_LAST_SESSION_NOT_FINISHED 5 +#define TSDB_CODE_INVALID_SESSION_ID 6 +#define TSDB_CODE_INVALID_TRAN_ID 7 +#define TSDB_CODE_INVALID_MSG_TYPE 8 +#define TSDB_CODE_ALREADY_PROCESSED 9 +#define TSDB_CODE_AUTH_FAILURE 10 +#define TSDB_CODE_WRONG_MSG_SIZE 11 +#define TSDB_CODE_UNEXPECTED_RESPONSE 12 +#define TSDB_CODE_INVALID_RESPONSE_TYPE 13 +#define TSDB_CODE_NO_RESOURCE 14 +#define TSDB_CODE_INVALID_TIME_STAMP 15 +#define TSDB_CODE_MISMATCHED_METER_ID 16 +#define TSDB_CODE_ACTION_TRANS_NOT_FINISHED 17 +#define TSDB_CODE_ACTION_NOT_ONLINE 18 +#define TSDB_CODE_ACTION_SEND_FAILD 19 +#define TSDB_CODE_NOT_ACTIVE_SESSION 20 +#define TSDB_CODE_INVALID_VNODE_ID 21 +#define TSDB_CODE_APP_ERROR 22 +#define TSDB_CODE_INVALID_IE 23 +#define TSDB_CODE_INVALID_VALUE 24 +#define TSDB_CODE_REDIRECT 25 +#define TSDB_CODE_ALREADY_THERE 26 +#define TSDB_CODE_INVALID_METER_ID 27 +#define TSDB_CODE_INVALID_SQL 28 +#define TSDB_CODE_NETWORK_UNAVAIL 29 +#define TSDB_CODE_INVALID_MSG_LEN 30 +#define TSDB_CODE_INVALID_DB 31 +#define TSDB_CODE_INVALID_TABLE 32 +#define TSDB_CODE_DB_ALREADY_EXIST 33 +#define TSDB_CODE_TABLE_ALREADY_EXIST 34 +#define TSDB_CODE_INVALID_USER 35 +#define TSDB_CODE_INVALID_ACCT 36 +#define TSDB_CODE_INVALID_PASS 37 +#define TSDB_CODE_DB_NOT_SELECTED 38 +#define TSDB_CODE_MEMORY_CORRUPTED 39 +#define TSDB_CODE_USER_ALREADY_EXIST 40 +#define TSDB_CODE_NO_RIGHTS 41 +#define TSDB_CODE_DISCONNECTED 42 +#define TSDB_CODE_NO_MASTER 43 +#define TSDB_CODE_NOT_CONFIGURED 44 +#define TSDB_CODE_INVALID_OPTION 45 +#define TSDB_CODE_NODE_OFFLINE 46 +#define TSDB_CODE_SYNC_REQUIRED 47 +#define TSDB_CODE_NO_ENOUGH_DNODES 48 +#define TSDB_CODE_UNSYNCED 49 +#define TSDB_CODE_TOO_SLOW 50 +#define TSDB_CODE_OTHERS 51 +#define TSDB_CODE_NO_REMOVE_MASTER 52 +#define TSDB_CODE_WRONG_SCHEMA 53 +#define TSDB_CODE_NOT_ACTIVE_VNODE 54 +#define TSDB_CODE_TOO_MANY_USERS 55 +#define TSDB_CODE_TOO_MANY_DATABSES 56 +#define TSDB_CODE_TOO_MANY_TABLES 57 +#define TSDB_CODE_TOO_MANY_DNODES 58 +#define TSDB_CODE_TOO_MANY_ACCTS 59 +#define TSDB_CODE_ACCT_ALREADY_EXIST 60 +#define TSDB_CODE_DNODE_ALREADY_EXIST 61 +#define TSDB_CODE_SDB_ERROR 62 +#define TSDB_CODE_METRICMETA_EXPIRED 63 // local cached metric-meta expired causes error in metric query +#define TSDB_CODE_NOT_READY 64 // peer is not ready to process data +#define TSDB_CODE_MAX_SESSIONS 65 // too many sessions +#define TSDB_CODE_MAX_CONNECTIONS 66 // too many connections +#define TSDB_CODE_SESSION_ALREADY_EXIST 67 +#define TSDB_CODE_NO_QSUMMARY 68 +#define TSDB_CODE_SERV_OUT_OF_MEMORY 69 +#define TSDB_CODE_INVALID_QHANDLE 70 +#define TSDB_CODE_RELATED_TABLES_EXIST 71 +#define TSDB_CODE_MONITOR_DB_FORBEIDDEN 72 +#define TSDB_CODE_VG_COMMITLOG_INIT_FAILED 73 +#define TSDB_CODE_VG_INIT_FAILED 74 +#define TSDB_CODE_DATA_ALREADY_IMPORTED 75 +#define TSDB_CODE_OPS_NOT_SUPPORT 76 +#define TSDB_CODE_INVALID_QUERY_ID 77 +#define TSDB_CODE_INVALID_STREAM_ID 78 +#define TSDB_CODE_INVALID_CONNECTION 79 +#define TSDB_CODE_ACTION_NOT_BALANCED 80 +#define TSDB_CODE_CLI_OUT_OF_MEMORY 81 +#define TSDB_CODE_DATA_OVERFLOW 82 +#define TSDB_CODE_QUERY_CANCELLED 83 +#define TSDB_CODE_GRANT_TIMESERIES_LIMITED 84 +#define TSDB_CODE_GRANT_EXPIRED 85 +#define TSDB_CODE_CLI_NO_DISKSPACE 86 +#define TSDB_CODE_FILE_CORRUPTED 87 +#define TSDB_CODE_INVALID_CLIENT_VERSION 88 +#define 
TSDB_CODE_INVALID_ACCT_PARAMETER 89 +#define TSDB_CODE_NOT_ENOUGH_TIME_SERIES 90 +#define TSDB_CODE_NO_WRITE_ACCESS 91 +#define TSDB_CODE_NO_READ_ACCESS 92 +#define TSDB_CODE_GRANT_DB_LIMITED 93 +#define TSDB_CODE_GRANT_USER_LIMITED 94 +#define TSDB_CODE_GRANT_CONN_LIMITED 95 +#define TSDB_CODE_GRANT_STREAM_LIMITED 96 +#define TSDB_CODE_GRANT_SPEED_LIMITED 97 +#define TSDB_CODE_GRANT_STORAGE_LIMITED 98 +#define TSDB_CODE_GRANT_QUERYTIME_LIMITED 99 +#define TSDB_CODE_GRANT_ACCT_LIMITED 100 +#define TSDB_CODE_GRANT_DNODE_LIMITED 101 +#define TSDB_CODE_GRANT_CPU_LIMITED 102 +#define TSDB_CODE_SESSION_NOT_READY 103 // table NOT in ready state +#define TSDB_CODE_BATCH_SIZE_TOO_BIG 104 +#define TSDB_CODE_TIMESTAMP_OUT_OF_RANGE 105 +#define TSDB_CODE_INVALID_QUERY_MSG 106 // failed to validate the sql expression msg by vnode +#define TSDB_CODE_CACHE_BLOCK_TS_DISORDERED 107 // time stamp in cache block is disordered +#define TSDB_CODE_FILE_BLOCK_TS_DISORDERED 108 // time stamp in file block is disordered +#define TSDB_CODE_INVALID_COMMIT_LOG 109 // commit log init failed +#define TSDB_CODE_SERV_NO_DISKSPACE 110 +#define TSDB_CODE_NOT_SUPER_TABLE 111 // operation only available for super table +#define TSDB_CODE_DUPLICATE_TAGS 112 // tags value for join not unique +#define TSDB_CODE_INVALID_SUBMIT_MSG 113 +#define TSDB_CODE_NOT_ACTIVE_TABLE 114 +#define TSDB_CODE_INVALID_TABLE_ID 115 +#define TSDB_CODE_INVALID_VNODE_STATUS 116 +#define TSDB_CODE_FAILED_TO_LOCK_RESOURCES 117 +#define TSDB_CODE_TABLE_ID_MISMATCH 118 + +#define TSDB_CODE_MAX_ERROR_CODE 119 + +#ifdef __cplusplus +} +#endif + +#endif //TDENGINE_TAOSERROR_H diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 1d5e1bb5a5642b18809b60ded817f4aa324a9ab0..22b10eaa60b1fe871539c9bdbe87db5d3fce2709 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -21,119 +21,7 @@ extern "C" { #endif #include "tsdb.h" - -#define TSDB_CODE_SUCCESS 0 -#define TSDB_CODE_ACTION_IN_PROGRESS 1 - -#define TSDB_CODE_LAST_SESSION_NOT_FINISHED 5 -#define TSDB_CODE_INVALID_SESSION_ID 6 -#define TSDB_CODE_INVALID_TRAN_ID 7 -#define TSDB_CODE_INVALID_MSG_TYPE 8 -#define TSDB_CODE_ALREADY_PROCESSED 9 -#define TSDB_CODE_AUTH_FAILURE 10 -#define TSDB_CODE_WRONG_MSG_SIZE 11 -#define TSDB_CODE_UNEXPECTED_RESPONSE 12 -#define TSDB_CODE_INVALID_RESPONSE_TYPE 13 -#define TSDB_CODE_NO_RESOURCE 14 -#define TSDB_CODE_INVALID_TIME_STAMP 15 -#define TSDB_CODE_MISMATCHED_METER_ID 16 -#define TSDB_CODE_ACTION_TRANS_NOT_FINISHED 17 -#define TSDB_CODE_ACTION_NOT_ONLINE 18 -#define TSDB_CODE_ACTION_SEND_FAILD 19 -#define TSDB_CODE_NOT_ACTIVE_SESSION 20 -#define TSDB_CODE_INSERT_FAILED 21 -#define TSDB_CODE_APP_ERROR 22 -#define TSDB_CODE_INVALID_IE 23 -#define TSDB_CODE_INVALID_VALUE 24 -#define TSDB_CODE_REDIRECT 25 -#define TSDB_CODE_ALREADY_THERE 26 -#define TSDB_CODE_INVALID_METER_ID 27 -#define TSDB_CODE_INVALID_SQL 28 -#define TSDB_CODE_NETWORK_UNAVAIL 29 -#define TSDB_CODE_INVALID_MSG_LEN 30 -#define TSDB_CODE_INVALID_DB 31 -#define TSDB_CODE_INVALID_TABLE 32 -#define TSDB_CODE_DB_ALREADY_EXIST 33 -#define TSDB_CODE_TABLE_ALREADY_EXIST 34 -#define TSDB_CODE_INVALID_USER 35 -#define TSDB_CODE_INVALID_ACCT 36 -#define TSDB_CODE_INVALID_PASS 37 -#define TSDB_CODE_DB_NOT_SELECTED 38 -#define TSDB_CODE_MEMORY_CORRUPTED 39 -#define TSDB_CODE_USER_ALREADY_EXIST 40 -#define TSDB_CODE_NO_RIGHTS 41 -#define TSDB_CODE_DISCONNECTED 42 -#define TSDB_CODE_NO_MASTER 43 -#define TSDB_CODE_NOT_CONFIGURED 44 -#define TSDB_CODE_INVALID_OPTION 45 -#define TSDB_CODE_NODE_OFFLINE 46 -#define 
TSDB_CODE_SYNC_REQUIRED 47 -#define TSDB_CODE_NO_ENOUGH_DNODES 48 -#define TSDB_CODE_UNSYNCED 49 -#define TSDB_CODE_TOO_SLOW 50 -#define TSDB_CODE_OTHERS 51 -#define TSDB_CODE_NO_REMOVE_MASTER 52 -#define TSDB_CODE_WRONG_SCHEMA 53 -#define TSDB_CODE_NO_RESULT 54 -#define TSDB_CODE_TOO_MANY_USERS 55 -#define TSDB_CODE_TOO_MANY_DATABSES 56 -#define TSDB_CODE_TOO_MANY_TABLES 57 -#define TSDB_CODE_TOO_MANY_DNODES 58 -#define TSDB_CODE_TOO_MANY_ACCTS 59 -#define TSDB_CODE_ACCT_ALREADY_EXIST 60 -#define TSDB_CODE_DNODE_ALREADY_EXIST 61 -#define TSDB_CODE_SDB_ERROR 62 -#define TSDB_CODE_METRICMETA_EXPIRED 63 // local cached metric-meta expired causes error in metric query -#define TSDB_CODE_NOT_READY 64 // peer is not ready to process data -#define TSDB_CODE_MAX_SESSIONS 65 // too many sessions -#define TSDB_CODE_MAX_CONNECTIONS 66 // too many connections -#define TSDB_CODE_SESSION_ALREADY_EXIST 67 -#define TSDB_CODE_NO_QSUMMARY 68 -#define TSDB_CODE_SERV_OUT_OF_MEMORY 69 -#define TSDB_CODE_INVALID_QHANDLE 70 -#define TSDB_CODE_RELATED_TABLES_EXIST 71 -#define TSDB_CODE_MONITOR_DB_FORBEIDDEN 72 -#define TSDB_CODE_VG_COMMITLOG_INIT_FAILED 73 -#define TSDB_CODE_VG_INIT_FAILED 74 -#define TSDB_CODE_DATA_ALREADY_IMPORTED 75 -#define TSDB_CODE_OPS_NOT_SUPPORT 76 -#define TSDB_CODE_INVALID_QUERY_ID 77 -#define TSDB_CODE_INVALID_STREAM_ID 78 -#define TSDB_CODE_INVALID_CONNECTION 79 -#define TSDB_CODE_ACTION_NOT_BALANCED 80 -#define TSDB_CODE_CLI_OUT_OF_MEMORY 81 -#define TSDB_CODE_DATA_OVERFLOW 82 -#define TSDB_CODE_QUERY_CANCELLED 83 -#define TSDB_CODE_GRANT_TIMESERIES_LIMITED 84 -#define TSDB_CODE_GRANT_EXPIRED 85 -#define TSDB_CODE_CLI_NO_DISKSPACE 86 -#define TSDB_CODE_FILE_CORRUPTED 87 -#define TSDB_CODE_INVALID_CLIENT_VERSION 88 -#define TSDB_CODE_INVALID_ACCT_PARAMETER 89 -#define TSDB_CODE_NOT_ENOUGH_TIME_SERIES 90 -#define TSDB_CODE_NO_WRITE_ACCESS 91 -#define TSDB_CODE_NO_READ_ACCESS 92 -#define TSDB_CODE_GRANT_DB_LIMITED 93 -#define TSDB_CODE_GRANT_USER_LIMITED 94 -#define TSDB_CODE_GRANT_CONN_LIMITED 95 -#define TSDB_CODE_GRANT_STREAM_LIMITED 96 -#define TSDB_CODE_GRANT_SPEED_LIMITED 97 -#define TSDB_CODE_GRANT_STORAGE_LIMITED 98 -#define TSDB_CODE_GRANT_QUERYTIME_LIMITED 99 -#define TSDB_CODE_GRANT_ACCT_LIMITED 100 -#define TSDB_CODE_GRANT_DNODE_LIMITED 101 -#define TSDB_CODE_GRANT_CPU_LIMITED 102 -#define TSDB_CODE_SESSION_NOT_READY 103 // table NOT in ready state -#define TSDB_CODE_BATCH_SIZE_TOO_BIG 104 -#define TSDB_CODE_TIMESTAMP_OUT_OF_RANGE 105 -#define TSDB_CODE_INVALID_QUERY_MSG 106 // failed to validate the sql expression msg by vnode -#define TSDB_CODE_CACHE_BLOCK_TS_DISORDERED 107 // time stamp in cache block is disordered -#define TSDB_CODE_FILE_BLOCK_TS_DISORDERED 108 // time stamp in file block is disordered -#define TSDB_CODE_INVALID_COMMIT_LOG 109 // commit log init failed -#define TSDB_CODE_SERVER_NO_SPACE 110 -#define TSDB_CODE_NOT_SUPER_TABLE 111 // -#define TSDB_CODE_DUPLICATE_TAGS 112 // tags value for join not unique -#define TSDB_CODE_INVALID_SUBMIT_MSG 113 +#include "taoserror.h" // message type #define TSDB_MSG_TYPE_REG 1 @@ -270,6 +158,7 @@ enum _mgmt_table { TSDB_MGMT_TABLE_CONNS, TSDB_MGMT_TABLE_SCORES, TSDB_MGMT_TABLE_GRANTS, + TSDB_MGMT_TABLE_VNODES, TSDB_MGMT_TABLE_MAX, }; @@ -333,10 +222,11 @@ typedef struct { // internal part uint32_t destId; + uint32_t destIp; char meterId[TSDB_UNI_LEN]; - short port; // for UDP only + uint16_t port; // for UDP only char empty[1]; - char msgType; + uint8_t msgType; int32_t msgLen; uint8_t content[0]; } STaosHeader; @@ 
-390,7 +280,7 @@ typedef struct { } SShellSubmitMsg; typedef struct SSchema { - char type; + uint8_t type; char name[TSDB_COL_NAME_LEN]; short colId; short bytes; @@ -421,7 +311,7 @@ typedef struct { } SCreateMsg; typedef struct { - char db[TSDB_DB_NAME_LEN]; + char db[TSDB_METER_ID_LEN]; short ignoreNotExists; } SDropDbMsg, SUseDbMsg; @@ -461,6 +351,7 @@ typedef struct { } SAlterTableMsg; typedef struct { + char clientVersion[TSDB_VERSION_LEN]; char db[TSDB_METER_ID_LEN]; } SConnectMsg; @@ -598,7 +489,7 @@ typedef struct SColumnInfo { */ typedef struct SMeterSidExtInfo { int32_t sid; - void * pObj; + int64_t uid; char tags[]; } SMeterSidExtInfo; @@ -673,13 +564,12 @@ typedef struct { typedef struct { uint64_t qhandle; - int16_t free; + uint16_t free; } SRetrieveMeterMsg; typedef struct { int32_t numOfRows; int16_t precision; - int16_t compress; int64_t offset; // updated offset value for multi-vnode projection query int64_t useconds; char data[]; @@ -695,7 +585,7 @@ typedef struct { int64_t compStorage; int64_t pointsWritten; uint8_t syncStatus; - uint8_t reserved; + uint8_t reserved[15]; } SVnodeLoad; typedef struct { @@ -706,6 +596,11 @@ typedef struct { // NOTE: sizeof(SVnodeCfg) < TSDB_FILE_HEADER_LEN/4 typedef struct { char acct[TSDB_USER_LEN]; + /* + * the message is too large, so it may will overwrite the cfg information in meterobj.v* + * recover to origin codes + */ + //char db[TSDB_METER_ID_LEN+2]; // 8bytes align char db[TSDB_DB_NAME_LEN]; uint32_t vgId; int32_t maxSessions; @@ -729,7 +624,7 @@ typedef struct { char repStrategy; char loadLatest; // load into mem or not - char precision; // time resoluation + uint8_t precision; // time resolution char reserved[16]; } SVnodeCfg, SCreateDbMsg, SDbCfg, SAlterDbMsg; @@ -769,9 +664,10 @@ typedef struct { // internal message typedef struct { uint32_t destId; + uint32_t destIp; char meterId[TSDB_UNI_LEN]; char empty[3]; - char msgType; + uint8_t msgType; int32_t msgLen; uint8_t content[0]; } SIntMsg; @@ -831,9 +727,7 @@ typedef struct { int32_t numOfMeters; int32_t join; int32_t joinCondLen; // for join condition - int32_t metaElem[TSDB_MAX_JOIN_TABLE_NUM]; - } SMetricMetaMsg; typedef struct { diff --git a/src/inc/tast.h b/src/inc/tast.h index 798a7f89a004d7c43acbe813ba68362dadf1dbc8..d7950b54f6536258d9c5c64ed54a209e04305be7 100644 --- a/src/inc/tast.h +++ b/src/inc/tast.h @@ -25,7 +25,7 @@ extern "C" { #include #include "taosmsg.h" -#include "tsql.h" +#include "ttypes.h" struct tSQLBinaryExpr; struct SSchema; @@ -49,7 +49,7 @@ typedef struct tQueryInfo { int32_t offset; // offset value in tags int32_t colIdx; // index of column in schema uint8_t optr; // expression operator - SSchema sch; // schema of tags + SSchema sch; // schema of tags tVariant q; // query condition value on the specific schema, filter expression __compar_fn_t compare; // filter function } tQueryInfo; diff --git a/src/inc/tglobalcfg.h b/src/inc/tglobalcfg.h index e83c89593fb11e7e0eba74526e43811f8799af3e..37e8e20681c2b75762f5e7ffaba764652374f9d1 100644 --- a/src/inc/tglobalcfg.h +++ b/src/inc/tglobalcfg.h @@ -57,12 +57,12 @@ extern char scriptDir[]; extern char tsMasterIp[]; extern char tsSecondIp[]; -extern short tsMgmtVnodePort; -extern short tsMgmtShellPort; -extern short tsVnodeShellPort; -extern short tsVnodeVnodePort; -extern short tsMgmtMgmtPort; -extern short tsMgmtSyncPort; +extern uint16_t tsMgmtVnodePort; +extern uint16_t tsMgmtShellPort; +extern uint16_t tsVnodeShellPort; +extern uint16_t tsVnodeVnodePort; +extern uint16_t tsMgmtMgmtPort; +extern 
uint16_t tsMgmtSyncPort; extern int tsStatusInterval; extern int tsShellActivityTimer; @@ -74,13 +74,13 @@ extern int tsMetricMetaKeepTimer; extern float tsNumOfThreadsPerCore; extern float tsRatioOfQueryThreads; extern char tsPublicIp[]; -extern char tsInternalIp[]; extern char tsPrivateIp[]; extern char tsServerIpStr[]; extern short tsNumOfVnodesPerCore; extern short tsNumOfTotalVnodes; extern short tsCheckHeaderFile; extern uint32_t tsServerIp; +extern uint32_t tsPublicIpInt; extern int tsSessionsPerVnode; extern int tsAverageCacheBlocks; @@ -106,7 +106,6 @@ extern int tsMaxDbs; extern int tsMaxTables; extern int tsMaxDnodes; extern int tsMaxVGroups; -extern int tsShellActivityTimer; extern char tsMgmtZone[]; extern char tsLocalIp[]; @@ -127,6 +126,7 @@ extern int tsEnableHttpModule; extern int tsEnableMonitorModule; extern int tsRestRowLimit; extern int tsCompressMsgSize; +extern int tsMaxSQLStringLen; extern char tsSocketType[4]; @@ -141,7 +141,7 @@ extern int tsProjectExecInterval; extern int64_t tsMaxRetentWindow; extern char tsHttpIp[]; -extern short tsHttpPort; +extern uint16_t tsHttpPort; extern int tsHttpCacheSessions; extern int tsHttpSessionExpire; extern int tsHttpMaxThreads; @@ -150,6 +150,10 @@ extern int tsHttpEnableRecordSql; extern int tsTelegrafUseFieldNum; extern int tsAdminRowLimit; +extern int tsTscEnableRecordSql; +extern int tsAnyIp; +extern int tsIsCluster; + extern char tsMonitorDbName[]; extern char tsInternalPass[]; extern int tsMonitorInterval; @@ -169,12 +173,15 @@ extern uint32_t debugFlag; extern uint32_t odbcdebugFlag; extern uint32_t qdebugFlag; +extern uint32_t taosMaxTmrCtrl; + extern int tsRpcTimer; extern int tsRpcMaxTime; extern int tsUdpDelay; extern char version[]; extern char compatible_version[]; extern char gitinfo[]; +extern char gitinfoOfInternal[]; extern char buildinfo[]; extern char tsTimezone[64]; @@ -245,13 +252,15 @@ typedef struct { extern SGlobalConfig *tsGlobalConfig; extern int tsGlobalConfigNum; extern char * tsCfgStatusStr[]; -SGlobalConfig *tsGetConfigOption(char *option); +SGlobalConfig *tsGetConfigOption(const char *option); #define TSDB_CFG_MAX_NUM 110 #define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_VALUE_LEN 41 +#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) + #ifdef __cplusplus } #endif diff --git a/src/inc/tnote.h b/src/inc/tnote.h new file mode 100644 index 0000000000000000000000000000000000000000..4f86736be4b1075d3a6a897ce1d996b136c11b70 --- /dev/null +++ b/src/inc/tnote.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TNOTE_H +#define TDENGINE_TNOTE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "os.h" +#include "tutil.h" +#include "tglobalcfg.h" + +#define MAX_NOTE_LINE_SIZE 66000 +#define NOTE_FILE_NAME_LEN 300 + +typedef struct _taosNoteInfo { + int taosNoteFileNum ; + int taosNoteMaxLines; + int taosNoteLines; + char taosNoteName[NOTE_FILE_NAME_LEN]; + int taosNoteFlag; + int taosNoteFd; + int taosNoteOpenInProgress; + pthread_mutex_t taosNoteMutex; +}taosNoteInfo; + +void taosNotePrint(taosNoteInfo * pNote, const char * const format, ...); + +extern taosNoteInfo m_HttpNote; +extern taosNoteInfo m_TscNote; + +extern int tsHttpEnableRecordSql; +extern int tsTscEnableRecordSql; + +#define taosNotePrintHttp(...) \ + if (tsHttpEnableRecordSql) { \ + taosNotePrint(&m_HttpNote, __VA_ARGS__); \ + } + +#define taosNotePrintTsc(...) \ + if (tsTscEnableRecordSql) { \ + taosNotePrint(&m_TscNote, __VA_ARGS__); \ + } + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/inc/trpc.h b/src/inc/trpc.h index 3ae6e9ea97fc21df0b5f6fad1d240162f975159e..97a0c905f8c3584fee0de46b064f79a78db19f15 100644 --- a/src/inc/trpc.h +++ b/src/inc/trpc.h @@ -25,7 +25,6 @@ extern "C" { #define TAOS_CONN_UDPS 0 #define TAOS_CONN_UDPC 1 -#define TAOS_CONN_UDP 1 #define TAOS_CONN_TCPS 2 #define TAOS_CONN_TCPC 3 #define TAOS_CONN_HTTPS 4 @@ -39,7 +38,7 @@ extern "C" { #define TAOS_ID_REALLOCATE 2 #define TAOS_CONN_SOCKET_TYPE_S() ((strcasecmp(tsSocketType, TAOS_SOCKET_TYPE_NAME_UDP) == 0)? TAOS_CONN_UDPS:TAOS_CONN_TCPS) -#define TAOS_CONN_SOCKET_TYPE_C() ((strcasecmp(tsSocketType, TAOS_SOCKET_TYPE_NAME_UDP) == 0)? TAOS_CONN_UDP:TAOS_CONN_TCPC) +#define TAOS_CONN_SOCKET_TYPE_C() ((strcasecmp(tsSocketType, TAOS_SOCKET_TYPE_NAME_UDP) == 0)? TAOS_CONN_UDPC:TAOS_CONN_TCPC) #define taosSendMsgToPeer(x, y, z) taosSendMsgToPeerH(x, y, z, NULL) #define taosOpenRpcChann(x, y, z) taosOpenRpcChannWithQ(x,y,z,NULL) @@ -48,7 +47,7 @@ extern "C" { typedef struct { char *localIp; // local IP used - short localPort; // local port + uint16_t localPort; // local port char *label; // for debug purpose int numOfThreads; // number of threads to handle connections void *(*fp)(char *, void *, void *); // function to process the incoming msg @@ -73,7 +72,7 @@ typedef struct { void * shandle; // pointer returned by taosOpenRpc void * ahandle; // handle provided by app char * peerIp; // peer IP string - short peerPort; // peer port + uint16_t peerPort; // peer port char spi; // security parameter index char encrypt; // encrypt algorithm char * secret; // key for authentication @@ -108,7 +107,7 @@ int taosSendSimpleRsp(void *thandle, char rsptype, char code); int taosSetSecurityInfo(int cid, int sid, char *id, int spi, int encrypt, char *secret, char *ckey); -void taosGetRpcConnInfo(void *thandle, uint32_t *peerId, uint32_t *peerIp, short *peerPort, int *cid, int *sid); +void taosGetRpcConnInfo(void *thandle, uint32_t *peerId, uint32_t *peerIp, uint16_t *peerPort, int *cid, int *sid); int taosGetOutType(void *thandle); diff --git a/src/inc/tsched.h b/src/inc/tsched.h index dffd7a298a940e6d0f9a5fc7c6a0543beaca38cc..827ecbbb421b78c7d5140efc5c3be6e1edca4578 100644 --- a/src/inc/tsched.h +++ b/src/inc/tsched.h @@ -32,6 +32,8 @@ typedef struct _sched_msg { void *taosInitScheduler(int queueSize, int numOfThreads, const char *label); +void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl); + int taosScheduleTask(void *qhandle, SSchedMsg *pMsg); void taosCleanUpScheduler(void *param); diff 
--git a/src/inc/tschemautil.h b/src/inc/tschemautil.h index 0b8a2d6a9337c173fb1992c86b3792ccff31e0a0..0031b4fa2590496ca59b02e877f755f273591d08 100644 --- a/src/inc/tschemautil.h +++ b/src/inc/tschemautil.h @@ -47,12 +47,13 @@ struct SSchema *tsGetSchema(SMeterMeta *pMeta); struct SSchema *tsGetTagSchema(SMeterMeta *pMeta); struct SSchema *tsGetColumnSchema(SMeterMeta *pMeta, int32_t startCol); +struct SSchema tsGetTbnameColumnSchema(); char *tsGetTagsValue(SMeterMeta *pMeta); bool tsMeterMetaIdentical(SMeterMeta *p1, SMeterMeta *p2); -void extractMeterName(char *meterId, char *name); +void extractTableName(char *meterId, char *name); SSQLToken extractDBName(char *meterId, char *name); diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index de4e430a462f851b3b34ff32fc8acc738a40ad8b..1190dc8420fe360a667c2d932cfc8a20e4321015 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -44,22 +44,6 @@ extern "C" { #define TSDB_TIME_PRECISION_MILLI_STR "ms" #define TSDB_TIME_PRECISION_MICRO_STR "us" -enum _status { - TSDB_STATUS_OFFLINE, - TSDB_STATUS_CREATING, - TSDB_STATUS_UNSYNCED, - TSDB_STATUS_SLAVE, - TSDB_STATUS_MASTER, - TSDB_STATUS_READY, -}; - -enum _syncstatus { - STDB_SSTATUS_INIT, - TSDB_SSTATUS_SYNCING, - TSDB_SSTATUS_SYNC_CACHE, - TSDB_SSTATUS_SYNC_FILE, -}; - #define TSDB_DATA_TYPE_BOOL 1 // 1 bytes #define TSDB_DATA_TYPE_TINYINT 2 // 1 byte #define TSDB_DATA_TYPE_SMALLINT 3 // 2 bytes @@ -116,6 +100,7 @@ enum _syncstatus { #define TSDB_COL_NAME_LEN 64 #define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 16 #define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE +#define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length should be less than 6mb #define TSDB_MAX_BYTES_PER_ROW TSDB_MAX_COLUMNS * 16 #define TSDB_MAX_TAGS_LEN 512 @@ -162,8 +147,11 @@ enum _syncstatus { #define TSDB_MAX_MPEERS 5 #define TSDB_MAX_MGMT_IPS (TSDB_MAX_MPEERS+1) -//#define TSDB_REPLICA_MAX_NUM 3 #define TSDB_REPLICA_MIN_NUM 1 +/* + * this is defined in CMakeList.txt + */ +//#define TSDB_REPLICA_MAX_NUM 3 #define TSDB_TBNAME_COLUMN_INDEX (-1) #define TSDB_MULTI_METERMETA_MAX_NUM 100000 // maximum batch size allowed to load metermeta @@ -178,9 +166,6 @@ enum _syncstatus { #define TSDB_MIN_COMPRESSION_LEVEL 0 #define TSDB_MAX_COMPRESSION_LEVEL 2 -#define TSDB_MIN_CACHE_BLOCKS_PER_METER 32 -#define TSDB_MAX_CACHE_BLOCKS_PER_METER 40960 - #define TSDB_MIN_COMMIT_TIME_INTERVAL 30 #define TSDB_MAX_COMMIT_TIME_INTERVAL 40960 @@ -193,7 +178,9 @@ enum _syncstatus { #define TSDB_MIN_CACHE_BLOCKS 100 #define TSDB_MAX_CACHE_BLOCKS 409600 +#define TSDB_MIN_AVG_BLOCKS 2 #define TSDB_MAX_AVG_BLOCKS 2048 +#define TSDB_DEFAULT_AVG_BLOCKS 4 #define TSDB_MIN_TABLES_PER_VNODE 1 #define TSDB_MAX_TABLES_PER_VNODE 220000 @@ -220,21 +207,24 @@ enum _syncstatus { #define TSDB_MAX_RPC_THREADS 5 -#define TSDB_QUERY_TYPE_QUERY 0 // normal query -#define TSDB_QUERY_TYPE_FREE_RESOURCE 0x1 // free qhandle at vnode +#define TSDB_QUERY_TYPE_QUERY 0 // normal query +#define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01U // free qhandle at vnode /* * 1. ordinary sub query for select * from super_table * 2. all sqlobj generated by createSubqueryObj with this flag */ -#define TSDB_QUERY_TYPE_SUBQUERY 0x2 -#define TSDB_QUERY_TYPE_STABLE_SUBQUERY 0x4 // two-stage subquery for super table - -#define TSDB_QUERY_TYPE_TABLE_QUERY 0x8 // query ordinary table; below only apply to client side -#define TSDB_QUERY_TYPE_STABLE_QUERY 0x10 // query on super table -#define TSDB_QUERY_TYPE_JOIN_QUERY 0x20 // join query -#define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40 // select *,columns... 
query -#define TSDB_QUERY_TYPE_JOIN_SEC_STAGE 0x80 // join sub query at the second stage +#define TSDB_QUERY_TYPE_SUBQUERY 0x02U +#define TSDB_QUERY_TYPE_STABLE_SUBQUERY 0x04U // two-stage subquery for super table + +#define TSDB_QUERY_TYPE_TABLE_QUERY 0x08U // query ordinary table; below only apply to client side +#define TSDB_QUERY_TYPE_STABLE_QUERY 0x10U // query on super table +#define TSDB_QUERY_TYPE_JOIN_QUERY 0x20U // join query +#define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40U // select *,columns... query +#define TSDB_QUERY_TYPE_JOIN_SEC_STAGE 0x80U // join sub query at the second stage + +#define TSQL_SO_ASC 1 +#define TSQL_SO_DESC 0 #ifdef __cplusplus } diff --git a/src/inc/tsocket.h b/src/inc/tsocket.h index 0a02fcf551d210ed2de5e53ca83e1697a2cfcd11..9eb4c26464538fc3ea9d6f088cfb209192636493 100644 --- a/src/inc/tsocket.h +++ b/src/inc/tsocket.h @@ -33,19 +33,19 @@ int taosWriteMsg(int fd, void *ptr, int nbytes); int taosReadMsg(int fd, void *ptr, int nbytes); -int taosOpenUdpSocket(char *ip, short port); +int taosOpenUdpSocket(char *ip, uint16_t port); -int taosOpenTcpClientSocket(char *ip, short port, char *localIp); +int taosOpenTcpClientSocket(char *ip, uint16_t port, char *localIp); -int taosOpenTcpServerSocket(char *ip, short port); +int taosOpenTcpServerSocket(char *ip, uint16_t port); int taosKeepTcpAlive(int sockFd); void taosCloseTcpSocket(int sockFd); -int taosOpenUDServerSocket(char *ip, short port); +int taosOpenUDServerSocket(char *ip, uint16_t port); -int taosOpenUDClientSocket(char *ip, short port); +int taosOpenUDClientSocket(char *ip, uint16_t port); int taosOpenRawSocket(char *ip); diff --git a/src/inc/tsqldef.h b/src/inc/tsqldef.h index 6d2c166eab57977f703017eebcd762584240781c..ea0500eb866f65dfe3ef77d0f7b318b2bcbdec0f 100644 --- a/src/inc/tsqldef.h +++ b/src/inc/tsqldef.h @@ -72,146 +72,144 @@ #define TK_CONFIGS 54 #define TK_SCORES 55 #define TK_GRANTS 56 -#define TK_DOT 57 -#define TK_TABLES 58 -#define TK_STABLES 59 -#define TK_VGROUPS 60 -#define TK_DROP 61 -#define TK_TABLE 62 -#define TK_DATABASE 63 -#define TK_DNODE 64 -#define TK_IP 65 -#define TK_USER 66 -#define TK_ACCOUNT 67 -#define TK_USE 68 -#define TK_DESCRIBE 69 -#define TK_ALTER 70 -#define TK_PASS 71 -#define TK_PRIVILEGE 72 -#define TK_LOCAL 73 -#define TK_IF 74 -#define TK_EXISTS 75 -#define TK_CREATE 76 -#define TK_PPS 77 -#define TK_TSERIES 78 -#define TK_DBS 79 -#define TK_STORAGE 80 -#define TK_QTIME 81 -#define TK_CONNS 82 -#define TK_STATE 83 -#define TK_KEEP 84 -#define TK_CACHE 85 -#define TK_REPLICA 86 -#define TK_DAYS 87 -#define TK_ROWS 88 -#define TK_ABLOCKS 89 -#define TK_TBLOCKS 90 -#define TK_CTIME 91 -#define TK_CLOG 92 -#define TK_COMP 93 -#define TK_PRECISION 94 -#define TK_LP 95 -#define TK_RP 96 -#define TK_TAGS 97 -#define TK_USING 98 -#define TK_AS 99 -#define TK_COMMA 100 -#define TK_NULL 101 -#define TK_SELECT 102 -#define TK_FROM 103 -#define TK_VARIABLE 104 -#define TK_INTERVAL 105 -#define TK_FILL 106 -#define TK_SLIDING 107 -#define TK_ORDER 108 -#define TK_BY 109 -#define TK_ASC 110 -#define TK_DESC 111 -#define TK_GROUP 112 -#define TK_HAVING 113 -#define TK_LIMIT 114 -#define TK_OFFSET 115 -#define TK_SLIMIT 116 -#define TK_SOFFSET 117 -#define TK_WHERE 118 -#define TK_NOW 119 -#define TK_INSERT 120 -#define TK_INTO 121 -#define TK_VALUES 122 -#define TK_RESET 123 -#define TK_QUERY 124 -#define TK_ADD 125 -#define TK_COLUMN 126 -#define TK_TAG 127 -#define TK_CHANGE 128 -#define TK_SET 129 -#define TK_KILL 130 -#define TK_CONNECTION 131 -#define TK_COLON 132 
-#define TK_STREAM 133 -#define TK_ABORT 134 -#define TK_AFTER 135 -#define TK_ATTACH 136 -#define TK_BEFORE 137 -#define TK_BEGIN 138 -#define TK_CASCADE 139 -#define TK_CLUSTER 140 -#define TK_CONFLICT 141 -#define TK_COPY 142 -#define TK_DEFERRED 143 -#define TK_DELIMITERS 144 -#define TK_DETACH 145 -#define TK_EACH 146 -#define TK_END 147 -#define TK_EXPLAIN 148 -#define TK_FAIL 149 -#define TK_FOR 150 -#define TK_IGNORE 151 -#define TK_IMMEDIATE 152 -#define TK_INITIALLY 153 -#define TK_INSTEAD 154 -#define TK_MATCH 155 -#define TK_KEY 156 -#define TK_OF 157 -#define TK_RAISE 158 -#define TK_REPLACE 159 -#define TK_RESTRICT 160 -#define TK_ROW 161 -#define TK_STATEMENT 162 -#define TK_TRIGGER 163 -#define TK_VIEW 164 -#define TK_ALL 165 -#define TK_COUNT 166 -#define TK_SUM 167 -#define TK_AVG 168 -#define TK_MIN 169 -#define TK_MAX 170 -#define TK_FIRST 171 -#define TK_LAST 172 -#define TK_TOP 173 -#define TK_BOTTOM 174 -#define TK_STDDEV 175 -#define TK_PERCENTILE 176 -#define TK_APERCENTILE 177 -#define TK_LEASTSQUARES 178 -#define TK_HISTOGRAM 179 -#define TK_DIFF 180 -#define TK_SPREAD 181 -#define TK_TWA 182 -#define TK_INTERP 183 -#define TK_LAST_ROW 184 -#define TK_SEMI 185 -#define TK_NONE 186 -#define TK_PREV 187 -#define TK_LINEAR 188 -#define TK_IMPORT 189 -#define TK_METRIC 190 -#define TK_TBNAME 191 -#define TK_JOIN 192 -#define TK_METRICS 193 -#define TK_STABLE 194 -#define TK_QUESTION 195 +#define TK_VNODES 57 +#define TK_IPTOKEN 58 +#define TK_DOT 59 +#define TK_TABLES 60 +#define TK_STABLES 61 +#define TK_VGROUPS 62 +#define TK_DROP 63 +#define TK_TABLE 64 +#define TK_DATABASE 65 +#define TK_DNODE 66 +#define TK_USER 67 +#define TK_ACCOUNT 68 +#define TK_USE 69 +#define TK_DESCRIBE 70 +#define TK_ALTER 71 +#define TK_PASS 72 +#define TK_PRIVILEGE 73 +#define TK_LOCAL 74 +#define TK_IF 75 +#define TK_EXISTS 76 +#define TK_CREATE 77 +#define TK_PPS 78 +#define TK_TSERIES 79 +#define TK_DBS 80 +#define TK_STORAGE 81 +#define TK_QTIME 82 +#define TK_CONNS 83 +#define TK_STATE 84 +#define TK_KEEP 85 +#define TK_CACHE 86 +#define TK_REPLICA 87 +#define TK_DAYS 88 +#define TK_ROWS 89 +#define TK_ABLOCKS 90 +#define TK_TBLOCKS 91 +#define TK_CTIME 92 +#define TK_CLOG 93 +#define TK_COMP 94 +#define TK_PRECISION 95 +#define TK_LP 96 +#define TK_RP 97 +#define TK_TAGS 98 +#define TK_USING 99 +#define TK_AS 100 +#define TK_COMMA 101 +#define TK_NULL 102 +#define TK_SELECT 103 +#define TK_FROM 104 +#define TK_VARIABLE 105 +#define TK_INTERVAL 106 +#define TK_FILL 107 +#define TK_SLIDING 108 +#define TK_ORDER 109 +#define TK_BY 110 +#define TK_ASC 111 +#define TK_DESC 112 +#define TK_GROUP 113 +#define TK_HAVING 114 +#define TK_LIMIT 115 +#define TK_OFFSET 116 +#define TK_SLIMIT 117 +#define TK_SOFFSET 118 +#define TK_WHERE 119 +#define TK_NOW 120 +#define TK_INSERT 121 +#define TK_INTO 122 +#define TK_VALUES 123 +#define TK_RESET 124 +#define TK_QUERY 125 +#define TK_ADD 126 +#define TK_COLUMN 127 +#define TK_TAG 128 +#define TK_CHANGE 129 +#define TK_SET 130 +#define TK_KILL 131 +#define TK_CONNECTION 132 +#define TK_COLON 133 +#define TK_STREAM 134 +#define TK_ABORT 135 +#define TK_AFTER 136 +#define TK_ATTACH 137 +#define TK_BEFORE 138 +#define TK_BEGIN 139 +#define TK_CASCADE 140 +#define TK_CLUSTER 141 +#define TK_CONFLICT 142 +#define TK_COPY 143 +#define TK_DEFERRED 144 +#define TK_DELIMITERS 145 +#define TK_DETACH 146 +#define TK_EACH 147 +#define TK_END 148 +#define TK_EXPLAIN 149 +#define TK_FAIL 150 +#define TK_FOR 151 +#define TK_IGNORE 152 +#define TK_IMMEDIATE 153 
+#define TK_INITIALLY 154 +#define TK_INSTEAD 155 +#define TK_MATCH 156 +#define TK_KEY 157 +#define TK_OF 158 +#define TK_RAISE 159 +#define TK_REPLACE 160 +#define TK_RESTRICT 161 +#define TK_ROW 162 +#define TK_STATEMENT 163 +#define TK_TRIGGER 164 +#define TK_VIEW 165 +#define TK_ALL 166 +#define TK_COUNT 167 +#define TK_SUM 168 +#define TK_AVG 169 +#define TK_MIN 170 +#define TK_MAX 171 +#define TK_FIRST 172 +#define TK_LAST 173 +#define TK_TOP 174 +#define TK_BOTTOM 175 +#define TK_STDDEV 176 +#define TK_PERCENTILE 177 +#define TK_APERCENTILE 178 +#define TK_LEASTSQUARES 179 +#define TK_HISTOGRAM 180 +#define TK_DIFF 181 +#define TK_SPREAD 182 +#define TK_TWA 183 +#define TK_INTERP 184 +#define TK_LAST_ROW 185 +#define TK_SEMI 186 +#define TK_NONE 187 +#define TK_PREV 188 +#define TK_LINEAR 189 +#define TK_IMPORT 190 +#define TK_METRIC 191 +#define TK_TBNAME 192 +#define TK_JOIN 193 +#define TK_METRICS 194 +#define TK_STABLE 195 #endif - - diff --git a/src/inc/tsqlfunction.h b/src/inc/tsqlfunction.h index b3243cd8115e1245c9126d003f591cc89f98916a..0ed6a9952ec5d8b9c3e006c092b58b6a7d644297 100644 --- a/src/inc/tsqlfunction.h +++ b/src/inc/tsqlfunction.h @@ -24,7 +24,6 @@ extern "C" { #include #include "trpc.h" -#include "tsql.h" #include "ttypes.h" #define TSDB_FUNC_INVALID_ID -1 @@ -228,8 +227,6 @@ typedef struct SPatternCompareInfo { int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, int16_t *len, int16_t *interResBytes, int16_t extLength, bool isSuperTable); -SResultInfo *getResultSupportInfo(SQLFunctionCtx *pCtx); - int patternMatch(const char *zPattern, const char *zString, size_t size, const SPatternCompareInfo *pInfo); int WCSPatternMatch(const wchar_t *zPattern, const wchar_t *zString, size_t size, const SPatternCompareInfo *pInfo); diff --git a/src/inc/tstatus.h b/src/inc/tstatus.h deleted file mode 100644 index 34bc7c6f27385c92838a41489eac160b8733d03f..0000000000000000000000000000000000000000 --- a/src/inc/tstatus.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#ifndef TDENGINE_TSTATUS_H -#define TDENGINE_TSTATUS_H - -#ifdef __cplusplus -extern "C" { -#endif - -extern char *sdbDnodeStatusStr[]; -extern char *sdbDnodeBalanceStateStr[]; -extern char *sdbVnodeDropStateStr[]; -extern char *sdbVnodeSyncStatusStr[]; - -#ifdef __cplusplus -} -#endif - -#endif // TDENGINE_TSTATUS_H diff --git a/src/inc/tstoken.h b/src/inc/tstoken.h index 420fdaad9b8046e0875ac62a09a5796f737fb5de..3ecf28aa2af6717e751c2c7cf11956468c642df5 100644 --- a/src/inc/tstoken.h +++ b/src/inc/tstoken.h @@ -22,6 +22,18 @@ extern "C" { #include +#define TK_SPACE 200 +#define TK_COMMENT 201 +#define TK_ILLEGAL 202 +#define TK_HEX 203 // hex number 0x123 +#define TK_OCT 204 // oct number +#define TK_BIN 205 // bin format data 0b111 +#define TK_FILE 206 +#define TK_QUESTION 207 // denoting the placeholder of "?",when invoking statement bind query + +#define TSQL_TBNAME "TBNAME" +#define TSQL_TBNAME_L "tbname" + // used to denote the minimum unite in sql parsing typedef struct SSQLToken { uint32_t n; @@ -29,10 +41,6 @@ typedef struct SSQLToken { char * z; } SSQLToken; -#if 0 -char *tscGetToken(char *string, char **token, int *tokenLen); -#endif - /** * tokenizer for sql string * @param z diff --git a/src/inc/ttimer.h b/src/inc/ttimer.h index 8a7e81dac73873e25cead301db4f594651fc7c6f..b9bbbb04dd5a22252a8d9e587977324c7c51221e 100644 --- a/src/inc/ttimer.h +++ b/src/inc/ttimer.h @@ -21,40 +21,41 @@ extern "C" { #endif typedef void *tmr_h; +typedef void (*TAOS_TMR_CALLBACK)(void *, void *); extern uint32_t tmrDebugFlag; -extern int taosTmrThreads; +extern int taosTmrThreads; +extern uint32_t taosMaxTmrCtrl; #define tmrError(...) \ - if (tmrDebugFlag & DEBUG_ERROR) { \ + do { if (tmrDebugFlag & DEBUG_ERROR) { \ tprintf("ERROR TMR ", tmrDebugFlag, __VA_ARGS__); \ - } + } } while(0) + #define tmrWarn(...) \ - if (tmrDebugFlag & DEBUG_WARN) { \ + do { if (tmrDebugFlag & DEBUG_WARN) { \ tprintf("WARN TMR ", tmrDebugFlag, __VA_ARGS__); \ - } + } } while(0) + #define tmrTrace(...) 
\ - if (tmrDebugFlag & DEBUG_TRACE) { \ + do { if (tmrDebugFlag & DEBUG_TRACE) { \ tprintf("TMR ", tmrDebugFlag, __VA_ARGS__); \ - } + } } while(0) -#define MAX_NUM_OF_TMRCTL 512 #define MSECONDS_PER_TICK 5 -void *taosTmrInit(int maxTmr, int resoultion, int longest, char *label); +void *taosTmrInit(int maxTmr, int resoultion, int longest, const char *label); -tmr_h taosTmrStart(void (*fp)(void *, void *), int mseconds, void *param1, void *handle); +tmr_h taosTmrStart(TAOS_TMR_CALLBACK fp, int mseconds, void *param, void *handle); -void taosTmrStop(tmr_h tmrId); +bool taosTmrStop(tmr_h tmrId); -void taosTmrStopA(tmr_h *timerId); +bool taosTmrStopA(tmr_h *timerId); -void taosTmrReset(void (*fp)(void *, void *), int mseconds, void *param1, void *handle, tmr_h *pTmrId); +bool taosTmrReset(TAOS_TMR_CALLBACK fp, int mseconds, void *param, void *handle, tmr_h *pTmrId); void taosTmrCleanUp(void *handle); -void taosTmrList(void *handle); - #ifdef __cplusplus } #endif diff --git a/src/inc/ttypes.h b/src/inc/ttypes.h index b2ea8e918a611c692feb76e6a6be05b786874fac..0f8eb2d58c31b5fe8f138bad64f5ff41b70cab9e 100644 --- a/src/inc/ttypes.h +++ b/src/inc/ttypes.h @@ -50,7 +50,7 @@ bool isNull(const char *val, int32_t type); void setNull(char *val, int32_t type, int32_t bytes); void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems); -void assignVal(char *val, char *src, int32_t len, int32_t type); +void assignVal(char *val, const char *src, int32_t len, int32_t type); void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); // variant, each number/string/field_id has a corresponding struct during parsing sql diff --git a/src/inc/tutil.h b/src/inc/tutil.h index f63f8b6cec6d8d5c5f610e23126c7d8fa6cf8c5f..f3576790a93ad7c056aa7073b8ab55cbcc5da217 100644 --- a/src/inc/tutil.h +++ b/src/inc/tutil.h @@ -26,17 +26,19 @@ extern "C" { #include "tsdb.h" #ifndef STDERR_FILENO - #define VALIDFD(x) ((x) > 2) -#else - #define VALIDFD(x) ((x) > STDERR_FILENO) +#define STDERR_FILENO (2) #endif +#define FD_VALID(x) ((x) > STDERR_FILENO) +#define FD_INITIALIZER ((int32_t)-1) + #define WCHAR wchar_t + #define tfree(x) \ { \ if (x) { \ - free(x); \ - x = NULL; \ + free((void*)(x)); \ + x = 0; \ } \ } @@ -89,7 +91,7 @@ extern "C" { } else { \ return (x) < (y) ? 
-1 : 1; \ } \ - } while (0); + } while (0) #define GET_INT8_VAL(x) (*(int8_t *)(x)) #define GET_INT16_VAL(x) (*(int16_t *)(x)) @@ -169,13 +171,11 @@ int32_t taosInitTimer(void (*callback)(int), int32_t ms); */ uint32_t MurmurHash3_32(const void *key, int32_t len); -bool taosCheckDbName(char *db, char *monitordb); - bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len); bool taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs); -bool taosValidateEncodec(char *encodec); +bool taosValidateEncodec(const char *encodec); bool taosGetVersionNumber(char *versionStr, int *versionNubmer); @@ -187,20 +187,41 @@ static FORCE_INLINE void taosEncryptPass(uint8_t *inBuf, unsigned int inLen, cha memcpy(target, context.digest, TSDB_KEY_LEN); } +int taosCheckVersion(char *input_client_version, char *input_server_version, int compared_segments); + char *taosIpStr(uint32_t ipInt); -#ifdef _TAOS_MEM_TEST_ -// Use during test to simulate the success and failure scenarios of memory allocation -extern void* taos_malloc(unsigned int size, char* _func); -extern void* taos_calloc(unsigned int num, unsigned int size, char* _func); -extern void* taos_realloc(void* ptr, unsigned int size, char* _func); -extern void taos_free(void* ptr); -#define malloc(size) taos_malloc(size, __FUNCTION__) -#define calloc(num, size) taos_calloc(num, size, __FUNCTION__) -#define realloc(ptr, size) taos_realloc(ptr, size, __FUNCTION__) -#define free(ptr) taos_free(ptr) -#endif +uint32_t ip2uint(const char *const ip_addr); + +#define TAOS_ALLOC_MODE_DEFAULT 0 +#define TAOS_ALLOC_MODE_RANDOM_FAIL 1 +#define TAOS_ALLOC_MODE_DETECT_LEAK 2 +void taosSetAllocMode(int mode, const char* path, bool autoDump); +void taosDumpMemoryLeak(); + +#ifdef TAOS_MEM_CHECK + +void * taos_malloc(size_t size, const char *file, uint32_t line); +void * taos_calloc(size_t num, size_t size, const char *file, uint32_t line); +void * taos_realloc(void *ptr, size_t size, const char *file, uint32_t line); +void taos_free(void *ptr, const char *file, uint32_t line); +char * taos_strdup(const char *str, const char *file, uint32_t line); +char * taos_strndup(const char *str, size_t size, const char *file, uint32_t line); +ssize_t taos_getline(char **lineptr, size_t *n, FILE *stream, const char *file, uint32_t line); + +#ifndef TAOS_MEM_CHECK_IMPL + +#define malloc(size) taos_malloc(size, __FILE__, __LINE__) +#define calloc(num, size) taos_calloc(num, size, __FILE__, __LINE__) +#define realloc(ptr, size) taos_realloc(ptr, size, __FILE__, __LINE__) +#define free(ptr) taos_free(ptr, __FILE__, __LINE__) +#define strdup(str) taos_strdup(str, __FILE__, __LINE__) +#define strndup(str, size) taos_strndup(str, size, __FILE__, __LINE__) +#define getline(lineptr, n, stream) taos_getline(lineptr, n, stream, __FILE__, __LINE__) + +#endif // TAOS_MEM_CHECK_IMPL +#endif // TAOS_MEM_CHECK #ifdef __cplusplus } diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index 20aa0c9d3117c44b451cc6607dd4518d637b621a..7442367e91dbd1f972b0bfa703720e6d15be0182 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -6,7 +6,7 @@ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc) INCLUDE_DIRECTORIES(inc) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(./src SRC) LIST(REMOVE_ITEM SRC ./src/shellWindows.c) ADD_EXECUTABLE(shell ${SRC}) diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 
12024d0b8673e1349e5a789d60632a278c7a696a..499c93e0ec96ddf0b3b655286207cf23b1f5694d 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -58,6 +58,8 @@ struct arguments { bool is_raw_time; bool is_use_passwd; char file[TSDB_FILENAME_LEN]; + char dir[TSDB_FILENAME_LEN]; + int threadNum; char* commands; int abort; }; @@ -74,12 +76,14 @@ void shellRunCommandOnServer(TAOS* con, char command[]); void read_history(); void write_history(); void source_file(TAOS* con, char* fptr); +void source_dir(TAOS* con, struct arguments* args); void get_history_path(char* history); void cleanup_handler(void* arg); void exitShell(); int shellDumpResult(TAOS* con, char* fname, int* error_no, bool printMode); void shellPrintNChar(char* str, int width, bool printMode); void shellGetGrantInfo(void *con); +int isCommentLine(char *line); #define max(a, b) ((int)(a) < (int)(b) ? (int)(b) : (int)(a)) /**************** Global variable declarations ****************/ diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c index 9bf182ab0e7cbbe0be007d024c6a832902a3232b..16545a5fe807fc72ba730a4d0869a82b960878dd 100644 --- a/src/kit/shell/src/shellCommand.c +++ b/src/kit/shell/src/shellCommand.c @@ -13,20 +13,14 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include - #define __USE_XOPEN -#include - #include "os.h" #include "shell.h" #include "shellCommand.h" +extern int wcwidth(wchar_t c); +extern int wcswidth(const wchar_t *s, size_t n); typedef struct { char widthInString; char widthOnScreen; diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 324dde55d158733b8efb226e7486d83b14880b3d..ac66ab15d90ddaf3ceada296e713b1a9b3a9f75b 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -16,21 +16,13 @@ #define _XOPEN_SOURCE #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include - +#include #include "os.h" #include "shell.h" #include "shellCommand.h" #include "ttime.h" #include "tutil.h" +#include /**************** Global variables ****************/ #ifdef WINDOWS @@ -38,9 +30,19 @@ #elif defined(DARWIN) char CLIENT_VERSION[] = "Welcome to the TDengine shell from mac, client version:%s "; #else - char CLIENT_VERSION[] = "Welcome to the TDengine shell from linux, client version:%s "; + #ifdef CLUSTER + char CLIENT_VERSION[] = "Welcome to the TDengine shell from linux, enterprise client version:%s "; + #else + char CLIENT_VERSION[] = "Welcome to the TDengine shell from linux, community client version:%s "; + #endif #endif -char SERVER_VERSION[] = "server version:%s\nCopyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; + +#ifdef CLUSTER + char SERVER_VERSION[] = "enterprise server version:%s\nCopyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; +#else + char SERVER_VERSION[] = "community server version:%s\nCopyright (c) 2017 by TAOS Data, Inc. 
All rights reserved.\n\n"; +#endif + char PROMPT_HEADER[] = "taos> "; char CONTINUE_PROMPT[] = " -> "; int prompt_size = 6; @@ -109,6 +111,14 @@ TAOS *shellInit(struct arguments *args) { exit(EXIT_SUCCESS); } +#ifdef LINUX + if (args->dir[0] != 0) { + source_dir(con, args); + taos_close(con); + exit(EXIT_SUCCESS); + } +#endif + printf(SERVER_VERSION, taos_get_server_info(con)); return con; @@ -150,6 +160,8 @@ void shellReplaceCtrlChar(char *str) { } break; default: + *pstr = *str; + pstr++; break; } ctrlOn = false; @@ -254,7 +266,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { return; } - if (regex_match(command, "^\\s*use\\s+[a-zA-Z0-9]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { + if (regex_match(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { fprintf(stdout, "Database changed.\n\n"); fflush(stdout); return; @@ -443,9 +455,9 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { printf("%*d|", l[i], *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - printf("%*lld|", l[i], *((int64_t *)row[i])); + printf("%*" PRId64 "|", l[i], *((int64_t *)row[i])); break; - case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_FLOAT: { #ifdef _TD_ARM_32_ float fv = 0; //memcpy(&fv, row[i], sizeof(float)); @@ -454,8 +466,9 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { #else printf("%*.5f|", l[i], *((float *)row[i])); #endif + } break; - case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_DOUBLE: { #ifdef _TD_ARM_32_ double dv = 0; //memcpy(&dv, row[i], sizeof(double)); @@ -464,6 +477,7 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { #else printf("%*.9f|", l[i], *((double *)row[i])); #endif + } break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: @@ -476,7 +490,7 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { break; case TSDB_DATA_TYPE_TIMESTAMP: if (args.is_raw_time) { - printf(" %lld|", *(int64_t *)row[i]); + printf(" %" PRId64 "|", *(int64_t *)row[i]); } else { if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) { tt = (time_t)((*(int64_t *)row[i]) / 1000000); @@ -526,9 +540,9 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { printf("%d\n", *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - printf("%lld\n", *((int64_t *)row[i])); + printf("%" PRId64 "\n", *((int64_t *)row[i])); break; - case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_FLOAT: { #ifdef _TD_ARM_32_ float fv = 0; //memcpy(&fv, row[i], sizeof(float)); @@ -537,8 +551,9 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { #else printf("%.5f\n", *((float *)row[i])); #endif + } break; - case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_DOUBLE: { #ifdef _TD_ARM_32_ double dv = 0; //memcpy(&dv, row[i], sizeof(double)); @@ -547,7 +562,8 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { #else printf("%.9f\n", *((double *)row[i])); #endif - break; + } + break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: memset(t_str, 0, TSDB_MAX_BYTES_PER_ROW); @@ -557,7 +573,7 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { break; case TSDB_DATA_TYPE_TIMESTAMP: if (args.is_raw_time) { - printf("%lld\n", *(int64_t *)row[i]); + printf("%" PRId64 "\n", *(int64_t *)row[i]); } else { if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) { tt = (time_t)((*(int64_t *)row[i]) / 1000000); @@ -612,9 +628,9 @@ int shellDumpResult(TAOS *con, char *fname, 
int *error_no, bool printMode) { fprintf(fp, "%d", *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - fprintf(fp, "%lld", *((int64_t *)row[i])); + fprintf(fp, "%" PRId64, *((int64_t *)row[i])); break; - case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_FLOAT: { #ifdef _TD_ARM_32_ float fv = 0; //memcpy(&fv, row[i], sizeof(float)); @@ -623,8 +639,9 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { #else fprintf(fp, "%.5f", *((float *)row[i])); #endif + } break; - case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_DOUBLE: { #ifdef _TD_ARM_32_ double dv = 0; //memcpy(&dv, row[i], sizeof(double)); @@ -633,6 +650,7 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { #else fprintf(fp, "%.9f", *((double *)row[i])); #endif + } break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: @@ -642,7 +660,7 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { break; case TSDB_DATA_TYPE_TIMESTAMP: if (args.is_raw_time) { - fprintf(fp, "%lld", *(int64_t *)row[i]); + fprintf(fp, "%" PRId64, *(int64_t *)row[i]); } else { if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) { tt = (time_t)((*(int64_t *)row[i]) / 1000000); @@ -755,7 +773,7 @@ void taos_error(TAOS *con) { taos_free_result(pRes); } -static int isCommentLine(char *line) { +int isCommentLine(char *line) { if (line == NULL) return 1; return regex_match(line, "^\\s*#.*", REG_EXTENDED); @@ -771,6 +789,7 @@ void source_file(TAOS *con, char *fptr) { if (wordexp(fptr, &full_path, 0) != 0) { fprintf(stderr, "ERROR: illegal file name\n"); + free(cmd); return; } @@ -779,6 +798,7 @@ void source_file(TAOS *con, char *fptr) { if (access(fname, R_OK) == -1) { fprintf(stderr, "ERROR: file %s is not readable\n", fptr); wordfree(&full_path); + free(cmd); return; } @@ -786,6 +806,7 @@ void source_file(TAOS *con, char *fptr) { if (f == NULL) { fprintf(stderr, "ERROR: failed to open file %s\n", fname); wordfree(&full_path); + free(cmd); return; } @@ -840,7 +861,7 @@ void shellGetGrantInfo(void *con) { TAOS_FIELD *fields = taos_fetch_fields(result); TAOS_ROW row = taos_fetch_row(result); if (row == NULL) { - fprintf(stderr, "\nGrant information is empty.\n"); + fprintf(stderr, "\nFailed to get grant information from server. Abort.\n"); exit(0); } diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c new file mode 100644 index 0000000000000000000000000000000000000000..3292aa8e04bba29b8e3636a3b4988ae2f4cd8362 --- /dev/null +++ b/src/kit/shell/src/shellImport.c @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _XOPEN_SOURCE +#define _DEFAULT_SOURCE + +#include "os.h" +#include "shell.h" +#include "shellCommand.h" +#include "ttime.h" +#include "tutil.h" + +static char **shellSQLFiles = NULL; +static int32_t shellSQLFileNum = 0; +static char shellTablesSQLFile[TSDB_FILENAME_LEN] = {0}; + +typedef struct { + pthread_t threadID; + int threadIndex; + int totalThreads; + void *taos; +} ShellThreadObj; + +static int shellGetFilesNum(const char *directoryName, const char *prefix) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + int fileNum = 0; + if (fscanf(fp, "%d", &fileNum) != 1) { + fprintf(stderr, "ERROR: failed to execute:%s, parse result error\n", cmd); + exit(0); + } + + if (fileNum <= 0) { + fprintf(stderr, "ERROR: directory:%s is empry\n", directoryName); + exit(0); + } + + pclose(fp); + return fileNum; +} + +static void shellParseDirectory(const char *directoryName, const char *prefix, char **fileArray, int totalFiles) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + int fileNum = 0; + while (fscanf(fp, "%s", fileArray[fileNum++])) { + if (strcmp(fileArray[fileNum-1], shellTablesSQLFile) == 0) { + fileNum--; + } + if (fileNum >= totalFiles) { + break; + } + } + + if (fileNum != totalFiles) { + fprintf(stderr, "ERROR: directory:%s changed while read\n", directoryName); + exit(0); + } + + pclose(fp); +} + +static void shellCheckTablesSQLFile(const char *directoryName) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/tables.sql", directoryName); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + while (fscanf(fp, "%s", shellTablesSQLFile)) { + break; + } + + pclose(fp); +} + +static void shellMallocSQLFiles() +{ + shellSQLFiles = (char**)calloc(shellSQLFileNum, sizeof(char*)); + for (int i = 0; i < shellSQLFileNum; i++) { + shellSQLFiles[i] = calloc(1, TSDB_FILENAME_LEN); + } +} + +static void shellGetDirectoryFileList(char *inputDir) +{ + struct stat fileStat; + if (stat(inputDir, &fileStat) < 0) { + fprintf(stderr, "ERROR: %s not exist\n", inputDir); + exit(0); + } + + if (fileStat.st_mode & S_IFDIR) { + shellCheckTablesSQLFile(inputDir); + shellSQLFileNum = shellGetFilesNum(inputDir, "sql"); + int totalSQLFileNum = shellSQLFileNum; + if (shellTablesSQLFile[0] != 0) { + shellSQLFileNum--; + } + shellMallocSQLFiles(); + shellParseDirectory(inputDir, "sql", shellSQLFiles, shellSQLFileNum); + fprintf(stdout, "\nstart to dispose %d files in %s\n", totalSQLFileNum, inputDir); + } + else { + fprintf(stderr, "ERROR: %s is not a directory\n", inputDir); + exit(0); + } +} + +static void shellSourceFile(TAOS *con, char *fptr) { + wordexp_t full_path; + int read_len = 0; + char * cmd = malloc(MAX_COMMAND_SIZE); + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; + + if (wordexp(fptr, &full_path, 0) != 0) { + fprintf(stderr, "ERROR: illegal file name\n"); + return; + } + + char *fname = full_path.we_wordv[0]; + + if (access(fname, R_OK) == -1) { + fprintf(stderr, "ERROR: file %s is not readable\n", fptr); + wordfree(&full_path); + return; + } + + FILE *f = fopen(fname, "r"); + if (f == 
NULL) { + fprintf(stderr, "ERROR: failed to open file %s\n", fname); + wordfree(&full_path); + return; + } + + fprintf(stdout, "begin import file:%s\n", fname); + + int lineNo = 0; + while ((read_len = getline(&line, &line_len, f)) != -1) { + ++lineNo; + if (read_len >= MAX_COMMAND_SIZE) continue; + line[--read_len] = '\0'; + + if (read_len == 0 || isCommentLine(line)) { // line starts with # + continue; + } + + if (line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; + } + + memcpy(cmd + cmd_len, line, read_len); + if (taos_query(con, cmd)) { + fprintf(stderr, "DB error: %s: %s (%d)\n", taos_errstr(con), fname, lineNo); + /* free local resouce: allocated memory/metric-meta refcnt */ + TAOS_RES *pRes = taos_use_result(con); + taos_free_result(pRes); + } + + memset(cmd, 0, MAX_COMMAND_SIZE); + cmd_len = 0; + } + + free(cmd); + if (line) free(line); + wordfree(&full_path); + fclose(f); +} + +void* shellImportThreadFp(void *arg) +{ + ShellThreadObj *pThread = (ShellThreadObj*)arg; + for (int f = 0; f < shellSQLFileNum; ++f) { + if (f % pThread->totalThreads == pThread->threadIndex) { + char *SQLFileName = shellSQLFiles[f]; + shellSourceFile(pThread->taos, SQLFileName); + } + } + + return NULL; +} + +static void shellRunImportThreads(struct arguments* args) +{ + pthread_attr_t thattr; + ShellThreadObj *threadObj = (ShellThreadObj *)calloc(args->threadNum, sizeof(ShellThreadObj)); + for (int t = 0; t < args->threadNum; ++t) { + ShellThreadObj *pThread = threadObj + t; + pThread->threadIndex = t; + pThread->totalThreads = args->threadNum; + pThread->taos = taos_connect(args->host, args->user, args->password, args->database, tsMgmtShellPort); + if (pThread->taos == NULL) { + fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taos)); + exit(0); + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, shellImportThreadFp, (void*)pThread) != 0) { + fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); + exit(0); + } + } + + for (int t = 0; t < args->threadNum; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } + + for (int t = 0; t < args->threadNum; ++t) { + taos_close(threadObj[t].taos); + } + free(threadObj); +} + +void source_dir(TAOS* con, struct arguments* args) { + shellGetDirectoryFileList(args->dir); + int64_t start = taosGetTimestampMs(); + + if (shellTablesSQLFile[0] != 0) { + shellSourceFile(con, shellTablesSQLFile); + int64_t end = taosGetTimestampMs(); + fprintf(stdout, "import %s finished, time spent %.2f seconds\n", shellTablesSQLFile, (end - start) / 1000.0); + } + + shellRunImportThreads(args); + int64_t end = taosGetTimestampMs(); + fprintf(stdout, "import %s finished, time spent %.2f seconds\n", args->dir, (end - start) / 1000.0); +} diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 145025cbe1c1230ecf7a40df6a14d76dd19d9cff..e9e988c6c687a8eaefd858d6628f50f45a142af7 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -13,27 +13,9 @@ * along with this program. If not, see . 
*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - #define __USE_XOPEN -#include +#include "os.h" #include "shell.h" #include "shellCommand.h" @@ -44,6 +26,7 @@ int indicator = 1; struct termios oldtio; +extern int wcwidth(wchar_t c); void insertChar(Command *cmd, char *c, int size); const char *argp_program_version = version; const char *argp_program_bug_address = ""; @@ -58,6 +41,8 @@ static struct argp_option options[] = { {"commands", 's', "COMMANDS", 0, "Commands to run without enter the shell."}, {"raw-time", 'r', 0, 0, "Output time as uint64_t."}, {"file", 'f', "FILE", 0, "Script to run without enter the shell."}, + {"directory", 'D', "DIRECTORY", 0, "Use multi-thread to import all SQL files in the directory separately."}, + {"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."}, {"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."}, {"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."}, {0}}; @@ -107,6 +92,17 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { strcpy(arguments->file, full_path.we_wordv[0]); wordfree(&full_path); break; + case 'D': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + strcpy(arguments->dir, full_path.we_wordv[0]); + wordfree(&full_path); + break; + case 'T': + arguments->threadNum = atoi(arg); + break; case 'd': arguments->database = arg; break; @@ -123,6 +119,15 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { static struct argp argp = {options, parse_opt, args_doc, doc}; void shellParseArgument(int argc, char *argv[], struct arguments *arguments) { + char verType[32] = {0}; + #ifdef CLUSTER + sprintf(verType, "enterprise version: %s\n", version); + #else + sprintf(verType, "community version: %s\n", version); + #endif + + argp_program_version = verType; + argp_parse(&argp, argc, argv, 0, 0, arguments); if (arguments->abort) { error(10, 0, "ABORTED"); @@ -286,7 +291,10 @@ void *shellLoopQuery(void *arg) { pthread_cleanup_push(cleanup_handler, NULL); char *command = malloc(MAX_COMMAND_SIZE); - + if (command == NULL){ + tscError("failed to malloc command"); + return NULL; + } while (1) { // Read command from shell. @@ -295,10 +303,8 @@ void *shellLoopQuery(void *arg) { shellReadCommand(con, command); reset_terminal_mode(); - if (command != NULL) { - // Run the command - shellRunCommand(con, command); - } + // Run the command + shellRunCommand(con, command); } pthread_cleanup_pop(1); diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 63c9eac0db5ddb5001a5516df29f674b4d94c19c..a7b7e8383bafab2f76682488d131c0d2bfbe65d3 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -13,13 +13,6 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include - #include "os.h" #include "shell.h" #include "tsclient.h" @@ -69,7 +62,19 @@ int checkVersion() { } // Global configurations -struct arguments args = {NULL, NULL, NULL, NULL, NULL, false, false, "\0", NULL}; +struct arguments args = { + .host = NULL, + .password = NULL, + .user = NULL, + .database = NULL, + .timezone = NULL, + .is_raw_time = false, + .is_use_passwd = false, + .file = "\0", + .dir = "\0", + .threadNum = 5, + .commands = NULL +}; /* * Main function. 
diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index 38a28e3079790d21783ba3d4e44ababd0ff843c5..599875fa6086276e0b7ea80b97da1e3791a4f73e 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -6,7 +6,7 @@ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc) INCLUDE_DIRECTORIES(inc) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(taosdemo ${SRC}) TARGET_LINK_LIBRARIES(taosdemo taos_static) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index df29a72e860904bfbca6defe0b5472d37f8bbc25..40fbabe1f76027860c8f3b863618f870b833a91f 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -30,7 +30,8 @@ #include #include "taos.h" -#pragma GCC diagnostic ignored "-Wmissing-braces" + +extern char configDir[]; #define BUFFER_SIZE 65536 #define MAX_DB_NAME_SIZE 64 @@ -64,7 +65,7 @@ static struct argp_option options[] = { /* Used by main to communicate with parse_opt. */ struct arguments { char *host; - int port; + uint16_t port; char *user; char *password; char *database; @@ -176,7 +177,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { fprintf(stderr, "Invalid path %s\n", arg); return -1; } - strcpy(configDir, full_path.we_wordv[0]); + taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); wordfree(&full_path); break; case OPT_ABORT: @@ -264,30 +265,35 @@ double getCurrentTime(); void callBack(void *param, TAOS_RES *res, int code); int main(int argc, char *argv[]) { - struct arguments arguments = {NULL, - 0, - "root", - "taosdata", - "test", - "t", - false, - false, - "./output.txt", - 0, - "int", - "", + struct arguments arguments = {NULL, // host + 0, // port + "root", // user + "taosdata", // password + "test", // database + "t", // tb_prefix + false, // use_metric + false, // insert_only + "./output.txt", // output_file + 0, // mode + { + "int", // datatype "", "", "", "", "", "", - 8, - 1, - 1, - 1, - 1, - 50000}; + "" + }, + 8, // len_of_binary + 1, // num_of_CPR + 1, // num_of_connections + 1, // num_of_RPR + 1, // num_of_tables + 50000, // num_of_DPT + 0, // abort + NULL // arg_list + }; /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. */ @@ -307,7 +313,7 @@ int main(int argc, char *argv[]) { enum MODE query_mode = arguments.mode; char *ip_addr = arguments.host; - int port = arguments.port; + uint16_t port = arguments.port; char *user = arguments.user; char *pass = arguments.password; char *db_name = arguments.database; @@ -340,7 +346,7 @@ int main(int argc, char *argv[]) { struct tm tm = *localtime(&tTime); fprintf(fp, "###################################################################\n"); - fprintf(fp, "# Server IP: %s:%d\n", ip_addr == NULL ? "localhost" : ip_addr, port); + fprintf(fp, "# Server IP: %s:%hu\n", ip_addr == NULL ? "localhost" : ip_addr, port); fprintf(fp, "# User: %s\n", user); fprintf(fp, "# Password: %s\n", pass); fprintf(fp, "# Use metric: %s\n", use_metric ? 
"true" : "false"); diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt index 2bcc020654c603b4f43c862e452d90d937d7466a..76b40d1c2d278bd8f13829a719e23c67402bec2e 100644 --- a/src/kit/taosdump/CMakeLists.txt +++ b/src/kit/taosdump/CMakeLists.txt @@ -6,7 +6,7 @@ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc) INCLUDE_DIRECTORIES(inc) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(taosdump ${SRC}) TARGET_LINK_LIBRARIES(taosdump taos_static) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index f168a0d90fd68477d57019f81af0494e643a0c7f..c8ef3bc0481c72d284485d7c369db97de6d780e9 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -172,7 +172,7 @@ struct arguments { char *host; char *user; char *password; - int port; + uint16_t port; // output file char output[TSDB_FILENAME_LEN + 1]; char input[TSDB_FILENAME_LEN + 1]; diff --git a/src/modules/http/CMakeLists.txt b/src/modules/http/CMakeLists.txt index 911c5a3515852907e58d6811dc2b34ba73ed0d87..d8dccb526affec580adf5d69d7b10c033b51d0ad 100644 --- a/src/modules/http/CMakeLists.txt +++ b/src/modules/http/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc) diff --git a/src/modules/http/inc/httpHandle.h b/src/modules/http/inc/httpHandle.h index e7ac0365c25109512be67e390cf7c6c4868ed3ef..1b746e15200e1dfa7f0b5dcfc0054120c94aee56 100644 --- a/src/modules/http/inc/httpHandle.h +++ b/src/modules/http/inc/httpHandle.h @@ -63,11 +63,13 @@ #define HTTP_WRITE_RETRY_TIMES 500 #define HTTP_WRITE_WAIT_TIME_MS 5 #define HTTP_EXPIRED_TIME 60000 -#define HTTP_DELAY_CLOSE_TIME_MS 1000 +#define HTTP_DELAY_CLOSE_TIME_MS 500 #define HTTP_COMPRESS_IDENTITY 0 #define HTTP_COMPRESS_GZIP 2 +#define HTTP_SESSION_ID_LEN (TSDB_USER_LEN * 2 + 1) + typedef enum { HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_HANDLING, @@ -83,7 +85,7 @@ typedef struct { int expire; int access; void *taos; - char id[TSDB_USER_LEN]; + char id[HTTP_SESSION_ID_LEN + 1]; } HttpSession; typedef enum { @@ -210,7 +212,7 @@ typedef struct HttpThread { typedef struct _http_server_obj_ { char label[HTTP_LABEL_SIZE]; char serverIp[16]; - short serverPort; + uint16_t serverPort; int cacheContext; int sessionExpire; int numOfThreads; @@ -233,7 +235,7 @@ bool httpCheckUsedbSql(char *sql); void httpTimeToString(time_t t, char *buf, int buflen); // http init method -void *httpInitServer(char *ip, short port, char *label, int numOfThreads, void *fp, void *shandle); +void *httpInitServer(char *ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle); void httpCleanUpServer(HttpServer *pServer); // http server connection diff --git a/src/modules/http/src/gcJson.c b/src/modules/http/src/gcJson.c index 0cb20ec7e1e4ab1608415cc88e622a4731711858..ecd923564473a534018df4bcfd8269e9da55fb14 100644 --- a/src/modules/http/src/gcJson.c +++ b/src/modules/http/src/gcJson.c @@ -119,7 +119,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, cmd->numOfRows += numOfRows; } - for (int i = 0; i < numOfRows; ++i) { + for (int k = 0; k < numOfRows; ++k) { TAOS_ROW row = taos_fetch_row(result); // for group by diff --git a/src/modules/http/src/httpAuth.c 
b/src/modules/http/src/httpAuth.c index 9d9ead73246837c78c3b534785a1ddc1cbc99055..4503accc0acdc74f1035b91bb2b85a344eb143fe 100644 --- a/src/modules/http/src/httpAuth.c +++ b/src/modules/http/src/httpAuth.c @@ -50,6 +50,7 @@ bool httpParseBasicAuthToken(HttpContext *pContext, char *token, int len) { return false; } strncpy(pContext->user, base64, (size_t)user_len); + pContext->user[user_len] = 0; char *password = user + 1; int pass_len = (int)((base64 + outlen) - password); @@ -60,6 +61,7 @@ bool httpParseBasicAuthToken(HttpContext *pContext, char *token, int len) { return false; } strncpy(pContext->pass, password, (size_t)pass_len); + pContext->pass[pass_len] = 0; free(base64); httpTrace("context:%p, fd:%d, ip:%s, basic token parsed success, user:%s", pContext, pContext->fd, pContext->ipstr, diff --git a/src/modules/http/src/httpCode.c b/src/modules/http/src/httpCode.c index 9188f421b159029251b90e7ead4b17d3c7683621..230c0de2e09046eb9384817498b13adea4ddb42c 100644 --- a/src/modules/http/src/httpCode.c +++ b/src/modules/http/src/httpCode.c @@ -69,7 +69,7 @@ char* httpMsg[] = { "field value type should be number or string", "field value is null", // 51 "parse basic auth token error", - "parse taosd auth token error", + "parse http auth token error", "host type should be string", // grafana diff --git a/src/modules/http/src/httpHandle.c b/src/modules/http/src/httpHandle.c index 16e8378fb8d7c89299ff693b81933b93977384ea..b46fa11cde6cdf93bba737ccbc223f0f9de910be 100644 --- a/src/modules/http/src/httpHandle.c +++ b/src/modules/http/src/httpHandle.c @@ -279,8 +279,7 @@ bool httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) { httpParseChunkedBody(pContext, pParser, false); return HTTP_CHECK_BODY_SUCCESS; } else { - httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, - pContext->ipstr); + httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr); if (!httpReadDataImp(pContext)) { httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr); return HTTP_CHECK_BODY_ERROR; @@ -293,19 +292,14 @@ bool httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) { int httpReadUnChunkedBody(HttpContext* pContext, HttpParser* pParser) { int dataReadLen = pParser->bufsize - (int)(pParser->data.pos - pParser->buffer); if (dataReadLen > pParser->data.len) { - httpError("context:%p, fd:%d, ip:%s, un-chunked body length invalid, dataReadLen:%d > pContext->data.len:%d", - pContext, pContext->fd, pContext->ipstr, dataReadLen, pParser->data.len); + httpError("context:%p, fd:%d, ip:%s, un-chunked body length invalid, read size:%d dataReadLen:%d > pContext->data.len:%d", + pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len); httpSendErrorResp(pContext, HTTP_PARSE_BODY_ERROR); return HTTP_CHECK_BODY_ERROR; } else if (dataReadLen < pParser->data.len) { - httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, dataReadLen:%d < pContext->data.len:%d, continue read", - pContext, pContext->fd, pContext->ipstr, dataReadLen, pParser->data.len); - if (!httpReadDataImp(pContext)) { - httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr); - return HTTP_CHECK_BODY_ERROR; - } else { - return HTTP_CHECK_BODY_CONTINUE; - } + httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, 
continue read", + pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len); + return HTTP_CHECK_BODY_CONTINUE; } else { return HTTP_CHECK_BODY_SUCCESS; } diff --git a/src/modules/http/src/httpServer.c b/src/modules/http/src/httpServer.c index 8b981e0f844e9e803ed1e1159aa188313818c79e..171f811b7d4f07d6ad2a5836669e9c3782ec0e41 100644 --- a/src/modules/http/src/httpServer.c +++ b/src/modules/http/src/httpServer.c @@ -13,23 +13,7 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "os.h" #include "taosmsg.h" #include "tlog.h" @@ -72,7 +56,7 @@ void httpRemoveContextFromEpoll(HttpThread *pThread, HttpContext *pContext) { } bool httpAlterContextState(HttpContext *pContext, HttpContextState srcState, HttpContextState destState) { - return (__sync_val_compare_and_swap_32(&pContext->state, srcState, destState) == srcState); + return (atomic_val_compare_exchange_32(&pContext->state, srcState, destState) == srcState); } void httpFreeContext(HttpServer *pServer, HttpContext *pContext); @@ -117,15 +101,15 @@ void httpFreeContext(HttpServer *pServer, HttpContext *pContext) { void httpCleanUpContextTimer(HttpContext *pContext) { if (pContext->timer != NULL) { taosTmrStopA(&pContext->timer); - httpTrace("context:%p, ip:%s, close timer:%p", pContext, pContext->ipstr, pContext->timer); + //httpTrace("context:%p, ip:%s, close timer:%p", pContext, pContext->ipstr, pContext->timer); pContext->timer = NULL; } } -void httpCleanUpContext(HttpContext *pContext) { - httpTrace("context:%p, start the clean up operation", pContext); - __sync_val_compare_and_swap_64(&pContext->signature, pContext, 0); - if (pContext->signature != NULL) { +void httpCleanUpContext(HttpContext *pContext, void *unused) { + httpTrace("context:%p, start the clean up operation, sig:%p", pContext, pContext->signature); + void *sig = atomic_val_compare_exchange_ptr(&pContext->signature, pContext, 0); + if (sig == NULL) { httpTrace("context:%p is freed by another thread.", pContext); return; } @@ -200,7 +184,7 @@ bool httpInitContext(HttpContext *pContext) { void httpCloseContext(HttpThread *pThread, HttpContext *pContext) { - taosTmrReset(httpCleanUpContext, HTTP_DELAY_CLOSE_TIME_MS, pContext, pThread->pServer->timerHandle, &pContext->timer); + taosTmrReset((TAOS_TMR_CALLBACK)httpCleanUpContext, HTTP_DELAY_CLOSE_TIME_MS, pContext, pThread->pServer->timerHandle, &pContext->timer); httpTrace("context:%p, fd:%d, ip:%s, state:%s will be closed after:%d ms, timer:%p", pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), HTTP_DELAY_CLOSE_TIME_MS, pContext->timer); } @@ -289,7 +273,7 @@ void httpCleanUpConnect(HttpServer *pServer) { taosCloseSocket(pThread->pollFd); while (pThread->pHead) { - httpCleanUpContext(pThread->pHead); + httpCleanUpContext(pThread->pHead, 0); } pthread_cancel(pThread->thread); @@ -345,8 +329,6 @@ bool httpReadDataImp(HttpContext *pContext) { } pParser->buffer[pParser->bufsize] = 0; - httpTrace("context:%p, fd:%d, ip:%s, thread:%s, read size:%d", - pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pParser->bufsize); return true; } @@ -399,10 +381,12 @@ bool httpReadData(HttpThread *pThread, HttpContext *pContext) { int ret = httpCheckReadCompleted(pContext); if (ret == HTTP_CHECK_BODY_CONTINUE) { taosTmrReset(httpCloseContextByServerForExpired, HTTP_EXPIRED_TIME, pContext, 
pThread->pServer->timerHandle, &pContext->timer); - httpTrace("context:%p, fd:%d, ip:%s, not finished yet, try another times, timer:%p", pContext, pContext->fd, pContext->ipstr, pContext->timer); + //httpTrace("context:%p, fd:%d, ip:%s, not finished yet, try another times, timer:%p", pContext, pContext->fd, pContext->ipstr, pContext->timer); return false; } else if (ret == HTTP_CHECK_BODY_SUCCESS){ httpCleanUpContextTimer(pContext); + httpTrace("context:%p, fd:%d, ip:%s, thread:%s, read size:%d, dataLen:%d", + pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->parser.bufsize, pContext->parser.data.len); if (httpDecompressData(pContext)) { return true; } else { @@ -494,7 +478,7 @@ void httpProcessHttpData(void *param) { } else { if (httpReadData(pThread, pContext)) { (*(pThread->processData))(pContext); - __sync_fetch_and_add(&pThread->pServer->requestNum, 1); + atomic_fetch_add_32(&pThread->pServer->requestNum, 1); } } } @@ -543,8 +527,8 @@ void httpAcceptHttpConnection(void *arg) { totalFds += pServer->pThreads[i].numOfFds; } - if (totalFds > tsHttpCacheSessions * 20) { - httpError("fd:%d, ip:%s:%u, totalFds:%d larger than httpCacheSessions:%d*20, refuse connection", + if (totalFds > tsHttpCacheSessions * 100) { + httpError("fd:%d, ip:%s:%u, totalFds:%d larger than httpCacheSessions:%d*100, refuse connection", connFd, inet_ntoa(clientAddr.sin_addr), htons(clientAddr.sin_port), totalFds, tsHttpCacheSessions); taosCloseSocket(connFd); continue; diff --git a/src/modules/http/src/httpSession.c b/src/modules/http/src/httpSession.c index 8e8e39c8b07947445256b94178eba52e40724883..568936ede64c24250c16f590afd5a54378979ed3 100644 --- a/src/modules/http/src/httpSession.c +++ b/src/modules/http/src/httpSession.c @@ -41,8 +41,8 @@ void httpCreateSession(HttpContext *pContext, void *taos) { pthread_mutex_lock(&server->serverMutex); if (pContext->session != NULL && pContext->session == pContext->session->signature) { - httpTrace("context:%p, fd:%d, ip:%s, user:%s, set exist session:%p:%s:%p expired", pContext, pContext->fd, - pContext->ipstr, pContext->user, pContext->session, pContext->session->id, pContext->session->taos); + httpTrace("context:%p, fd:%d, ip:%s, user:%s, set exist session:%p:%p expired", pContext, pContext->fd, + pContext->ipstr, pContext->user, pContext->session, pContext->session->taos); pContext->session->expire = 0; pContext->session->access--; } @@ -51,7 +51,7 @@ void httpCreateSession(HttpContext *pContext, void *taos) { session.taos = taos; session.expire = (int)taosGetTimestampSec() + server->sessionExpire; session.access = 1; - strcpy(session.id, pContext->user); + snprintf(session.id, HTTP_SESSION_ID_LEN, "%s.%s", pContext->user, pContext->pass); pContext->session = (HttpSession *)taosAddStrHash(server->pSessionHash, session.id, (char *)(&session)); if (pContext->session == NULL) { httpError("context:%p, fd:%d, ip:%s, user:%s, error:%s", pContext, pContext->fd, pContext->ipstr, pContext->user, @@ -62,20 +62,23 @@ void httpCreateSession(HttpContext *pContext, void *taos) { } pContext->session->signature = pContext->session; - httpTrace("context:%p, fd:%d, ip:%s, user:%s, create a new session:%p:%s:%p", pContext, pContext->fd, pContext->ipstr, - pContext->user, pContext->session, pContext->session->id, pContext->session->taos); + httpTrace("context:%p, fd:%d, ip:%s, user:%s, create a new session:%p:%p", pContext, pContext->fd, pContext->ipstr, + pContext->user, pContext->session, pContext->session->taos); pthread_mutex_unlock(&server->serverMutex); } 
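For readers skimming the httpSession.c hunks around this point: the session cache key changes from the bare user name to a "user.pass" pair sized by HTTP_SESSION_ID_LEN, so a password change makes a cached entry stale and forces a re-fetch. A minimal sketch of that keying scheme, using hypothetical buffer names and lengths (only snprintf/strcmp semantics are assumed), not the patch's own code:

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical sizes; the patch derives HTTP_SESSION_ID_LEN from TSDB_USER_LEN. */
#define USER_LEN       24
#define SESSION_ID_LEN (USER_LEN * 2 + 1)

/* Build the composite cache key the way httpCreateSession/httpFetchSessionImp now do. */
static void buildSessionId(char *id, size_t idLen, const char *user, const char *pass) {
  snprintf(id, idLen, "%s.%s", user, pass);
}

/* A cached session is reusable only if its stored key still matches the key
 * rebuilt from the current credentials, i.e. the password has not changed. */
static int sessionStillValid(const char *cachedId, const char *user, const char *pass) {
  char id[SESSION_ID_LEN];
  buildSessionId(id, sizeof(id), user, pass);
  return strcmp(cachedId, id) == 0;
}
```

When the comparison fails, the patch restores the stale session and falls through to the normal lookup path, which is why httpFetchSession is split into a wrapper plus httpFetchSessionImp.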
-void httpFetchSession(HttpContext *pContext) { +void httpFetchSessionImp(HttpContext *pContext) { HttpServer *server = pContext->pThread->pServer; pthread_mutex_lock(&server->serverMutex); - pContext->session = (HttpSession *)taosGetStrHashData(server->pSessionHash, pContext->user); + char sessionId[HTTP_SESSION_ID_LEN]; + snprintf(sessionId, HTTP_SESSION_ID_LEN, "%s.%s", pContext->user, pContext->pass); + + pContext->session = (HttpSession *)taosGetStrHashData(server->pSessionHash, sessionId); if (pContext->session != NULL && pContext->session == pContext->session->signature) { pContext->session->access++; - httpTrace("context:%p, fd:%d, ip:%s, user:%s, find an exist session:%p:%s:%p, access:%d, expire:%d", - pContext, pContext->fd, pContext->ipstr, pContext->user, pContext->session, pContext->session->id, + httpTrace("context:%p, fd:%d, ip:%s, user:%s, find an exist session:%p:%p, access:%d, expire:%d", + pContext, pContext->fd, pContext->ipstr, pContext->user, pContext->session, pContext->session->taos, pContext->session->access, pContext->session->expire); pContext->session->expire = (int)taosGetTimestampSec() + server->sessionExpire; } else { @@ -86,6 +89,20 @@ void httpFetchSession(HttpContext *pContext) { pthread_mutex_unlock(&server->serverMutex); } +void httpFetchSession(HttpContext *pContext) { + if (pContext->session == NULL) { + httpFetchSessionImp(pContext); + } else { + char sessionId[HTTP_SESSION_ID_LEN]; + snprintf(sessionId, HTTP_SESSION_ID_LEN, "%s.%s", pContext->user, pContext->pass); + if (strcmp(pContext->session->id, sessionId) != 0) { + httpError("context:%p, fd:%d, ip:%s, user:%s, password may be changed", pContext, pContext->fd, pContext->ipstr, pContext->user); + httpRestoreSession(pContext); + httpFetchSessionImp(pContext); + } + } +} + void httpRestoreSession(HttpContext *pContext) { HttpServer * server = pContext->pThread->pServer; @@ -97,15 +114,16 @@ void httpRestoreSession(HttpContext *pContext) { return; } session->access--; - httpTrace("context:%p, ip:%s, user:%s, restore session:%p:%s:%p, access:%d, expire:%d", - pContext, pContext->ipstr, pContext->user, session, session->id, session->taos, + httpTrace("context:%p, ip:%s, user:%s, restore session:%p:%p, access:%d, expire:%d", + pContext, pContext->ipstr, pContext->user, session, session->taos, session->access, pContext->session->expire); + pContext->session = NULL; pthread_mutex_unlock(&server->serverMutex); } void httpResetSession(char *session) { HttpSession *pSession = (HttpSession *)session; - httpTrace("close session:%p:%s:%p", pSession, pSession->id, pSession->taos); + httpTrace("close session:%p:%p", pSession, pSession->taos); if (pSession->taos != NULL) { taos_close(pSession->taos); pSession->taos = NULL; @@ -144,12 +162,12 @@ int httpSessionExpired(char *session) { return 0; // un-expired, so return false } if (pSession->access > 0) { - httpTrace("session:%p:%s:%p is expired, but still access:%d", pSession, pSession->id, pSession->taos, + httpTrace("session:%p:%p is expired, but still access:%d", pSession, pSession->taos, pSession->access); return 0; // still used, so return false } - httpTrace("need close session:%p:%s:%p for it expired, cur:%d, expire:%d, invertal:%d", - pSession, pSession->id, pSession->taos, cur, pSession->expire, cur - pSession->expire); + httpTrace("need close session:%p:%p for it expired, cur:%d, expire:%d, invertal:%d", + pSession, pSession->taos, cur, pSession->expire, cur - pSession->expire); } return 1; diff --git a/src/modules/http/src/httpSql.c 
b/src/modules/http/src/httpSql.c index 9254658d58818166b9ff45e7ac765735e4f47f9a..4696e80dc785112a8345e6a74f42dd86e0295fa7 100644 --- a/src/modules/http/src/httpSql.c +++ b/src/modules/http/src/httpSql.c @@ -24,11 +24,11 @@ #include "httpResp.h" #include "taos.h" #include "tsclient.h" +#include "tnote.h" -void *taos_connect_a(char *ip, char *user, char *pass, char *db, int port, void (*fp)(void *, TAOS_RES *, int), +void *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, void **taos); void httpProcessMultiSql(HttpContext *pContext); -void taosNotePrint(const char * const format, ...); void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) { HttpContext *pContext = (HttpContext *)param; @@ -165,7 +165,7 @@ void httpProcessMultiSql(HttpContext *pContext) { char *sql = httpGetCmdsString(pContext, cmd->sql); httpDump("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, sql); - taosNotePrint(sql); + taosNotePrintHttp(sql); taos_query_a(pContext->session->taos, sql, httpProcessMultiSqlCallBack, (void *)pContext); } @@ -298,7 +298,7 @@ void httpProcessSingleSqlCmd(HttpContext *pContext) { httpDump("context:%p, fd:%d, ip:%s, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->ipstr, pContext->user, sql); - taosNotePrint(sql); + taosNotePrintHttp(sql); taos_query_a(pSession->taos, sql, httpProcessSingleSqlCallBack, (void *)pContext); } @@ -378,9 +378,7 @@ void httpProcessRequestCb(void *param, TAOS_RES *result, int code) { } void httpProcessRequest(HttpContext *pContext) { - if (pContext->session == NULL) { - httpFetchSession(pContext); - } + httpFetchSession(pContext); if (pContext->session == NULL || pContext->session != pContext->session->signature || pContext->reqType == HTTP_REQTYPE_LOGIN) { diff --git a/src/modules/http/src/httpSystem.c b/src/modules/http/src/httpSystem.c index 5c0d9a69d60f038a1c37ed48bb0c4a6e27501966..efc24c15dd68fbd6105f896bf07ea48ed1db95c4 100644 --- a/src/modules/http/src/httpSystem.c +++ b/src/modules/http/src/httpSystem.c @@ -42,7 +42,7 @@ #endif static HttpServer *httpServer = NULL; -void taosInitNote(int numOfNoteLines, int maxNotes); +void taosInitNote(int numOfNoteLines, int maxNotes, char* lable); int httpInitSystem() { taos_init(); @@ -50,7 +50,7 @@ int httpInitSystem() { httpServer = (HttpServer *)malloc(sizeof(HttpServer)); memset(httpServer, 0, sizeof(HttpServer)); - strcpy(httpServer->label, "taosh"); + strcpy(httpServer->label, "rest"); strcpy(httpServer->serverIp, tsHttpIp); httpServer->serverPort = tsHttpPort; httpServer->cacheContext = tsHttpCacheSessions; @@ -61,7 +61,7 @@ int httpInitSystem() { pthread_mutex_init(&httpServer->serverMutex, NULL); if (tsHttpEnableRecordSql != 0) { - taosInitNote(tsNumOfLogLines / 10, 1); + taosInitNote(tsNumOfLogLines / 10, 1, (char*)"http_note"); } restInitHandle(httpServer); adminInitHandle(httpServer); @@ -77,7 +77,7 @@ int httpStartSystem() { if (httpServer == NULL) { httpError("http server is null"); - return -1; + httpInitSystem(); } if (httpServer->pContextPool == NULL) { @@ -89,7 +89,7 @@ int httpStartSystem() { } if (httpServer->timerHandle == NULL) { - httpServer->timerHandle = taosTmrInit(tsHttpCacheSessions * 20 + 100, 1000, 60000, "http"); + httpServer->timerHandle = taosTmrInit(tsHttpCacheSessions * 100 + 100, 200, 60000, "http"); } if (httpServer->timerHandle == NULL) { httpError("http init timer 
failed"); @@ -148,7 +148,7 @@ void httpCleanUpSystem() { void httpGetReqCount(int32_t *httpReqestNum) { if (httpServer != NULL) { - *httpReqestNum = __sync_fetch_and_and(&httpServer->requestNum, 0); + *httpReqestNum = atomic_exchange_32(&httpServer->requestNum, 0); } else { *httpReqestNum = 0; } diff --git a/src/modules/http/src/restJson.c b/src/modules/http/src/restJson.c index 8aa0ac5069bde6f8e339c6f64c9010c1b57a1d31..6c04d39f45f91e93a94dbe5b71dd0aa606979b2b 100644 --- a/src/modules/http/src/restJson.c +++ b/src/modules/http/src/restJson.c @@ -94,7 +94,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int num_fields = taos_num_fields(result); TAOS_FIELD *fields = taos_fetch_fields(result); - for (int i = 0; i < numOfRows; ++i) { + for (int k = 0; k < numOfRows; ++k) { TAOS_ROW row = taos_fetch_row(result); // data row array begin diff --git a/src/modules/http/src/tgHandle.c b/src/modules/http/src/tgHandle.c index ac17d6da0968b8a0015da0380b17024d3b4f3e53..cec1e40c4c4cd0efc3bb5d1c1ab811ee811c9773 100644 --- a/src/modules/http/src/tgHandle.c +++ b/src/modules/http/src/tgHandle.c @@ -215,7 +215,7 @@ ParseEnd: } } -int tgParseSchema(char *content, char*fileName) { +int tgParseSchema(const char *content, char*fileName) { cJSON *root = cJSON_Parse(content); if (root == NULL) { httpError("failed to parse telegraf schema file:%s, invalid json format, content:%s", fileName, content); @@ -248,7 +248,7 @@ int tgParseSchema(char *content, char*fileName) { return size; } -int tgReadSchema(const char *fileName) { +int tgReadSchema(char *fileName) { FILE *fp = fopen(fileName, "r"); if (fp == NULL) { return -1; @@ -262,6 +262,8 @@ int tgReadSchema(const char *fileName) { size_t result = fread(content, 1, contentSize, fp); if (result != contentSize) { httpError("failed to read telegraf schema file:%s", fileName); + fclose(fp); + free(content); return -1; } diff --git a/src/modules/monitor/CMakeLists.txt b/src/modules/monitor/CMakeLists.txt index ff8ddb1c2daac76823c98ea6c8044655fb34e66b..44ac8aae0b6d9e72c6e5a824d874c38972e93348 100644 --- a/src/modules/monitor/CMakeLists.txt +++ b/src/modules/monitor/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc) diff --git a/src/modules/monitor/src/monitorSystem.c b/src/modules/monitor/src/monitorSystem.c index 013d86050ecc142c7bc9c61b6ff6c5bf9e611ace..9d132e51ce379f64fadabc4a57e9073b7d905904 100644 --- a/src/modules/monitor/src/monitorSystem.c +++ b/src/modules/monitor/src/monitorSystem.c @@ -14,6 +14,7 @@ */ #include "monitor.h" +#include #include #include #include @@ -61,7 +62,7 @@ typedef struct { MonitorConn *monitor = NULL; -TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, int port, void (*fp)(void *, TAOS_RES *, int), +TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, void **taos); void monitorInitConn(void *para, void *unused); void monitorInitConnCb(void *param, TAOS_RES *result, int code); @@ -95,6 +96,9 @@ int monitorInitSystem() { } int monitorStartSystem() { + if (monitor == NULL) { + monitorInitSystem(); + } taosTmrReset(monitorInitConn, 10, NULL, tscTmr, &monitor->initTimer); return 0; } @@ -110,11 +114,7 @@ void monitorInitConn(void *para, void *unused) { monitor->state = 
MONITOR_STATE_INITIALIZING; if (monitor->privateIpStr[0] == 0) { -#ifdef CLUSTER strcpy(monitor->privateIpStr, tsPrivateIp); -#else - strcpy(monitor->privateIpStr, tsInternalIp); -#endif for (int i = 0; i < TSDB_IPv4ADDR_LEN; ++i) { if (monitor->privateIpStr[i] == '.') { monitor->privateIpStr[i] = '_'; @@ -131,7 +131,7 @@ void monitorInitConn(void *para, void *unused) { void monitorInitConnCb(void *param, TAOS_RES *result, int code) { if (code < 0) { - monitorError("monitor:%p, connect to taosd failed, code:%d", monitor->conn, code); + monitorError("monitor:%p, connect to database failed, code:%d", monitor->conn, code); taos_close(monitor->conn); monitor->conn = NULL; monitor->state = MONITOR_STATE_UN_INIT; @@ -139,7 +139,7 @@ void monitorInitConnCb(void *param, TAOS_RES *result, int code) { return; } - monitorTrace("monitor:%p, connect to taosd success, code:%d", monitor->conn, code); + monitorTrace("monitor:%p, connect to database success, code:%d", monitor->conn, code); monitorInitDatabase(); } @@ -164,11 +164,7 @@ void dnodeBuildMonitorSql(char *sql, int cmd) { tsMonitorDbName, IP_LEN_STR + 1); } else if (cmd == MONITOR_CMD_CREATE_TB_DN) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn_%s using %s.dn tags('%s')", tsMonitorDbName, -#ifdef CLUSTER monitor->privateIpStr, tsMonitorDbName, tsPrivateIp); -#else - monitor->privateIpStr, tsMonitorDbName, tsInternalIp); -#endif } else if (cmd == MONITOR_CMD_CREATE_MT_ACCT) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.acct(ts timestamp " @@ -223,10 +219,8 @@ void monitorInitDatabaseCb(void *param, TAOS_RES *result, int code) { taosLogSqlFp = monitorExecuteSQL; #ifdef CLUSTER taosLogAcctFp = monitorSaveAcctLog; - monitorLPrint("dnode:%s is started", tsPrivateIp); -#else - monitorLPrint("dnode:%s is started", tsInternalIp); #endif + monitorLPrint("dnode:%s is started", tsPrivateIp); } monitor->cmdIndex++; monitorInitDatabase(); @@ -242,11 +236,7 @@ void monitorStopSystem() { return; } -#ifdef CLUSTER - monitorLPrint("dnode:%s is stopped", tsPrivateIp); -#else - monitorLPrint("dnode:%s is stopped", tsInternalIp); -#endif + monitorLPrint("dnode:%s monitor module is stopped", tsPrivateIp); monitor->state = MONITOR_STATE_STOPPED; taosLogFp = NULL; if (monitor->initTimer != NULL) { @@ -373,7 +363,7 @@ void monitorSaveSystemInfo() { int64_t ts = taosGetTimestampUs(); char * sql = monitor->sql; - int pos = snprintf(sql, SQL_LENGTH, "insert into %s.dn_%s values(%ld", tsMonitorDbName, monitor->privateIpStr, ts); + int pos = snprintf(sql, SQL_LENGTH, "insert into %s.dn_%s values(%" PRId64, tsMonitorDbName, monitor->privateIpStr, ts); pos += monitorBuildCpuSql(sql + pos); pos += monitorBuildMemorySql(sql + pos); @@ -399,21 +389,29 @@ void monitorSaveAcctLog(char *acctId, int64_t currentPointsPerSecond, int64_t ma char sql[1024] = {0}; sprintf(sql, "insert into %s.acct_%s using %s.acct tags('%s') values(now" - ", %ld, %ld " - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 ", %d)", - tsMonitorDbName, acctId, tsMonitorDbName, acctId, currentPointsPerSecond, maxPointsPerSecond, totalTimeSeries, - maxTimeSeries, totalStorage, maxStorage, totalQueryTime, maxQueryTime, 
totalInbound, maxInbound, - totalOutbound, maxOutbound, totalDbs, maxDbs, totalUsers, maxUsers, totalStreams, maxStreams, totalConns, - maxConns, accessState); + tsMonitorDbName, acctId, tsMonitorDbName, acctId, + currentPointsPerSecond, maxPointsPerSecond, + totalTimeSeries, maxTimeSeries, + totalStorage, maxStorage, + totalQueryTime, maxQueryTime, + totalInbound, maxInbound, + totalOutbound, maxOutbound, + totalDbs, maxDbs, + totalUsers, maxUsers, + totalStreams, maxStreams, + totalConns, maxConns, + accessState); monitorTrace("monitor:%p, save account info, sql %s", monitor->conn, sql); taos_query_a(monitor->conn, sql, dnodeMontiorInsertAcctCallback, "account"); @@ -428,7 +426,7 @@ void monitorSaveLog(int level, const char *const format, ...) { return; } - int len = snprintf(sql, (size_t)max_length, "import into %s.log values(%ld, %d,'", tsMonitorDbName, + int len = snprintf(sql, (size_t)max_length, "import into %s.log values(%" PRId64 ", %d,'", tsMonitorDbName, taosGetTimestampUs(), level); va_start(argpointer, format); @@ -436,11 +434,7 @@ void monitorSaveLog(int level, const char *const format, ...) { va_end(argpointer); if (len > max_length) len = max_length; -#ifdef CLUSTER len += sprintf(sql + len, "', '%s')", tsPrivateIp); -#else - len += sprintf(sql + len, "', '%s')", tsInternalIp); -#endif sql[len++] = 0; monitorTrace("monitor:%p, save log, sql: %s", monitor->conn, sql); diff --git a/src/os/darwin/inc/os.h b/src/os/darwin/inc/os.h index c9f461d9a0d93e74710f13496e94638cd417bbbb..bf86103e8400725b991a9716de25b2018ec5a61d 100644 --- a/src/os/darwin/inc/os.h +++ b/src/os/darwin/inc/os.h @@ -47,7 +47,7 @@ #define taosCloseSocket(x) \ { \ - if (VALIDFD(x)) { \ + if (FD_VALID(x)) { \ close(x); \ x = -1; \ } \ @@ -55,12 +55,89 @@ #define taosWriteSocket(fd, buf, len) write(fd, buf, len) #define taosReadSocket(fd, buf, len) read(fd, buf, len) -#define __sync_val_compare_and_swap_64 __sync_val_compare_and_swap -#define __sync_val_compare_and_swap_32 __sync_val_compare_and_swap -#define __sync_add_and_fetch_64 __sync_add_and_fetch -#define __sync_add_and_fetch_32 __sync_add_and_fetch -int32_t __sync_val_load_32(int32_t *ptr); -void __sync_val_restore_32(int32_t *ptr, int32_t newval); +#define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) +#define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) +#define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) +#define atomic_load_64(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) +#define atomic_load_ptr(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) + +#define atomic_store_8(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_store_16(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_store_32(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_store_64(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_store_ptr(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_exchange_8(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_exchange_16(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_exchange_32(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_exchange_64(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_exchange_ptr(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_val_compare_exchange_8 __sync_val_compare_and_swap +#define 
atomic_val_compare_exchange_16 __sync_val_compare_and_swap +#define atomic_val_compare_exchange_32 __sync_val_compare_and_swap +#define atomic_val_compare_exchange_64 __sync_val_compare_and_swap +#define atomic_val_compare_exchange_ptr __sync_val_compare_and_swap + +#define atomic_add_fetch_8(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_add_fetch_16(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_add_fetch_32(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_add_fetch_64(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_add_fetch_ptr(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_fetch_add_8(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_add_16(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_add_32(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_add_64(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_add_ptr(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_sub_fetch_8(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_sub_fetch_16(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_sub_fetch_32(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_sub_fetch_64(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_sub_fetch_ptr(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_fetch_sub_8(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_16(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_32(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_64(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_ptr(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_and_fetch_8(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_and_fetch_16(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_and_fetch_32(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_and_fetch_64(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_and_fetch_ptr(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_fetch_and_8(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_and_16(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_and_32(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_and_64(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_and_ptr(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_or_fetch_8(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_or_fetch_16(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_or_fetch_32(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_or_fetch_64(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_or_fetch_ptr(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_fetch_or_8(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_or_16(ptr, val) 
__atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_or_32(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_or_64(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_or_ptr(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_xor_fetch_8(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_xor_fetch_16(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_xor_fetch_32(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_xor_fetch_64(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_xor_fetch_ptr(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_fetch_xor_8(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_16(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_32(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_64(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_ptr(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) #define SWAP(a, b, c) \ do { \ diff --git a/src/os/darwin/src/tdarwin.c b/src/os/darwin/src/tdarwin.c index 2f97c7c37699a769186cb57fc7e4e47274a1629c..133bb4893cc9aa2f8b561036ffaff6a53e0db3a7 100644 --- a/src/os/darwin/src/tdarwin.c +++ b/src/os/darwin/src/tdarwin.c @@ -170,12 +170,12 @@ int taosSetSockOpt(int socketfd, int level, int optname, void *optval, int optle return setsockopt(socketfd, level, optname, optval, (socklen_t)optlen); } -int taosOpenUDClientSocket(char *ip, short port) { +int taosOpenUDClientSocket(char *ip, uint16_t port) { int sockFd = 0; struct sockaddr_un serverAddr; int ret; char name[128]; - sprintf(name, "%s.%d", ip, port); + sprintf(name, "%s.%hu", ip, port); sockFd = socket(AF_UNIX, SOCK_STREAM, 0); @@ -198,14 +198,13 @@ int taosOpenUDClientSocket(char *ip, short port) { return sockFd; } -int taosOpenUDServerSocket(char *ip, short port) { +int taosOpenUDServerSocket(char *ip, uint16_t port) { struct sockaddr_un serverAdd; int sockFd; char name[128]; pTrace("open ud socket:%s", name); - // if (tsAllowLocalhost) ip = "0.0.0.0"; - sprintf(name, "%s.%d", ip, port); + sprintf(name, "%s.%hu", ip, port); bzero((char *)&serverAdd, sizeof(serverAdd)); serverAdd.sun_family = AF_UNIX; @@ -243,10 +242,6 @@ int taosInitTimer(void (*callback)(int), int ms) { return setitimer(ITIMER_REAL, &tv, NULL); } -char *taosCharsetReplace(char *charsetstr) { - return charsetstr; -} - void taosGetSystemTimezone() { // get and set default timezone SGlobalConfig *cfg_timezone = tsGetConfigOption("timezone"); @@ -300,7 +295,7 @@ void taosGetSystemInfo() { taosGetSystemLocale(); } -void *taosInitTcpClient(char *ip, short port, char *flabel, int num, void *fp, void *shandle) { +void *taosInitTcpClient(char *ip, uint16_t port, char *flabel, int num, void *fp, void *shandle) { tError("function taosInitTcpClient is not implemented in darwin system, exit!"); exit(0); } @@ -310,12 +305,12 @@ void taosCloseTcpClientConnection(void *chandle) { exit(0); } -void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, short port) { +void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, uint16_t port) { tError("function taosOpenTcpClientConnection is not implemented in darwin system, exit!"); exit(0); } -int taosSendTcpClientData(unsigned int ip, short port, char *data, int 
len, void *chandle) { +int taosSendTcpClientData(unsigned int ip, uint16_t port, char *data, int len, void *chandle) { tError("function taosSendTcpClientData is not implemented in darwin system, exit!"); exit(0); } @@ -335,12 +330,12 @@ void taosCleanUpTcpServer(void *handle) { exit(0); } -void *taosInitTcpServer(char *ip, short port, char *label, int numOfThreads, void *fp, void *shandle) { +void *taosInitTcpServer(char *ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle) { tError("function taosInitTcpServer is not implemented in darwin system, exit!"); exit(0); } -int taosSendTcpServerData(unsigned int ip, short port, char *data, int len, void *chandle) { +int taosSendTcpServerData(unsigned int ip, uint16_t port, char *data, int len, void *chandle) { tError("function taosSendTcpServerData is not implemented in darwin system, exit!"); exit(0); } diff --git a/src/os/linux/CMakeLists.txt b/src/os/linux/CMakeLists.txt index 8a4cc56f72211953f11512981c99dc7076a0c0c5..a702cac759a3dea135da05689118b763631e83f9 100644 --- a/src/os/linux/CMakeLists.txt +++ b/src/os/linux/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) diff --git a/src/os/linux/inc/os.h b/src/os/linux/inc/os.h index 004896960eb18881e4dab1b8dbbb512744619790..acfd284737cadbe5232983958ef79546117d4ef8 100644 --- a/src/os/linux/inc/os.h +++ b/src/os/linux/inc/os.h @@ -1,17 +1,17 @@ /* -* Copyright (c) 2019 TAOS Data, Inc. -* -* This program is free software: you can use, redistribute, and/or modify -* it under the terms of the GNU Affero General Public License, version 3 -* or later ("AGPL"), as published by the Free Software Foundation. -* -* This program is distributed in the hope that it will be useful, but WITHOUT -* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -* FITNESS FOR A PARTICULAR PURPOSE. -* -* You should have received a copy of the GNU Affero General Public License -* along with this program. If not, see . -*/ + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ #ifndef TDENGINE_PLATFORM_LINUX_H #define TDENGINE_PLATFORM_LINUX_H @@ -23,12 +23,19 @@ extern "C" { #include #include +#include +#include #include #include +#include +#include #include +#include #include #include +#include #include +#include #include #include #include @@ -37,51 +44,131 @@ extern "C" { #include #include #include +#include +#include +#include +#include #include -#include +#include #include #include #include #include #include #include +#include #include #include #include #include +#include #include +#include #include -#include #include #include #include #include #include #include -#include -#include +#include +#include + #define taosCloseSocket(x) \ { \ - if (VALIDFD(x)) { \ + if (FD_VALID(x)) { \ close(x); \ x = -1; \ } \ } + #define taosWriteSocket(fd, buf, len) write(fd, buf, len) #define taosReadSocket(fd, buf, len) read(fd, buf, len) -#define __sync_val_compare_and_swap_64 __sync_val_compare_and_swap -#define __sync_val_compare_and_swap_32 __sync_val_compare_and_swap - -#define __sync_add_and_fetch_64 __sync_add_and_fetch -#define __sync_add_and_fetch_32 __sync_add_and_fetch - -#define __sync_sub_and_fetch_64 __sync_sub_and_fetch -#define __sync_sub_and_fetch_32 __sync_sub_and_fetch - -int32_t __sync_val_load_32(int32_t *ptr); -void __sync_val_restore_32(int32_t *ptr, int32_t newval); +#define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) +#define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) +#define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) +#define atomic_load_64(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) +#define atomic_load_ptr(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) + +#define atomic_store_8(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_store_16(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_store_32(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_store_64(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_store_ptr(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_exchange_8(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_exchange_16(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_exchange_32(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_exchange_64(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_exchange_ptr(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_val_compare_exchange_8 __sync_val_compare_and_swap +#define atomic_val_compare_exchange_16 __sync_val_compare_and_swap +#define atomic_val_compare_exchange_32 __sync_val_compare_and_swap +#define atomic_val_compare_exchange_64 __sync_val_compare_and_swap +#define atomic_val_compare_exchange_ptr __sync_val_compare_and_swap + +#define atomic_add_fetch_8(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_add_fetch_16(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_add_fetch_32(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_add_fetch_64(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_add_fetch_ptr(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_fetch_add_8(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_add_16(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) +#define 
atomic_fetch_add_32(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_add_64(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_add_ptr(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_sub_fetch_8(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_sub_fetch_16(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_sub_fetch_32(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_sub_fetch_64(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_sub_fetch_ptr(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_fetch_sub_8(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_16(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_32(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_64(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_ptr(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_and_fetch_8(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_and_fetch_16(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_and_fetch_32(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_and_fetch_64(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_and_fetch_ptr(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_fetch_and_8(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_and_16(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_and_32(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_and_64(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_and_ptr(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_or_fetch_8(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_or_fetch_16(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_or_fetch_32(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_or_fetch_64(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_or_fetch_ptr(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_fetch_or_8(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_or_16(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_or_32(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_or_64(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_or_ptr(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_xor_fetch_8(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_xor_fetch_16(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_xor_fetch_32(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_xor_fetch_64(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_xor_fetch_ptr(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) + +#define atomic_fetch_xor_8(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_16(ptr, val) 
__atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_32(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_64(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_ptr(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) #define SWAP(a, b, c) \ do { \ @@ -138,10 +225,14 @@ bool taosSkipSocketCheck(); int64_t str2int64(char *str); +void taosSetCoreDump(); + +void taosBlockSIGPIPE(); + #define BUILDIN_CLZL(val) __builtin_clzl(val) -#define BUILDIN_CLZ(val) __builtin_clz(val) +#define BUILDIN_CLZ(val) __builtin_clz(val) #define BUILDIN_CTZL(val) __builtin_ctzl(val) -#define BUILDIN_CTZ(val) __builtin_ctz(val) +#define BUILDIN_CTZ(val) __builtin_ctz(val) #ifdef __cplusplus } diff --git a/src/os/linux/src/tlinux.c b/src/os/linux/src/tlinux.c index b5271006e251e2de81443f1127053b1db4f7672a..a23919e458709490d45a5bcadebf84ee5fed743d 100644 --- a/src/os/linux/src/tlinux.c +++ b/src/os/linux/src/tlinux.c @@ -163,12 +163,12 @@ int taosSetSockOpt(int socketfd, int level, int optname, void *optval, int optle return setsockopt(socketfd, level, optname, optval, (socklen_t)optlen); } -int taosOpenUDClientSocket(char *ip, short port) { +int taosOpenUDClientSocket(char *ip, uint16_t port) { int sockFd = 0; struct sockaddr_un serverAddr; int ret; char name[128]; - sprintf(name, "%s.%d", ip, port); + sprintf(name, "%s.%hu", ip, port); sockFd = socket(AF_UNIX, SOCK_STREAM, 0); @@ -191,14 +191,13 @@ int taosOpenUDClientSocket(char *ip, short port) { return sockFd; } -int taosOpenUDServerSocket(char *ip, short port) { +int taosOpenUDServerSocket(char *ip, uint16_t port) { struct sockaddr_un serverAdd; int sockFd; char name[128]; pTrace("open ud socket:%s", name); - // if (tsAllowLocalhost) ip = "0.0.0.0"; - sprintf(name, "%s.%d", ip, port); + sprintf(name, "%s.%hu", ip, port); bzero((char *)&serverAdd, sizeof(serverAdd)); serverAdd.sun_family = AF_UNIX; @@ -265,7 +264,6 @@ void *taosProcessAlarmSignal(void *tharg) { callback(0); } - assert(0); return NULL; } @@ -288,8 +286,10 @@ ssize_t tsendfile(int dfd, int sfd, off_t *offset, size_t size) { ssize_t sentbytes; while (leftbytes > 0) { - // TODO : Think to check if file is larger than 1GB - if (leftbytes > 1000000000) leftbytes = 1000000000; + /* + * TODO : Think to check if file is larger than 1GB + */ + //if (leftbytes > 1000000000) leftbytes = 1000000000; sentbytes = sendfile(dfd, sfd, offset, leftbytes); if (sentbytes == -1) { if (errno == EINTR) { @@ -341,10 +341,12 @@ bool taosSkipSocketCheck() { return false; } -int32_t __sync_val_load_32(int32_t *ptr) { - return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); -} - -void __sync_val_restore_32(int32_t *ptr, int32_t newval) { - __atomic_store_n(ptr, newval, __ATOMIC_RELEASE); +void taosBlockSIGPIPE() { + sigset_t signal_mask; + sigemptyset(&signal_mask); + sigaddset(&signal_mask, SIGPIPE); + int rc = pthread_sigmask(SIG_BLOCK, &signal_mask, NULL); + if (rc != 0) { + pError("failed to block SIGPIPE"); + } } diff --git a/src/os/linux/src/tsystem.c b/src/os/linux/src/tsystem.c index fc2d9860d78f23bec4010774179e1ea64598f586..055a67e0772f1a9595fa2825514358e9ef16083c 100644 --- a/src/os/linux/src/tsystem.c +++ b/src/os/linux/src/tsystem.c @@ -12,7 +12,7 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ - +#include #include #include #include @@ -25,6 +25,14 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include #include "tglobalcfg.h" #include "tlog.h" @@ -73,6 +81,7 @@ bool taosGetProcMemory(float *memoryUsedMB) { char * line = NULL; while (!feof(fp)) { tfree(line); + len = 0; getline(&line, &len, fp); if (line == NULL) { break; @@ -90,7 +99,7 @@ bool taosGetProcMemory(float *memoryUsedMB) { int64_t memKB = 0; char tmp[10]; - sscanf(line, "%s %ld", tmp, &memKB); + sscanf(line, "%s %" PRId64, tmp, &memKB); *memoryUsedMB = (float)((double)memKB / 1024); tfree(line); @@ -115,7 +124,7 @@ bool taosGetSysCpuInfo(SysCpuInfo *cpuInfo) { } char cpu[10] = {0}; - sscanf(line, "%s %ld %ld %ld %ld", cpu, &cpuInfo->user, &cpuInfo->nice, &cpuInfo->system, &cpuInfo->idle); + sscanf(line, "%s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64, cpu, &cpuInfo->user, &cpuInfo->nice, &cpuInfo->system, &cpuInfo->idle); tfree(line); fclose(fp); @@ -129,7 +138,7 @@ bool taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) { return false; } - size_t len; + size_t len = 0; char * line = NULL; getline(&line, &len, fp); if (line == NULL) { @@ -141,7 +150,7 @@ bool taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) { for (int i = 0, blank = 0; line[i] != 0; ++i) { if (line[i] == ' ') blank++; if (blank == PROCESS_ITEM) { - sscanf(line + i + 1, "%ld %ld %ld %ld", &cpuInfo->utime, &cpuInfo->stime, &cpuInfo->cutime, &cpuInfo->cstime); + sscanf(line + i + 1, "%" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64, &cpuInfo->utime, &cpuInfo->stime, &cpuInfo->cutime, &cpuInfo->cstime); break; } } @@ -210,25 +219,6 @@ void taosGetSystemTimezone() { pPrint("timezone not configured, set to system default:%s", tsTimezone); } -typedef struct CharsetPair { - char *oldCharset; - char *newCharset; -} CharsetPair; - -char *taosCharsetReplace(char *charsetstr) { - CharsetPair charsetRep[] = { - {"utf8", "UTF-8"}, {"936", "CP936"}, - }; - - for (int32_t i = 0; i < tListLen(charsetRep); ++i) { - if (strcasecmp(charsetRep[i].oldCharset, charsetstr) == 0) { - return strdup(charsetRep[i].newCharset); - } - } - - return strdup(charsetstr); -} - /* * POSIX format locale string: * (Language Strings)_(Country/Region Strings).(code_page) @@ -391,8 +381,8 @@ bool taosGetCardName(char *ip, char *name) { bool taosGetCardInfo(int64_t *bytes) { static char tsPublicCard[1000] = {0}; if (tsPublicCard[0] == 0) { - if (!taosGetCardName(tsInternalIp, tsPublicCard)) { - pError("can't get card name from ip:%s", tsInternalIp); + if (!taosGetCardName(tsPrivateIp, tsPublicCard)) { + pError("can't get card name from ip:%s", tsPrivateIp); return false; } int cardNameLen = (int)strlen(tsPublicCard); @@ -420,6 +410,7 @@ bool taosGetCardInfo(int64_t *bytes) { while (!feof(fp)) { tfree(line); + len = 0; getline(&line, &len, fp); if (line == NULL) { break; @@ -429,7 +420,7 @@ bool taosGetCardInfo(int64_t *bytes) { } } if (line != NULL) { - sscanf(line, "%s %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld", nouse0, &rbytes, &rpackts, &nouse1, &nouse2, &nouse3, + sscanf(line, "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64, nouse0, &rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &tbytes, &tpackets); *bytes = rbytes + tbytes; tfree(line); @@ -491,15 +482,16 @@ bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) { while (!feof(fp)) { tfree(line); + len = 0; getline(&line, &len, fp); if (line == NULL) { break; } if (strstr(line, "rchar:") != NULL) { - 
sscanf(line, "%s %ld", tmp, readbyte); + sscanf(line, "%s %" PRId64, tmp, readbyte); readIndex++; } else if (strstr(line, "wchar:") != NULL) { - sscanf(line, "%s %ld", tmp, writebyte); + sscanf(line, "%s %" PRId64, tmp, writebyte); readIndex++; } else { } @@ -572,9 +564,9 @@ void taosGetSystemInfo() { } void tsPrintOsInfo() { - pPrint(" os pageSize: %ld(KB)", tsPageSize); - pPrint(" os openMax: %ld", tsOpenMax); - pPrint(" os streamMax: %ld", tsStreamMax); + pPrint(" os pageSize: %" PRId64 "(KB)", tsPageSize); + pPrint(" os openMax: %" PRId64, tsOpenMax); + pPrint(" os streamMax: %" PRId64, tsStreamMax); pPrint(" os numOfCores: %d", tsNumOfCores); pPrint(" os totalDisk: %f(GB)", tsTotalDataDirGB); pPrint(" os totalMemory: %d(MB)", tsTotalMemoryMB); @@ -595,4 +587,129 @@ void taosKillSystem() { // SIGINT pPrint("taosd will shut down soon"); kill(tsProcId, 2); -} \ No newline at end of file +} + +extern int tsEnableCoreFile; +int _sysctl(struct __sysctl_args *args ); +void taosSetCoreDump() { + if (0 == tsEnableCoreFile) { + return; + } + + // 1. set ulimit -c unlimited + struct rlimit rlim; + struct rlimit rlim_new; + if (getrlimit(RLIMIT_CORE, &rlim) == 0) { + pPrint("the old unlimited para: rlim_cur=%d, rlim_max=%d", rlim.rlim_cur, rlim.rlim_max); + rlim_new.rlim_cur = RLIM_INFINITY; + rlim_new.rlim_max = RLIM_INFINITY; + if (setrlimit(RLIMIT_CORE, &rlim_new) != 0) { + pPrint("set unlimited fail, error: %s", strerror(errno)); + rlim_new.rlim_cur = rlim.rlim_max; + rlim_new.rlim_max = rlim.rlim_max; + (void)setrlimit(RLIMIT_CORE, &rlim_new); + } + } + + if (getrlimit(RLIMIT_CORE, &rlim) == 0) { + pPrint("the new unlimited para: rlim_cur=%d, rlim_max=%d", rlim.rlim_cur, rlim.rlim_max); + } + +#ifndef _TD_ARM_ + // 2. set the path for saving core file + struct __sysctl_args args; + int old_usespid = 0; + size_t old_len = 0; + int new_usespid = 1; + size_t new_len = sizeof(new_usespid); + + int name[] = {CTL_KERN, KERN_CORE_USES_PID}; + + memset(&args, 0, sizeof(struct __sysctl_args)); + args.name = name; + args.nlen = sizeof(name)/sizeof(name[0]); + args.oldval = &old_usespid; + args.oldlenp = &old_len; + args.newval = &new_usespid; + args.newlen = new_len; + + old_len = sizeof(old_usespid); + + if (syscall(SYS__sysctl, &args) == -1) { + pPrint("_sysctl(kern_core_uses_pid) set fail: %s", strerror(errno)); + } + + pPrint("The old core_uses_pid[%d]: %d", old_len, old_usespid); + + + old_usespid = 0; + old_len = 0; + memset(&args, 0, sizeof(struct __sysctl_args)); + args.name = name; + args.nlen = sizeof(name)/sizeof(name[0]); + args.oldval = &old_usespid; + args.oldlenp = &old_len; + + old_len = sizeof(old_usespid); + + if (syscall(SYS__sysctl, &args) == -1) { + pPrint("_sysctl(kern_core_uses_pid) get fail: %s", strerror(errno)); + } + + pPrint("The new core_uses_pid[%d]: %d", old_len, old_usespid); +#endif + +#if 0 + // 3. create the path for saving core file + int status; + char coredump_dir[32] = "/var/log/taosdump"; + if (opendir(coredump_dir) == NULL) { + status = mkdir(coredump_dir, S_IRWXU | S_IRWXG | S_IRWXO); + if (status) { + pPrint("mkdir fail, error: %s\n", strerror(errno)); + } + } + + // 4. 
set kernel.core_pattern + struct __sysctl_args args; + char old_corefile[128]; + size_t old_len; + char new_corefile[128] = "/var/log/taosdump/core-%e-%p"; + size_t new_len = sizeof(new_corefile); + + int name[] = {CTL_KERN, KERN_CORE_PATTERN}; + + memset(&args, 0, sizeof(struct __sysctl_args)); + args.name = name; + args.nlen = sizeof(name)/sizeof(name[0]); + args.oldval = old_corefile; + args.oldlenp = &old_len; + args.newval = new_corefile; + args.newlen = new_len; + + old_len = sizeof(old_corefile); + + if (syscall(SYS__sysctl, &args) == -1) { + pPrint("_sysctl(kern_core_pattern) set fail: %s", strerror(errno)); + } + + pPrint("The old kern_core_pattern: %*s\n", old_len, old_corefile); + + + memset(&args, 0, sizeof(struct __sysctl_args)); + args.name = name; + args.nlen = sizeof(name)/sizeof(name[0]); + args.oldval = old_corefile; + args.oldlenp = &old_len; + + old_len = sizeof(old_corefile); + + if (syscall(SYS__sysctl, &args) == -1) { + pPrint("_sysctl(kern_core_pattern) get fail: %s", strerror(errno)); + } + + pPrint("The new kern_core_pattern: %*s\n", old_len, old_corefile); +#endif + +} + diff --git a/src/os/windows/inc/os.h b/src/os/windows/inc/os.h index 1a840ed1ef0c21263ce5f42c0baa6efd7ef7f025..9c0add2c319829e10d192de8a94dc43038c3155d 100644 --- a/src/os/windows/inc/os.h +++ b/src/os/windows/inc/os.h @@ -16,19 +16,30 @@ #ifndef TDENGINE_PLATFORM_WINDOWS_H #define TDENGINE_PLATFORM_WINDOWS_H +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include #include +#include +#include #include -#include -#include -#include -#include +#include +#include +#include #include "winsock2.h" #include -#include -#include -#include -#include #ifdef __cplusplus extern "C" { @@ -78,14 +89,198 @@ extern "C" { #define taosWriteSocket(fd, buf, len) send(fd, buf, len, 0) #define taosReadSocket(fd, buf, len) recv(fd, buf, len, 0) -int32_t __sync_val_compare_and_swap_32(int32_t *ptr, int32_t oldval, int32_t newval); -int32_t __sync_add_and_fetch_32(int32_t *ptr, int32_t val); -int32_t __sync_sub_and_fetch_32(int32_t *ptr, int32_t val); -int64_t __sync_val_compare_and_swap_64(int64_t *ptr, int64_t oldval, int64_t newval); -int64_t __sync_add_and_fetch_64(int64_t *ptr, int64_t val); -int64_t __sync_sub_and_fetch_64(int64_t *ptr, int64_t val); -int32_t __sync_val_load_32(int32_t *ptr); -void __sync_val_restore_32(int32_t *ptr, int32_t newval); +#if defined(_M_ARM) || defined(_M_ARM64) + +/* the '__iso_volatile' functions does not use a memory fence, so these + * definitions are incorrect, comment out as we don't support Windows on + * ARM at present. 
+ +#define atomic_load_8(ptr) __iso_volatile_load8((const volatile __int8*)(ptr)) +#define atomic_load_16(ptr) __iso_volatile_load16((const volatile __int16*)(ptr)) +#define atomic_load_32(ptr) __iso_volatile_load32((const volatile __int32*)(ptr)) +#define atomic_load_64(ptr) __iso_volatile_load64((const volatile __int64*)(ptr)) + +#define atomic_store_8(ptr, val) __iso_volatile_store8((volatile __int8*)(ptr), (__int8)(val)) +#define atomic_store_16(ptr, val) __iso_volatile_store16((volatile __int16*)(ptr), (__int16)(val)) +#define atomic_store_32(ptr, val) __iso_volatile_store32((volatile __int32*)(ptr), (__int32)(val)) +#define atomic_store_64(ptr, val) __iso_volatile_store64((volatile __int64*)(ptr), (__int64)(val)) + +#ifdef _M_ARM64 +#define atomic_load_ptr atomic_load_64 +#define atomic_store_ptr atomic_store_64 +#else +#define atomic_load_ptr atomic_load_32 +#define atomic_store_ptr atomic_store_32 +#endif +*/ +#else + +#define atomic_load_8(ptr) (*(char volatile*)(ptr)) +#define atomic_load_16(ptr) (*(short volatile*)(ptr)) +#define atomic_load_32(ptr) (*(long volatile*)(ptr)) +#define atomic_load_64(ptr) (*(__int64 volatile*)(ptr)) +#define atomic_load_ptr(ptr) (*(void* volatile*)(ptr)) + +#define atomic_store_8(ptr, val) ((*(char volatile*)(ptr)) = (char)(val)) +#define atomic_store_16(ptr, val) ((*(short volatile*)(ptr)) = (short)(val)) +#define atomic_store_32(ptr, val) ((*(long volatile*)(ptr)) = (long)(val)) +#define atomic_store_64(ptr, val) ((*(__int64 volatile*)(ptr)) = (__int64)(val)) +#define atomic_store_ptr(ptr, val) ((*(void* volatile*)(ptr)) = (void*)(val)) + +#endif + +#define atomic_exchange_8(ptr, val) _InterlockedExchange8((char volatile*)(ptr), (char)(val)) +#define atomic_exchange_16(ptr, val) _InterlockedExchange16((short volatile*)(ptr), (short)(val)) +#define atomic_exchange_32(ptr, val) _InterlockedExchange((long volatile*)(ptr), (long)(val)) +#define atomic_exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val)) +#define atomic_exchange_ptr(ptr, val) _InterlockedExchangePointer((void* volatile*)(ptr), (void*)(val)) + +#define atomic_val_compare_exchange_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval)) +#define atomic_val_compare_exchange_16(ptr, oldval, newval) _InterlockedCompareExchange16((short volatile*)(ptr), (short)(newval), (short)(oldval)) +#define atomic_val_compare_exchange_32(ptr, oldval, newval) _InterlockedCompareExchange((long volatile*)(ptr), (long)(newval), (long)(oldval)) +#define atomic_val_compare_exchange_64(ptr, oldval, newval) _InterlockedCompareExchange64((__int64 volatile*)(ptr), (__int64)(newval), (__int64)(oldval)) +#define atomic_val_compare_exchange_ptr(ptr, oldval, newval) _InterlockedCompareExchangePointer((void* volatile*)(ptr), (void*)(newval), (void*)(oldval)) + +char interlocked_add_fetch_8(char volatile *ptr, char val); +short interlocked_add_fetch_16(short volatile *ptr, short val); +long interlocked_add_fetch_32(long volatile *ptr, long val); +__int64 interlocked_add_fetch_64(__int64 volatile *ptr, __int64 val); + +#define atomic_add_fetch_8(ptr, val) interlocked_add_fetch_8((char volatile*)(ptr), (char)(val)) +#define atomic_add_fetch_16(ptr, val) interlocked_add_fetch_16((short volatile*)(ptr), (short)(val)) +#define atomic_add_fetch_32(ptr, val) interlocked_add_fetch_32((long volatile*)(ptr), (long)(val)) +#define atomic_add_fetch_64(ptr, val) interlocked_add_fetch_64((__int64 volatile*)(ptr), (__int64)(val)) +#ifdef _WIN64 + 
#define atomic_add_fetch_ptr atomic_add_fetch_64 +#else + #define atomic_add_fetch_ptr atomic_add_fetch_32 +#endif + +#define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val)) +#define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val)) +#define atomic_fetch_add_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), (long)(val)) +#define atomic_fetch_add_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), (__int64)(val)) +#ifdef _WIN64 + #define atomic_fetch_add_ptr atomic_fetch_add_64 +#else + #define atomic_fetch_add_ptr atomic_fetch_add_32 +#endif + +#define atomic_sub_fetch_8(ptr, val) interlocked_add_fetch_8((char volatile*)(ptr), -(char)(val)) +#define atomic_sub_fetch_16(ptr, val) interlocked_add_fetch_16((short volatile*)(ptr), -(short)(val)) +#define atomic_sub_fetch_32(ptr, val) interlocked_add_fetch_32((long volatile*)(ptr), -(long)(val)) +#define atomic_sub_fetch_64(ptr, val) interlocked_add_fetch_64((__int64 volatile*)(ptr), -(__int64)(val)) +#ifdef _WIN64 + #define atomic_sub_fetch_ptr atomic_sub_fetch_64 +#else + #define atomic_sub_fetch_ptr atomic_sub_fetch_32 +#endif + +#define atomic_fetch_sub_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), -(char)(val)) +#define atomic_fetch_sub_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), -(short)(val)) +#define atomic_fetch_sub_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), -(long)(val)) +#define atomic_fetch_sub_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), -(__int64)(val)) +#ifdef _WIN64 + #define atomic_fetch_sub_ptr atomic_fetch_sub_64 +#else + #define atomic_fetch_sub_ptr atomic_fetch_sub_32 +#endif + +char interlocked_and_fetch_8(char volatile* ptr, char val); +short interlocked_and_fetch_16(short volatile* ptr, short val); +long interlocked_and_fetch_32(long volatile* ptr, long val); +__int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val); + +#define atomic_and_fetch_8(ptr, val) interlocked_and_fetch_8((char volatile*)(ptr), (char)(val)) +#define atomic_and_fetch_16(ptr, val) interlocked_and_fetch_16((short volatile*)(ptr), (short)(val)) +#define atomic_and_fetch_32(ptr, val) interlocked_and_fetch_32((long volatile*)(ptr), (long)(val)) +#define atomic_and_fetch_64(ptr, val) interlocked_and_fetch_64((__int64 volatile*)(ptr), (__int64)(val)) +#ifdef _WIN64 + #define atomic_and_fetch_ptr atomic_and_fetch_64 +#else + #define atomic_and_fetch_ptr atomic_and_fetch_32 +#endif + +#define atomic_fetch_and_8(ptr, val) _InterlockedAnd8((char volatile*)(ptr), (char)(val)) +#define atomic_fetch_and_16(ptr, val) _InterlockedAnd16((short volatile*)(ptr), (short)(val)) +#define atomic_fetch_and_32(ptr, val) _InterlockedAnd((long volatile*)(ptr), (long)(val)) + +#ifdef _M_IX86 + __int64 interlocked_fetch_and_64(__int64 volatile* ptr, __int64 val); + #define atomic_fetch_and_64(ptr, val) interlocked_fetch_and_64((__int64 volatile*)(ptr), (__int64)(val)) +#else + #define atomic_fetch_and_64(ptr, val) _InterlockedAnd64((__int64 volatile*)(ptr), (__int64)(val)) +#endif + +#ifdef _WIN64 + #define atomic_fetch_and_ptr atomic_fetch_and_64 +#else + #define atomic_fetch_and_ptr atomic_fetch_and_32 +#endif + +char interlocked_or_fetch_8(char volatile* ptr, char val); +short interlocked_or_fetch_16(short volatile* ptr, short val); +long interlocked_or_fetch_32(long volatile* ptr, long val); +__int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val); + +#define 
atomic_or_fetch_8(ptr, val) interlocked_or_fetch_8((char volatile*)(ptr), (char)(val)) +#define atomic_or_fetch_16(ptr, val) interlocked_or_fetch_16((short volatile*)(ptr), (short)(val)) +#define atomic_or_fetch_32(ptr, val) interlocked_or_fetch_32((long volatile*)(ptr), (long)(val)) +#define atomic_or_fetch_64(ptr, val) interlocked_or_fetch_64((__int64 volatile*)(ptr), (__int64)(val)) +#ifdef _WIN64 + #define atomic_or_fetch_ptr atomic_or_fetch_64 +#else + #define atomic_or_fetch_ptr atomic_or_fetch_32 +#endif + +#define atomic_fetch_or_8(ptr, val) _InterlockedOr8((char volatile*)(ptr), (char)(val)) +#define atomic_fetch_or_16(ptr, val) _InterlockedOr16((short volatile*)(ptr), (short)(val)) +#define atomic_fetch_or_32(ptr, val) _InterlockedOr((long volatile*)(ptr), (long)(val)) + +#ifdef _M_IX86 + __int64 interlocked_fetch_or_64(__int64 volatile* ptr, __int64 val); + #define atomic_fetch_or_64(ptr, val) interlocked_fetch_or_64((__int64 volatile*)(ptr), (__int64)(val)) +#else + #define atomic_fetch_or_64(ptr, val) _InterlockedOr64((__int64 volatile*)(ptr), (__int64)(val)) +#endif + +#ifdef _WIN64 + #define atomic_fetch_or_ptr atomic_fetch_or_64 +#else + #define atomic_fetch_or_ptr atomic_fetch_or_32 +#endif + +char interlocked_xor_fetch_8(char volatile* ptr, char val); +short interlocked_xor_fetch_16(short volatile* ptr, short val); +long interlocked_xor_fetch_32(long volatile* ptr, long val); +__int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val); + +#define atomic_xor_fetch_8(ptr, val) interlocked_xor_fetch_8((char volatile*)(ptr), (char)(val)) +#define atomic_xor_fetch_16(ptr, val) interlocked_xor_fetch_16((short volatile*)(ptr), (short)(val)) +#define atomic_xor_fetch_32(ptr, val) interlocked_xor_fetch_32((long volatile*)(ptr), (long)(val)) +#define atomic_xor_fetch_64(ptr, val) interlocked_xor_fetch_64((__int64 volatile*)(ptr), (__int64)(val)) +#ifdef _WIN64 + #define atomic_xor_fetch_ptr atomic_xor_fetch_64 +#else + #define atomic_xor_fetch_ptr atomic_xor_fetch_32 +#endif + +#define atomic_fetch_xor_8(ptr, val) _InterlockedXor8((char volatile*)(ptr), (char)(val)) +#define atomic_fetch_xor_16(ptr, val) _InterlockedXor16((short volatile*)(ptr), (short)(val)) +#define atomic_fetch_xor_32(ptr, val) _InterlockedXor((long volatile*)(ptr), (long)(val)) + +#ifdef _M_IX86 + __int64 interlocked_fetch_xor_64(__int64 volatile* ptr, __int64 val); + #define atomic_fetch_xor_64(ptr, val) interlocked_fetch_xor_64((__int64 volatile*)(ptr), (__int64)(val)) +#else + #define atomic_fetch_xor_64(ptr, val) _InterlockedXor64((__int64 volatile*)(ptr), (__int64)(val)) +#endif + +#ifdef _WIN64 + #define atomic_fetch_xor_ptr atomic_fetch_xor_64 +#else + #define atomic_fetch_xor_ptr atomic_fetch_xor_32 +#endif #define SWAP(a, b, c) \ do { \ @@ -181,6 +376,8 @@ int fsendfile(FILE* out_file, FILE* in_file, int64_t* offset, int32_t count); char *strndup(const char *s, size_t n); +void taosSetCoreDump(); + #ifdef __cplusplus } #endif diff --git a/src/os/windows/src/twindows.c b/src/os/windows/src/twindows.c index b1c3112bdbf6f899e1e14ddfba5070636bc58cc0..98be6b60ba16e52b2177971d95930f7f717785aa 100644 --- a/src/os/windows/src/twindows.c +++ b/src/os/windows/src/twindows.c @@ -43,8 +43,11 @@ void taosResetPthread(pthread_t *thread) { } int64_t taosGetPthreadId() { - pthread_t id = pthread_self(); - return (int64_t)id.p; +#ifdef PTW32_VERSION + return pthread_getw32threadid_np(pthread_self()); +#else + return (int64_t)pthread_self(); +#endif } int taosSetSockOpt(int socketfd, int level, int optname, 
void *optval, int optlen) { @@ -63,44 +66,145 @@ int taosSetSockOpt(int socketfd, int level, int optname, void *optval, int optle return setsockopt(socketfd, level, optname, optval, optlen); } -int32_t __sync_val_compare_and_swap_32(int32_t *ptr, int32_t oldval, int32_t newval) { - return InterlockedCompareExchange(ptr, newval, oldval); +// add +char interlocked_add_fetch_8(char volatile* ptr, char val) { + return _InterlockedExchangeAdd8(ptr, val) + val; } -int32_t __sync_add_and_fetch_32(int32_t *ptr, int32_t val) { - return InterlockedAdd(ptr, val); +short interlocked_add_fetch_16(short volatile* ptr, short val) { + return _InterlockedExchangeAdd16(ptr, val) + val; } -int32_t __sync_sub_and_fetch_32(int32_t *ptr, int32_t val) { - return InterlockedAdd(ptr, -val); +long interlocked_add_fetch_32(long volatile* ptr, long val) { + return _InterlockedExchangeAdd(ptr, val) + val; } -int64_t __sync_val_compare_and_swap_64(int64_t *ptr, int64_t oldval, int64_t newval) { - return InterlockedCompareExchange64(ptr, newval, oldval); +__int64 interlocked_add_fetch_64(__int64 volatile* ptr, __int64 val) { + return _InterlockedExchangeAdd64(ptr, val) + val; } -int64_t __sync_add_and_fetch_64(int64_t *ptr, int64_t val) { - return InterlockedAdd64(ptr, val); +// and +char interlocked_and_fetch_8(char volatile* ptr, char val) { + return _InterlockedAnd8(ptr, val) & val; } -int64_t __sync_sub_and_fetch_64(int64_t *ptr, int64_t val) { - return InterlockedAdd64(ptr, -val); +short interlocked_and_fetch_16(short volatile* ptr, short val) { + return _InterlockedAnd16(ptr, val) & val; } -int32_t __sync_val_load_32(int32_t *ptr) { - return InterlockedOr(ptr, 0); +long interlocked_and_fetch_32(long volatile* ptr, long val) { + return _InterlockedAnd(ptr, val) & val; } -void __sync_val_restore_32(int32_t *ptr, int32_t newval) { - InterlockedCompareExchange(ptr, *ptr, newval); +#ifndef _M_IX86 + +__int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val) { + return _InterlockedAnd64(ptr, val) & val; } -void tsPrintOsInfo() {} +#else + +__int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val) { + __int64 old, res; + do { + old = *ptr; + res = old & val; + } while(_InterlockedCompareExchange64(ptr, res, old) != old); + return res; } + +__int64 interlocked_fetch_and_64(__int64 volatile* ptr, __int64 val) { + __int64 old; + do { + old = *ptr; + } while(_InterlockedCompareExchange64(ptr, old & val, old) != old); + return old; } + +#endif + +// or +char interlocked_or_fetch_8(char volatile* ptr, char val) { + return _InterlockedOr8(ptr, val) | val; } + +short interlocked_or_fetch_16(short volatile* ptr, short val) { + return _InterlockedOr16(ptr, val) | val; } + +long interlocked_or_fetch_32(long volatile* ptr, long val) { + return _InterlockedOr(ptr, val) | val; } + +#ifndef _M_IX86 + +__int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val) { + return _InterlockedOr64(ptr, val) | val; } + +#else - -char *taosCharsetReplace(char *charsetstr) { - return charsetstr; +__int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val) { + __int64 old, res; + do { + old = *ptr; + res = old | val; + } while(_InterlockedCompareExchange64(ptr, res, old) != old); + return res; } +__int64 interlocked_fetch_or_64(__int64 volatile* ptr, __int64 val) { + __int64 old; + do { + old = *ptr; + } while(_InterlockedCompareExchange64(ptr, old | val, old) != old); + return old; } + +#endif + +// xor +char interlocked_xor_fetch_8(char volatile* ptr, char val) { + return _InterlockedXor8(ptr, val) ^ val; 
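+ // the _Interlocked* intrinsics return the value held before the operation; applying the same operation to that return value once more (here: ^ val) reconstructs the post-operation value that these *_fetch helpers return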
+} + +short interlocked_xor_fetch_16(short volatile* ptr, short val) { + return _InterlockedXor16(ptr, val) ^ val; +} + +long interlocked_xor_fetch_32(long volatile* ptr, long val) { + return _InterlockedXor(ptr, val) ^ val; +} + +#ifndef _M_IX86 + +__int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val) { + return _InterlockedXor64(ptr, val) ^ val; +} + +#else + +__int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val) { + __int64 old, res; + do { + old = *ptr; + res = old ^ val; + } while(_InterlockedCompareExchange64(ptr, res, old) != old); + return res; +} + +__int64 interlocked_fetch_xor_64(__int64 volatile* ptr, __int64 val) { + __int64 old; + do { + old = *ptr; + } while(_InterlockedCompareExchange64(ptr, old ^ val, old) != old); + return old; +} + +#endif + +void tsPrintOsInfo() {} + void taosGetSystemTimezone() { // get and set default timezone SGlobalConfig *cfg_timezone = tsGetConfigOption("timezone"); @@ -290,4 +394,6 @@ char *strndup(const char *s, size_t n) { memcpy(r, s, len); r[len] = 0; return r; -} \ No newline at end of file +} + +void taosSetCoreDump() {} \ No newline at end of file diff --git a/src/os/windows/src/twintcpclient.c b/src/os/windows/src/twintcpclient.c index 17293c3915aa73aac226df0fdb798b523a66bdbc..9f40dae434b0dba2c511cf9b837a53a8a8910250 100644 --- a/src/os/windows/src/twintcpclient.c +++ b/src/os/windows/src/twintcpclient.c @@ -15,7 +15,7 @@ #include "tlog.h" -void *taosInitTcpClient(char *ip, short port, char *label, int num, void *fp, void *shandle) { +void *taosInitTcpClient(char *ip, uint16_t port, char *label, int num, void *fp, void *shandle) { tError("InitTcpClient not support in windows"); return 0; } @@ -24,12 +24,12 @@ void taosCloseTcpClientConnection(void *chandle) { tError("CloseTcpClientConnection not support in windows"); } -void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, short port) { +void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, uint16_t port) { tError("OpenTcpClientConnection not support in windows"); return 0; } -int taosSendTcpClientData(unsigned int ip, short port, char *data, int len, void *chandle) { +int taosSendTcpClientData(unsigned int ip, uint16_t port, char *data, int len, void *chandle) { tError("SendTcpClientData not support in windows"); return 0; } diff --git a/src/os/windows/src/twintcpserver.c b/src/os/windows/src/twintcpserver.c index a51d807aa2481b59129d54a5a48d008c06d18d70..d5e25693d0b591eddf0a5da2469e86972bb5be00 100644 --- a/src/os/windows/src/twintcpserver.c +++ b/src/os/windows/src/twintcpserver.c @@ -23,12 +23,12 @@ void taosCleanUpTcpServer(void *handle) { tError("CleanUpTcpServer not support in windows"); } -void *taosInitTcpServer(char *ip, short port, char *label, int numOfThreads, void *fp, void *shandle) { +void *taosInitTcpServer(char *ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle) { tError("InitTcpServer not support in windows"); return 0; } -int taosSendTcpServerData(unsigned int ip, short port, char *data, int len, void *chandle) { +int taosSendTcpServerData(unsigned int ip, uint16_t port, char *data, int len, void *chandle) { tError("SendTcpServerData not support in windows"); return 0; } diff --git a/src/rpc/CMakeLists.txt b/src/rpc/CMakeLists.txt index b3a761e3dffdf01e24f9e22369a940cfadd80e2a..8d54d5fda591acd790c4bfab751cec079762e561 100644 --- a/src/rpc/CMakeLists.txt +++ b/src/rpc/CMakeLists.txt @@ -5,7 +5,7 @@ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) 
INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc) INCLUDE_DIRECTORIES(inc) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(./src SRC) ELSEIF (TD_DARWIN_64) LIST(APPEND SRC ./src/thaship.c) diff --git a/src/rpc/inc/thaship.h b/src/rpc/inc/thaship.h index 262673af6299df3b474fa385e6a9191f8e213d77..4acf8b3fbbab14702b3f4e98dc40b0f68fa3a3e2 100644 --- a/src/rpc/inc/thaship.h +++ b/src/rpc/inc/thaship.h @@ -18,8 +18,8 @@ void *taosOpenIpHash(int maxSessions); void taosCloseIpHash(void *handle); -void *taosAddIpHash(void *handle, void *pData, uint32_t ip, short port); -void taosDeleteIpHash(void *handle, uint32_t ip, short port); -void *taosGetIpHash(void *handle, uint32_t ip, short port); +void *taosAddIpHash(void *handle, void *pData, uint32_t ip, uint16_t port); +void taosDeleteIpHash(void *handle, uint32_t ip, uint16_t port); +void *taosGetIpHash(void *handle, uint32_t ip, uint16_t port); #endif diff --git a/src/rpc/inc/ttcpclient.h b/src/rpc/inc/ttcpclient.h index 8c2131f1f6bbdb9a90712caef73d6361ca361e1b..8427c6f162fa0906af0c51e9ff49ad7e85c4b5a5 100644 --- a/src/rpc/inc/ttcpclient.h +++ b/src/rpc/inc/ttcpclient.h @@ -18,10 +18,10 @@ #include "tsdb.h" -void *taosInitTcpClient(char *ip, short port, char *label, int num, void *fp, void *shandle); +void *taosInitTcpClient(char *ip, uint16_t port, char *label, int num, void *fp, void *shandle); void taosCleanUpTcpClient(void *chandle); -void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, short port); +void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, uint16_t port); void taosCloseTcpClientConnection(void *chandle); -int taosSendTcpClientData(uint32_t ip, short port, char *data, int len, void *chandle); +int taosSendTcpClientData(uint32_t ip, uint16_t port, char *data, int len, void *chandle); #endif diff --git a/src/rpc/inc/ttcpserver.h b/src/rpc/inc/ttcpserver.h index 3e3feb46918f3767da89a5a6fa9353fc0a89a17a..ba3bd25719f05372ab97b1dc67a92d244e03ae64 100644 --- a/src/rpc/inc/ttcpserver.h +++ b/src/rpc/inc/ttcpserver.h @@ -18,9 +18,9 @@ #include "tsdb.h" -void *taosInitTcpServer(char *ip, short port, char *label, int numOfThreads, void *fp, void *shandle); +void *taosInitTcpServer(char *ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle); void taosCleanUpTcpServer(void *param); void taosCloseTcpServerConnection(void *param); -int taosSendTcpServerData(uint32_t ip, short port, char *data, int len, void *chandle); +int taosSendTcpServerData(uint32_t ip, uint16_t port, char *data, int len, void *chandle); #endif diff --git a/src/rpc/inc/tudp.h b/src/rpc/inc/tudp.h index c90e21f510dd424d898c22980995fabb2285fee0..27c7593090d89d34b77ba34cf6c2f8b8366f2bd6 100644 --- a/src/rpc/inc/tudp.h +++ b/src/rpc/inc/tudp.h @@ -18,11 +18,11 @@ #include "tsdb.h" -void *taosInitUdpServer(char *ip, short port, char *label, int, void *fp, void *shandle); -void *taosInitUdpClient(char *ip, short port, char *label, int, void *fp, void *shandle); +void *taosInitUdpServer(char *ip, uint16_t port, char *label, int, void *fp, void *shandle); +void *taosInitUdpClient(char *ip, uint16_t port, char *label, int, void *fp, void *shandle); void taosCleanUpUdpConnection(void *handle); -int taosSendUdpData(uint32_t ip, short port, char *data, int dataLen, void *chandle); -void *taosOpenUdpConnection(void *shandle, void *thandle, char *ip, short port); +int taosSendUdpData(uint32_t ip, uint16_t port, char *data, int dataLen, void *chandle); +void *taosOpenUdpConnection(void *shandle, void 
*thandle, char *ip, uint16_t port); void taosFreeMsgHdr(void *hdr); int taosMsgHdrSize(void *hdr); diff --git a/src/rpc/src/thaship.c b/src/rpc/src/thaship.c index 4cc6feeea3355724cf73249cc16920e870604bcc..6b76c4b59e236418c408fabaac5c717cbbafcc55 100644 --- a/src/rpc/src/thaship.c +++ b/src/rpc/src/thaship.c @@ -13,21 +13,13 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "tlog.h" #include "tmempool.h" typedef struct _ip_hash_t { uint32_t ip; - short port; + uint16_t port; int hash; struct _ip_hash_t *prev; struct _ip_hash_t *next; @@ -40,20 +32,20 @@ typedef struct { int maxSessions; } SHashObj; -int taosHashIp(void *handle, uint32_t ip, short port) { +int taosHashIp(void *handle, uint32_t ip, uint16_t port) { SHashObj *pObj = (SHashObj *)handle; int hash = 0; hash = (int)(ip >> 16); hash += (unsigned short)(ip & 0xFFFF); - hash += (unsigned short)port; + hash += port; hash = hash % pObj->maxSessions; return hash; } -void *taosAddIpHash(void *handle, void *data, uint32_t ip, short port) { +void *taosAddIpHash(void *handle, void *data, uint32_t ip, uint16_t port) { int hash; SIpHash * pNode; SHashObj *pObj; @@ -76,7 +68,7 @@ void *taosAddIpHash(void *handle, void *data, uint32_t ip, short port) { return pObj; } -void taosDeleteIpHash(void *handle, uint32_t ip, short port) { +void taosDeleteIpHash(void *handle, uint32_t ip, uint16_t port) { int hash; SIpHash * pNode; SHashObj *pObj; @@ -108,7 +100,7 @@ void taosDeleteIpHash(void *handle, uint32_t ip, short port) { } } -void *taosGetIpHash(void *handle, uint32_t ip, short port) { +void *taosGetIpHash(void *handle, uint32_t ip, uint16_t port) { int hash; SIpHash * pNode; SHashObj *pObj; diff --git a/src/rpc/src/tmsghdr.c b/src/rpc/src/tmsghdr.c index 960d1fc5f6a534f9de9055ac5b2590a98e31f8d2..a46f182b1d21b31e08c040b4990e74a27971b46e 100644 --- a/src/rpc/src/tmsghdr.c +++ b/src/rpc/src/tmsghdr.c @@ -13,10 +13,7 @@ * along with this program. If not, see . */ -#include -#include -#include -#include +#include "os.h" void taosFreeMsgHdr(void *hdr) { struct msghdr *msgHdr = (struct msghdr *)hdr; diff --git a/src/rpc/src/trpc.c b/src/rpc/src/trpc.c old mode 100644 new mode 100755 index 6f92154e9ef6d6dfa444eacfa687763c13f1b7e9..db1ca33841c502f2b983501880412d79bf02e175 --- a/src/rpc/src/trpc.c +++ b/src/rpc/src/trpc.c @@ -13,15 +13,6 @@ * along with this program. If not, see . 
*/ -#include -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "shash.h" #include "taosmsg.h" @@ -38,8 +29,7 @@ #include "ttimer.h" #include "tudp.h" #include "tutil.h" - -#pragma GCC diagnostic ignored "-Wpointer-to-int-cast" +#include "lz4.h" typedef struct _msg_node { struct _msg_node *next; @@ -58,23 +48,22 @@ typedef struct { char encrypt; uint8_t secret[TSDB_KEY_LEN]; uint8_t ckey[TSDB_KEY_LEN]; - - short localPort; // for UDP only + uint16_t localPort; // for UDP only uint32_t peerUid; uint32_t peerIp; // peer IP - short peerPort; // peer port + uint16_t peerPort; // peer port char peerIpstr[20]; // peer IP string uint16_t tranId; // outgoing transcation ID, for build message uint16_t outTranId; // outgoing transcation ID uint16_t inTranId; - char outType; + uint8_t outType; char inType; char closing; char rspReceived; void * chandle; // handle passed by TCP/UDP connection layer void * ahandle; // handle returned by upper app layter int retry; - int tretry; // total retry + int tretry; // total retry void * pTimer; void * pIdleTimer; char * pRspMsg; @@ -87,7 +76,7 @@ typedef struct { typedef struct { int sessions; - void * qhandle; // for scheduler + void * qhandle; // for scheduler SRpcConn * connList; void * idPool; void * tmrCtrl; @@ -102,12 +91,12 @@ typedef struct rpc_server { int mask; int numOfChanns; int numOfThreads; - int idMgmt; // ID management method + int idMgmt; // ID management method int type; - int idleTime; // milliseconds; - int noFree; // do not free the request msg when rsp is received - int index; // for UDP server, next thread for new connection - short localPort; + int idleTime; // milliseconds; + int noFree; // do not free the request msg when rsp is received + int index; // for UDP server, next thread for new connection + uint16_t localPort; char label[12]; void *(*fp)(char *, void *ahandle, void *thandle); void (*efp)(int); // FP to report error @@ -115,23 +104,22 @@ typedef struct rpc_server { SRpcChann *channList; } STaosRpc; - -int tsRpcProgressTime = 10; // milliseocnds +int tsRpcProgressTime = 10; // milliseocnds // not configurable int tsRpcMaxRetry; int tsRpcHeadSize; -void *(*taosInitConn[])(char *ip, short port, char *label, int threads, void *fp, void *shandle) = { +void *(*taosInitConn[])(char *ip, uint16_t port, char *label, int threads, void *fp, void *shandle) = { taosInitUdpServer, taosInitUdpClient, taosInitTcpServer, taosInitTcpClient}; void (*taosCleanUpConn[])(void *thandle) = {taosCleanUpUdpConnection, taosCleanUpUdpConnection, taosCleanUpTcpServer, taosCleanUpTcpClient}; -int (*taosSendData[])(uint32_t ip, short port, char *data, int len, void *chandle) = { +int (*taosSendData[])(uint32_t ip, uint16_t port, char *data, int len, void *chandle) = { taosSendUdpData, taosSendUdpData, taosSendTcpServerData, taosSendTcpClientData}; -void *(*taosOpenConn[])(void *shandle, void *thandle, char *ip, short port) = { +void *(*taosOpenConn[])(void *shandle, void *thandle, char *ip, uint16_t port) = { taosOpenUdpConnection, taosOpenUdpConnection, NULL, @@ -142,13 +130,98 @@ void (*taosCloseConn[])(void *chandle) = {NULL, NULL, taosCloseTcpServerConnecti int taosReSendRspToPeer(SRpcConn *pConn); void taosProcessTaosTimer(void *, void *); -void *taosProcessDataFromPeer(char *data, int dataLen, uint32_t ip, short port, void *shandle, void *thandle, +void *taosProcessDataFromPeer(char *data, int dataLen, uint32_t ip, uint16_t port, void *shandle, void *thandle, void *chandle); int 
taosSendDataToPeer(SRpcConn *pConn, char *data, int dataLen); void taosProcessSchedMsg(SSchedMsg *pMsg); int taosAuthenticateMsg(uint8_t *pMsg, int msgLen, uint8_t *pAuth, uint8_t *pKey); int taosBuildAuthHeader(uint8_t *pMsg, int msgLen, uint8_t *pAuth, uint8_t *pKey); +static int32_t taosCompressRpcMsg(char* pCont, int32_t contLen) { + STaosHeader* pHeader = (STaosHeader *)(pCont - sizeof(STaosHeader)); + int32_t overhead = sizeof(int32_t) * 2; + int32_t finalLen = 0; + + if (!NEEDTO_COMPRESSS_MSG(contLen)) { + return contLen; + } + + char *buf = malloc (contLen + overhead + 8); // 16 extra bytes + if (buf == NULL) { + tError("failed to allocate memory for rpc msg compression, contLen:%d, reason:%s", contLen, strerror(errno)); + return contLen; + } + + int32_t compLen = LZ4_compress_default(pCont, buf, contLen, contLen + overhead); + + /* + * the compression is applied only if the compressed size is smaller than contLen - overhead + * The first four bytes are set to 0; the second four bytes keep the original length of the message + */ + if (compLen < contLen - overhead) { + //tDump(pCont, contLen); + int32_t *pLen = (int32_t *)pCont; + + *pLen = 0; // first 4 bytes must be zero + pLen = (int32_t *)(pCont + sizeof(int32_t)); + + *pLen = htonl(contLen); // contLen is encoded in second 4 bytes + memcpy(pCont + overhead, buf, compLen); + + pHeader->comp = 1; + tTrace("compress rpc msg, before:%d, after:%d", contLen, compLen); + + finalLen = compLen + overhead; + //tDump(pCont, contLen); + } else { + finalLen = contLen; + } + + free(buf); + return finalLen; +} + +static STaosHeader* taosDecompressRpcMsg(STaosHeader* pHeader, SSchedMsg* pSchedMsg, int32_t msgLen) { + int overhead = sizeof(int32_t) * 2; + + if (pHeader->comp == 0) { + pSchedMsg->msg = (char *)(&(pHeader->destId)); + return pHeader; + } + + // decompress the content + assert(GET_INT32_VAL(pHeader->content) == 0); + + // contLen is the original message length before compression was applied + int contLen = htonl(GET_INT32_VAL(pHeader->content + sizeof(int32_t))); + + // prepare the temporary buffer to decompress the message + char *buf = malloc(sizeof(STaosHeader) + contLen); + + //tDump(pHeader->content, msgLen); + + if (buf) { + int32_t originalLen = LZ4_decompress_safe((const char*)(pHeader->content + overhead), buf + sizeof(STaosHeader), + msgLen - overhead, contLen); + + memcpy(buf, pHeader, sizeof(STaosHeader)); + free(pHeader); // free the compressed message buffer + + STaosHeader* pNewHeader = (STaosHeader *) buf; + pNewHeader->msgLen = originalLen + (int) sizeof(SIntMsg); + assert(originalLen == contLen); + + pSchedMsg->msg = (char *)(&(pNewHeader->destId)); + //tDump(pHeader->content, contLen); + return pNewHeader; + } else { + tError("failed to allocate memory to decompress msg, contLen:%d, reason:%s", contLen, strerror(errno)); + pSchedMsg->msg = NULL; + } + + return NULL; +} + char *taosBuildReqHeader(void *param, char type, char *msg) { STaosHeader *pHeader; SRpcConn * pConn = (SRpcConn *)param; @@ -159,18 +232,21 @@ char *taosBuildReqHeader(void *param, char type, char *msg) { } pHeader = (STaosHeader *)(msg + sizeof(SMsgNode)); + memset(pHeader, 0, sizeof(STaosHeader)); pHeader->version = 1; + pHeader->comp = 0; pHeader->msgType = type; pHeader->spi = 0; pHeader->tcp = 0; pHeader->encrypt = 0; - pHeader->tranId = __sync_add_and_fetch_32(&pConn->tranId, 1); - if (pHeader->tranId == 0) pHeader->tranId = __sync_add_and_fetch_32(&pConn->tranId, 1); + pHeader->tranId = atomic_add_fetch_32(&pConn->tranId, 1); 
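+ // a tranId of 0 is skipped: if the counter wraps around to 0, advance it once more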
+ if (pHeader->tranId == 0) pHeader->tranId = atomic_add_fetch_32(&pConn->tranId, 1); pHeader->sourceId = pConn->ownId; pHeader->destId = pConn->peerId; pHeader->port = 0; - pHeader->uid = (uint32_t)pConn + (uint32_t)getpid(); + + pHeader->uid = (uint32_t)((int64_t)pConn + (int64_t)getpid()); memcpy(pHeader->meterId, pConn->meterId, tListLen(pHeader->meterId)); @@ -196,12 +272,14 @@ char *taosBuildReqMsgWithSize(void *param, char type, int size) { pHeader->spi = 0; pHeader->tcp = 0; pHeader->encrypt = 0; - pHeader->tranId = __sync_add_and_fetch_32(&pConn->tranId, 1); - if (pHeader->tranId == 0) pHeader->tranId = __sync_add_and_fetch_32(&pConn->tranId, 1); + pHeader->tranId = atomic_add_fetch_32(&pConn->tranId, 1); + if (pHeader->tranId == 0) pHeader->tranId = atomic_add_fetch_32(&pConn->tranId, 1); pHeader->sourceId = pConn->ownId; pHeader->destId = pConn->peerId; - pHeader->uid = (uint32_t)pConn + (uint32_t)getpid(); + + pHeader->uid = (uint32_t)((int64_t)pConn + (int64_t)getpid()); + memcpy(pHeader->meterId, pConn->meterId, tListLen(pHeader->meterId)); return (char *)pHeader->content; @@ -250,6 +328,10 @@ int taosSendSimpleRsp(void *thandle, char rsptype, char code) { } pStart = taosBuildRspMsgWithSize(thandle, rsptype, 32); + if (pStart == NULL) { + tError("build rsp msg error, return null prt"); + return -1; + } pMsg = pStart; *pMsg = code; @@ -362,6 +444,8 @@ int taosOpenRpcChannWithQ(void *handle, int cid, int sessions, void *qhandle) { STaosRpc * pServer = (STaosRpc *)handle; SRpcChann *pChann; + tTrace("cid:%d, handle:%p open rpc chann", cid, handle); + if (pServer == NULL) return -1; if (cid >= pServer->numOfChanns || cid < 0) { tError("%s: cid:%d, chann is out of range, max:%d", pServer->label, cid, pServer->numOfChanns); @@ -410,6 +494,8 @@ void taosCloseRpcChann(void *handle, int cid) { STaosRpc * pServer = (STaosRpc *)handle; SRpcChann *pChann; + tTrace("cid:%d, handle:%p close rpc chann", cid, handle); + if (pServer == NULL) return; if (cid >= pServer->numOfChanns || cid < 0) { tError("%s cid:%d, chann is out of range, max:%d", pServer->label, cid, pServer->numOfChanns); @@ -724,7 +810,7 @@ void taosProcessResponse(SRpcConn *pConn) { } int taosProcessMsgHeader(STaosHeader *pHeader, SRpcConn **ppConn, STaosRpc *pServer, int dataLen, uint32_t ip, - short port, void *chandle) { + uint16_t port, void *chandle) { int chann, sid, code = 0; SRpcConn * pConn = NULL; SRpcChann *pChann; @@ -980,6 +1066,16 @@ int taosBuildErrorMsgToPeer(char *pMsg, int code, char *pReply) { return msgLen; } +void taosReportDisconnection(SRpcChann *pChann, SRpcConn *pConn) +{ + SSchedMsg schedMsg; + schedMsg.fp = taosProcessSchedMsg; + schedMsg.msg = NULL; + schedMsg.ahandle = pConn->ahandle; + schedMsg.thandle = pConn; + taosScheduleTask(pChann->qhandle, &schedMsg); +} + void taosProcessIdleTimer(void *param, void *tmrId) { SRpcConn *pConn = (SRpcConn *)param; if (pConn->signature != param) { @@ -995,25 +1091,23 @@ void taosProcessIdleTimer(void *param, void *tmrId) { return; } + int reportDisc = 0; + pthread_mutex_lock(&pChann->mutex); tTrace("%s cid:%d sid:%d id:%s, close the connection since no activity pConn:%p", pServer->label, pConn->chann, pConn->sid, pConn->meterId, pConn); if (pConn->rspReceived == 0) { pConn->rspReceived = 1; - - SSchedMsg schedMsg; - schedMsg.fp = taosProcessSchedMsg; - schedMsg.msg = NULL; - schedMsg.ahandle = pConn->ahandle; - schedMsg.thandle = pConn; - taosScheduleTask(pChann->qhandle, &schedMsg); + reportDisc = 1; } pthread_mutex_unlock(&pChann->mutex); + + if 
(reportDisc) taosReportDisconnection(pChann, pConn); } -void *taosProcessDataFromPeer(char *data, int dataLen, uint32_t ip, short port, void *shandle, void *thandle, +void *taosProcessDataFromPeer(char *data, int dataLen, uint32_t ip, uint16_t port, void *shandle, void *thandle, void *chandle) { STaosHeader *pHeader; uint8_t code; @@ -1035,11 +1129,7 @@ void *taosProcessDataFromPeer(char *data, int dataLen, uint32_t ip, short port, pConn->meterId, pConn); pConn->rspReceived = 1; pConn->chandle = NULL; - schedMsg.fp = taosProcessSchedMsg; - schedMsg.msg = NULL; - schedMsg.ahandle = pConn->ahandle; - schedMsg.thandle = pConn; - taosScheduleTask(pChann->qhandle, &schedMsg); + taosReportDisconnection(pChann, pConn); } tfree(data); return NULL; @@ -1078,7 +1168,9 @@ void *taosProcessDataFromPeer(char *data, int dataLen, uint32_t ip, short port, if (code != 0) { // parsing error - if (pHeader->msgType & 1) { + if (pHeader->msgType & 1U) { + memset(pReply, 0, sizeof(pReply)); + msgLen = taosBuildErrorMsgToPeer(data, code, pReply); (*taosSendData[pServer->type])(ip, port, pReply, msgLen, chandle); tTrace("%s cid:%d sid:%d id:%s, %s is sent with error code:%u pConn:%p", pServer->label, chann, sid, @@ -1093,17 +1185,17 @@ void *taosProcessDataFromPeer(char *data, int dataLen, uint32_t ip, short port, // parsing OK // internal communication is based on TAOS protocol, a trick here to make it efficient - pHeader->msgLen = msgLen - (int)sizeof(STaosHeader) + (int)sizeof(SIntMsg); - if (pHeader->spi) pHeader->msgLen -= sizeof(STaosDigest); + if (pHeader->spi) msgLen -= sizeof(STaosDigest); + msgLen -= (int)sizeof(STaosHeader); + pHeader->msgLen = msgLen + (int)sizeof(SIntMsg); - if ((pHeader->msgType & 1) == 0 && (pHeader->content[0] == TSDB_CODE_INVALID_VALUE)) { + if ((pHeader->msgType & 1U) == 0 && (pHeader->content[0] == TSDB_CODE_INVALID_VALUE)) { schedMsg.msg = NULL; // connection shall be closed } else { - schedMsg.msg = (char *)(&(pHeader->destId)); - // memcpy(schedMsg.msg, (char *)(&(pHeader->destId)), pHeader->msgLen); + pHeader = taosDecompressRpcMsg(pHeader, &schedMsg, msgLen); } - if (pHeader->msgType < TSDB_MSG_TYPE_HEARTBEAT || (rpcDebugFlag & 16)) { + if (pHeader->msgType < TSDB_MSG_TYPE_HEARTBEAT || (rpcDebugFlag & 16U)) { tTrace("%s cid:%d sid:%d id:%s, %s is put into queue, msgLen:%d pConn:%p pTimer:%p", pServer->label, chann, sid, pHeader->meterId, taosMsg[pHeader->msgType], pHeader->msgLen, pConn, pConn->pTimer); } @@ -1134,10 +1226,14 @@ int taosSendMsgToPeerH(void *thandle, char *pCont, int contLen, void *ahandle) { pServer = pConn->pServer; pChann = pServer->channList + pConn->chann; pHeader = (STaosHeader *)(pCont - sizeof(STaosHeader)); + pHeader->destIp = pConn->peerIp; msg = (char *)pHeader; - msgLen = contLen + (int32_t)sizeof(STaosHeader); - if ((pHeader->msgType & 1) == 0 && pConn->localPort) pHeader->port = pConn->localPort; + if ((pHeader->msgType & 1U) == 0 && pConn->localPort) pHeader->port = pConn->localPort; + + contLen = taosCompressRpcMsg(pCont, contLen); + + msgLen = contLen + (int32_t)sizeof(STaosHeader); if (pConn->spi) { // add auth part @@ -1154,7 +1250,7 @@ int taosSendMsgToPeerH(void *thandle, char *pCont, int contLen, void *ahandle) { pthread_mutex_lock(&pChann->mutex); msgType = pHeader->msgType; - if ((msgType & 1) == 0) { + if ((msgType & 1U) == 0) { // response pConn->inType = 0; tfree(pConn->pRspMsg); @@ -1246,6 +1342,7 @@ void taosProcessTaosTimer(void *param, void *tmrId) { STaosHeader *pHeader = NULL; SRpcConn * pConn = (SRpcConn *)param; int 
msgLen; + int reportDisc = 0; if (pConn->signature != param) { tError("pConn Signature:0x%x, pConn:0x%x not matched", pConn->signature, param); @@ -1295,13 +1392,7 @@ void taosProcessTaosTimer(void *param, void *tmrId) { pConn->sid, pConn->meterId, taosMsg[pConn->outType], pConn->peerIpstr, pConn->peerPort, pConn); if (pConn->rspReceived == 0) { pConn->rspReceived = 1; - - SSchedMsg schedMsg; - schedMsg.fp = taosProcessSchedMsg; - schedMsg.msg = NULL; - schedMsg.ahandle = pConn->ahandle; - schedMsg.thandle = pConn; - taosScheduleTask(pChann->qhandle, &schedMsg); + reportDisc = 1; } } } @@ -1313,9 +1404,10 @@ void taosProcessTaosTimer(void *param, void *tmrId) { pthread_mutex_unlock(&pChann->mutex); + if (reportDisc) taosReportDisconnection(pChann, pConn); } -void taosGetRpcConnInfo(void *thandle, uint32_t *peerId, uint32_t *peerIp, short *peerPort, int *cid, int *sid) { +void taosGetRpcConnInfo(void *thandle, uint32_t *peerId, uint32_t *peerIp, uint16_t *peerPort, int *cid, int *sid) { SRpcConn *pConn = (SRpcConn *)thandle; *peerId = pConn->peerId; @@ -1359,22 +1451,19 @@ void taosStopRpcConn(void *thandle) { tTrace("%s cid:%d sid:%d id:%s, stop the connection pConn:%p", pServer->label, pConn->chann, pConn->sid, pConn->meterId, pConn); + int reportDisc = 0; pthread_mutex_lock(&pChann->mutex); if (pConn->outType) { pConn->rspReceived = 1; - SSchedMsg schedMsg; - schedMsg.fp = taosProcessSchedMsg; - schedMsg.msg = NULL; - schedMsg.ahandle = pConn->ahandle; - schedMsg.thandle = pConn; + reportDisc = 1; pthread_mutex_unlock(&pChann->mutex); - - taosScheduleTask(pChann->qhandle, &schedMsg); } else { pthread_mutex_unlock(&pChann->mutex); taosCloseRpcConn(pConn); } + + if (reportDisc) taosReportDisconnection(pChann, pConn); } int taosAuthenticateMsg(uint8_t *pMsg, int msgLen, uint8_t *pAuth, uint8_t *pKey) { diff --git a/src/rpc/src/tstring.c b/src/rpc/src/tstring.c index c7afe54d9a4d328ef07d97198b7a50695a504240..a4fc2b2c71d7ab56597e6943525dce59f4f5fe74 100644 --- a/src/rpc/src/tstring.c +++ b/src/rpc/src/tstring.c @@ -145,7 +145,7 @@ char *tsError[] = {"success", "not online", "send failed", "not active session", // 20 - "insert failed", + "invalid vnode id", "App error", "invalid IE", "invalid value", @@ -178,7 +178,7 @@ char *tsError[] = {"success", "others", "can't remove dnode which is master", "wrong schema", - "no results", + "vnode not active(not created yet or dropped already)", "num of users execeed maxUsers", //55 "num of databases execeed maxDbs", "num of tables execeed maxTables", @@ -233,9 +233,14 @@ char *tsError[] = {"success", "invalid query message", "timestamp disordered in cache block", "timestamp disordered in file block", - "invalid commit log", //110 - "server no disk space", + "invalid commit log", + "server no disk space", //110 "only super table has metric meta info", "tags value not unique for join", "invalid submit message", + "not active table(not created yet or dropped already)", + "invalid table id", // 115 + "invalid vnode status", + "failed to lock resources", + "table id/uid mismatch", // 118 }; diff --git a/src/rpc/src/ttcpclient.c b/src/rpc/src/ttcpclient.c index e12f1e1728ec96a97326b5278c7ac997a0f6b89c..3d39be92fe4fd1c4476e35d56b88ed2fa5c9e474 100644 --- a/src/rpc/src/ttcpclient.c +++ b/src/rpc/src/ttcpclient.c @@ -13,15 +13,6 @@ * along with this program. If not, see . 
*/ -#include -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "taosmsg.h" #include "tlog.h" @@ -39,7 +30,7 @@ typedef struct _tcp_fd { void * thandle; uint32_t ip; char ipstr[20]; - short port; + uint16_t port; struct _tcp_client *pTcp; struct _tcp_fd * prev, *next; } STcpFd; @@ -54,7 +45,7 @@ typedef struct _tcp_client { char label[12]; char ipstr[20]; void * shandle; // handle passed by upper layer during server initialization - void *(*processData)(char *data, int dataLen, unsigned int ip, short port, void *shandle, void *thandle, + void *(*processData)(char *data, int dataLen, unsigned int ip, uint16_t port, void *shandle, void *thandle, void *chandle); // char buffer[128000]; } STcpClient; @@ -203,7 +194,7 @@ static void *taosReadTcpData(void *param) { return NULL; } -void *taosInitTcpClient(char *ip, short port, char *label, int num, void *fp, void *shandle) { +void *taosInitTcpClient(char *ip, uint16_t port, char *label, int num, void *fp, void *shandle) { STcpClient * pTcp; pthread_attr_t thattr; @@ -238,7 +229,7 @@ void *taosInitTcpClient(char *ip, short port, char *label, int num, void *fp, vo return NULL; } - tTrace("%s TCP client is initialized, ip:%s port:%u", label, ip, port); + tTrace("%s TCP client is initialized, ip:%s port:%hu", label, ip, port); return pTcp; } @@ -251,7 +242,7 @@ void taosCloseTcpClientConnection(void *chandle) { taosCleanUpTcpFdObj(pFdObj); } -void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, short port) { +void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, uint16_t port) { STcpClient * pTcp = (STcpClient *)shandle; STcpFd * pFdObj; struct epoll_event event; @@ -310,12 +301,12 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, short pthread_mutex_unlock(&(pTcp->mutex)); - tTrace("%s TCP connection to ip:%s port:%u is created, numOfFds:%d", pTcp->label, ip, port, pTcp->numOfFds); + tTrace("%s TCP connection to ip:%s port:%hu is created, numOfFds:%d", pTcp->label, ip, port, pTcp->numOfFds); return pFdObj; } -int taosSendTcpClientData(uint32_t ip, short port, char *data, int len, void *chandle) { +int taosSendTcpClientData(uint32_t ip, uint16_t port, char *data, int len, void *chandle) { STcpFd *pFdObj = (STcpFd *)chandle; if (chandle == NULL) return -1; diff --git a/src/rpc/src/ttcpserver.c b/src/rpc/src/ttcpserver.c index b6b0e07230773fdf6633cba5b1b90ac37091c9a6..29ada20bc427455edf8c5b771178a33aa8214ba3 100644 --- a/src/rpc/src/ttcpserver.c +++ b/src/rpc/src/ttcpserver.c @@ -13,16 +13,6 @@ * along with this program. If not, see . 
*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "taosmsg.h" #include "tlog.h" @@ -42,7 +32,7 @@ typedef struct _fd_obj { void * thandle; // handle from upper layer, like TAOS char ipstr[TAOS_IPv4ADDR_LEN]; unsigned int ip; - unsigned short port; + uint16_t port; struct _thread_obj *pThreadObj; struct _fd_obj * prev, *next; } SFdObj; @@ -58,13 +48,13 @@ typedef struct _thread_obj { char label[12]; // char buffer[128000]; // buffer to receive data void *shandle; // handle passed by upper layer during server initialization - void *(*processData)(char *data, int dataLen, unsigned int ip, short port, void *shandle, void *thandle, + void *(*processData)(char *data, int dataLen, unsigned int ip, uint16_t port, void *shandle, void *thandle, void *chandle); } SThreadObj; typedef struct { char ip[40]; - short port; + uint16_t port; char label[12]; int numOfThreads; void * shandle; @@ -195,8 +185,9 @@ static void taosProcessTcpData(void *param) { void *buffer = malloc(1024); int headLen = taosReadMsg(pFdObj->fd, buffer, sizeof(STaosHeader)); + if (headLen != sizeof(STaosHeader)) { - tError("%s read error, headLen:%d", pThreadObj->label, headLen); + tError("%s read error, headLen:%d, errno:%d", pThreadObj->label, headLen, errno); taosCleanUpFdObj(pFdObj); tfree(buffer); continue; @@ -218,7 +209,7 @@ static void taosProcessTcpData(void *param) { continue; } - pFdObj->thandle = (*(pThreadObj->processData))(buffer, dataLen, pFdObj->ip, (int16_t)pFdObj->port, + pFdObj->thandle = (*(pThreadObj->processData))(buffer, dataLen, pFdObj->ip, pFdObj->port, pThreadObj->shandle, pFdObj->thandle, pFdObj); if (pFdObj->thandle == NULL) taosCleanUpFdObj(pFdObj); @@ -241,10 +232,10 @@ void taosAcceptTcpConnection(void *arg) { sockFd = taosOpenTcpServerSocket(pServerObj->ip, pServerObj->port); if (sockFd < 0) { - tError("%s failed to open TCP socket, ip:%s, port:%u", pServerObj->label, pServerObj->ip, pServerObj->port); + tError("%s failed to open TCP socket, ip:%s, port:%hu", pServerObj->label, pServerObj->ip, pServerObj->port); return; } else { - tTrace("%s TCP server is ready, ip:%s, port:%u", pServerObj->label, pServerObj->ip, pServerObj->port); + tTrace("%s TCP server is ready, ip:%s, port:%hu", pServerObj->label, pServerObj->ip, pServerObj->port); } while (1) { @@ -256,7 +247,7 @@ void taosAcceptTcpConnection(void *arg) { continue; } - tTrace("%s TCP connection from ip:%s port:%u", pServerObj->label, inet_ntoa(clientAddr.sin_addr), + tTrace("%s TCP connection from ip:%s port:%hu", pServerObj->label, inet_ntoa(clientAddr.sin_addr), htons(clientAddr.sin_port)); taosKeepTcpAlive(connFd); @@ -301,7 +292,7 @@ void taosAcceptTcpConnection(void *arg) { pthread_mutex_unlock(&(pThreadObj->threadMutex)); - tTrace("%s TCP thread:%d, a new connection, ip:%s port:%u, numOfFds:%d", pServerObj->label, pThreadObj->threadId, + tTrace("%s TCP thread:%d, a new connection, ip:%s port:%hu, numOfFds:%d", pServerObj->label, pThreadObj->threadId, pFdObj->ipstr, pFdObj->port, pThreadObj->numOfFds); // pick up next thread for next connection @@ -323,10 +314,10 @@ void taosAcceptUDConnection(void *arg) { sockFd = taosOpenUDServerSocket(pServerObj->ip, pServerObj->port); if (sockFd < 0) { - tError("%s failed to open UD socket, ip:%s, port:%u", pServerObj->label, pServerObj->ip, pServerObj->port); + tError("%s failed to open UD socket, ip:%s, port:%hu", pServerObj->label, pServerObj->ip, pServerObj->port); return; } else { - tTrace("%s UD server is ready, ip:%s, port:%u", 
pServerObj->label, pServerObj->ip, pServerObj->port); + tTrace("%s UD server is ready, ip:%s, port:%hu", pServerObj->label, pServerObj->ip, pServerObj->port); } while (1) { @@ -383,7 +374,7 @@ void taosAcceptUDConnection(void *arg) { } } -void *taosInitTcpServer(char *ip, short port, char *label, int numOfThreads, void *fp, void *shandle) { +void *taosInitTcpServer(char *ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle) { int i; SServerObj * pServerObj; pthread_attr_t thattr; @@ -451,7 +442,7 @@ void *taosInitTcpServer(char *ip, short port, char *label, int numOfThreads, voi } */ pthread_attr_destroy(&thattr); - tTrace("%s TCP server is initialized, ip:%s port:%u numOfThreads:%d", label, ip, port, numOfThreads); + tTrace("%s TCP server is initialized, ip:%s port:%hu numOfThreads:%d", label, ip, port, numOfThreads); return (void *)pServerObj; } @@ -477,7 +468,7 @@ void taosListTcpConnection(void *handle, char *buffer) { msg = msg + strlen(msg); pFdObj = pThreadObj->pHead; while (pFdObj) { - sprintf(" ip:%s port:%u\n", pFdObj->ipstr, pFdObj->port); + sprintf(" ip:%s port:%hu\n", pFdObj->ipstr, pFdObj->port); msg = msg + strlen(msg); numOfFds++; numOfConns++; @@ -496,7 +487,7 @@ void taosListTcpConnection(void *handle, char *buffer) { return; } -int taosSendTcpServerData(uint32_t ip, short port, char *data, int len, void *chandle) { +int taosSendTcpServerData(uint32_t ip, uint16_t port, char *data, int len, void *chandle) { SFdObj *pFdObj = (SFdObj *)chandle; if (chandle == NULL) return -1; diff --git a/src/rpc/src/tudp.c b/src/rpc/src/tudp.c index 1940c89e85eecee1c8fa116cbe621ec9a0de9874..fb0b37d93baaf98a6caed35ec980de72090819f2 100644 --- a/src/rpc/src/tudp.c +++ b/src/rpc/src/tudp.c @@ -13,15 +13,6 @@ * along with this program. If not, see . 
*/ -#include -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "taosmsg.h" #include "thash.h" @@ -44,8 +35,8 @@ typedef struct { void * signature; int index; int fd; - short port; // peer port - short localPort; // local port + uint16_t port; // peer port + uint16_t localPort; // local port char label[12]; // copy from udpConnSet; pthread_t thread; pthread_mutex_t mutex; @@ -53,7 +44,7 @@ typedef struct { void * hash; void * shandle; // handle passed by upper layer during server initialization void * pSet; - void *(*processData)(char *data, int dataLen, unsigned int ip, short port, void *shandle, void *thandle, + void *(*processData)(char *data, int dataLen, unsigned int ip, uint16_t port, void *shandle, void *thandle, void *chandle); char buffer[RPC_MAX_UDP_SIZE]; // buffer to receive data } SUdpConn; @@ -62,21 +53,21 @@ typedef struct { int index; int server; char ip[16]; // local IP - short port; // local Port + uint16_t port; // local Port void * shandle; // handle passed by upper layer during server initialization int threads; char label[12]; void * tmrCtrl; pthread_t tcpThread; int tcpFd; - void *(*fp)(char *data, int dataLen, uint32_t ip, short port, void *shandle, void *thandle, void *chandle); + void *(*fp)(char *data, int dataLen, uint32_t ip, uint16_t port, void *shandle, void *thandle, void *chandle); SUdpConn udpConn[]; } SUdpConnSet; typedef struct { void * signature; uint32_t ip; // dest IP - short port; // dest Port + uint16_t port; // dest Port SUdpConn * pConn; struct sockaddr_in destAdd; void * msgHdr; @@ -153,12 +144,12 @@ void *taosReadTcpData(void *argv) { pInfo->msgLen = (int32_t)htonl((uint32_t)pInfo->msgLen); tinet_ntoa(ipstr, pMonitor->ip); - tTrace("%s receive packet via TCP:%s:%d, msgLen:%d, handle:0x%x, source:0x%08x dest:0x%08x tranId:%d", pSet->label, + tTrace("%s receive packet via TCP:%s:%hu, msgLen:%d, handle:0x%x, source:0x%08x dest:0x%08x tranId:%d", pSet->label, ipstr, pInfo->port, pInfo->msgLen, pInfo->handle, pHead->sourceId, pHead->destId, pHead->tranId); fd = taosOpenTcpClientSocket(ipstr, (int16_t)pInfo->port, tsLocalIp); if (fd < 0) { - tError("%s failed to open TCP client socket ip:%s:%d", pSet->label, ipstr, pInfo->port); + tError("%s failed to open TCP client socket ip:%s:%hu", pSet->label, ipstr, pInfo->port); pMonitor->pSet = NULL; return NULL; } @@ -189,7 +180,7 @@ void *taosReadTcpData(void *argv) { tError("%s failed to read data from server, msgLen:%d retLen:%d", pSet->label, pInfo->msgLen, retLen); tfree(buffer); } else { - (*pSet->fp)(buffer, pInfo->msgLen, pMonitor->ip, (int16_t)pInfo->port, pSet->shandle, NULL, pMonitor->pConn); + (*pSet->fp)(buffer, pInfo->msgLen, pMonitor->ip, pInfo->port, pSet->shandle, NULL, pMonitor->pConn); } } @@ -233,7 +224,7 @@ void *taosRecvUdpData(void *param) { struct sockaddr_in sourceAdd; unsigned int addLen, dataLen; SUdpConn * pConn = (SUdpConn *)param; - short port; + uint16_t port; int minSize = sizeof(STaosHeader); memset(&sourceAdd, 0, sizeof(sourceAdd)); @@ -251,7 +242,7 @@ void *taosRecvUdpData(void *param) { continue; } - port = (int16_t)ntohs(sourceAdd.sin_port); + port = ntohs(sourceAdd.sin_port); int processedLen = 0, leftLen = 0; int msgLen = 0; @@ -316,7 +307,7 @@ void *taosTransferDataViaTcp(void *argv) { if (handle == 0) { // receive a packet from client - tTrace("%s data will be received via TCP from 0x%x:%d", pSet->label, pTransfer->ip, pTransfer->port); + tTrace("%s data will be received via TCP from 0x%x:%hu", pSet->label, pTransfer->ip, 
pTransfer->port); retLen = taosReadMsg(connFd, &head, sizeof(STaosHeader)); if (retLen != (int)sizeof(STaosHeader)) { tError("%s failed to read msg header, retLen:%d", pSet->label, retLen); @@ -354,7 +345,7 @@ void *taosTransferDataViaTcp(void *argv) { tError("%s failed to read data from client, leftLen:%d retLen:%d, error:%s", pSet->label, leftLen, retLen, strerror(errno)); } else { - tTrace("%s data is received from client via TCP from 0x%x:%d, msgLen:%d", pSet->label, pTransfer->ip, + tTrace("%s data is received from client via TCP from 0x%x:%hu, msgLen:%d", pSet->label, pTransfer->ip, pTransfer->port, msgLen); pSet->index = (pSet->index + 1) % pSet->threads; SUdpConn *pConn = pSet->udpConn + pSet->index; @@ -386,6 +377,7 @@ void *taosTransferDataViaTcp(void *argv) { pThead->tcp = 1; pThead->msgType = (char)(pHeader->msgType - 1); pThead->msgLen = (int32_t)htonl(sizeof(STaosHeader)); + uint32_t id = pThead->sourceId; pThead->sourceId = pThead->destId; pThead->destId = id; pMonitor->ip = pTransfer->ip; pMonitor->port = pTransfer->port; pMonitor->pSet = pSet; @@ -397,7 +389,7 @@ void *taosTransferDataViaTcp(void *argv) { if (retLen != msgLen) { tError("%s failed to send data to client, msgLen:%d retLen:%d", pSet->label, msgLen, retLen); } else { - tTrace("%s data is sent to client successfully via TCP to 0x%x:%d, size:%d", pSet->label, pTransfer->ip, + tTrace("%s data is sent to client successfully via TCP to 0x%x:%hu, size:%d", pSet->label, pTransfer->ip, pTransfer->port, msgLen); } } @@ -422,13 +414,13 @@ void *taosUdpTcpConnection(void *argv) { pSet->tcpFd = taosOpenTcpServerSocket(pSet->ip, pSet->port); if (pSet->tcpFd < 0) { - tPrint("%s failed to create TCP socket %s:%d for UDP server, reason:%s", pSet->label, pSet->ip, pSet->port, + tPrint("%s failed to create TCP socket %s:%hu for UDP server, reason:%s", pSet->label, pSet->ip, pSet->port, strerror(errno)); taosKillSystem(); return NULL; } - tTrace("%s UDP server is created, ip:%s:%d", pSet->label, pSet->ip, pSet->port); + tTrace("%s UDP server is created, ip:%s:%hu", pSet->label, pSet->ip, pSet->port); pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); @@ -464,7 +456,7 @@ void *taosUdpTcpConnection(void *argv) { return NULL; } -void *taosInitUdpConnection(char *ip, short port, char *label, int threads, void *fp, void *shandle) { +void *taosInitUdpConnection(char *ip, uint16_t port, char *label, int threads, void *fp, void *shandle) { pthread_attr_t thAttr; SUdpConn * pConn; SUdpConnSet * pSet; @@ -497,13 +489,13 @@ void *taosInitUdpConnection(char *ip, short port, char *label, int threads, void pthread_attr_init(&thAttr); pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - short ownPort; + uint16_t ownPort; for (int i = 0; i < threads; ++i) { pConn = pSet->udpConn + i; - ownPort = (int16_t)(port ? port + i : 0); + ownPort = (port ? 
port + i : 0); pConn->fd = taosOpenUdpSocket(ip, ownPort); if (pConn->fd < 0) { - tError("%s failed to open UDP socket %s:%d", label, ip, port); + tError("%s failed to open UDP socket %s:%hu", label, ip, port); taosCleanUpUdpConnection(pSet); return NULL; } @@ -515,6 +507,8 @@ void *taosInitUdpConnection(char *ip, short port, char *label, int threads, void pConn->localPort = (int16_t)ntohs(sin.sin_port); } + strcpy(pConn->label, label); + if (pthread_create(&pConn->thread, &thAttr, taosRecvUdpData, pConn) != 0) { tError("%s failed to create thread to process UDP data, reason:%s", label, strerror(errno)); taosCloseSocket(pConn->fd); @@ -522,7 +516,6 @@ void *taosInitUdpConnection(char *ip, short port, char *label, int threads, void return NULL; } - strcpy(pConn->label, label); pConn->shandle = shandle; pConn->processData = fp; pConn->index = i; @@ -537,12 +530,12 @@ void *taosInitUdpConnection(char *ip, short port, char *label, int threads, void } pthread_attr_destroy(&thAttr); - tTrace("%s UDP connection is initialized, ip:%s port:%u threads:%d", label, ip, port, threads); + tTrace("%s UDP connection is initialized, ip:%s port:%hu threads:%d", label, ip, port, threads); return pSet; } -void *taosInitUdpServer(char *ip, short port, char *label, int threads, void *fp, void *shandle) { +void *taosInitUdpServer(char *ip, uint16_t port, char *label, int threads, void *fp, void *shandle) { SUdpConnSet *pSet; pSet = taosInitUdpConnection(ip, port, label, threads, fp, shandle); if (pSet == NULL) return NULL; @@ -563,7 +556,7 @@ void *taosInitUdpServer(char *ip, short port, char *label, int threads, void *fp return pSet; } -void *taosInitUdpClient(char *ip, short port, char *label, int threads, void *fp, void *shandle) { +void *taosInitUdpClient(char *ip, uint16_t port, char *label, int threads, void *fp, void *shandle) { return taosInitUdpConnection(ip, port, label, threads, fp, shandle); } @@ -599,7 +592,7 @@ void taosCleanUpUdpConnection(void *handle) { tfree(pSet); } -void *taosOpenUdpConnection(void *shandle, void *thandle, char *ip, short port) { +void *taosOpenUdpConnection(void *shandle, void *thandle, char *ip, uint16_t port) { SUdpConnSet *pSet = (SUdpConnSet *)shandle; pSet->index = (pSet->index + 1) % pSet->threads; @@ -607,7 +600,7 @@ void *taosOpenUdpConnection(void *shandle, void *thandle, char *ip, short port) SUdpConn *pConn = pSet->udpConn + pSet->index; pConn->port = port; - tTrace("%s UDP connection is setup, ip: %s:%d, local: %s:%d", pConn->label, ip, port, pSet->ip, + tTrace("%s UDP connection is setup, ip: %s:%hu, local: %s:%d", pConn->label, ip, port, pSet->ip, ntohs((uint16_t)pConn->localPort)); return pConn; @@ -651,7 +644,7 @@ void taosProcessUdpBufTimer(void *param, void *tmrId) { if (pBuf) taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer); } -SUdpBuf *taosCreateUdpBuf(SUdpConn *pConn, uint32_t ip, short port) { +SUdpBuf *taosCreateUdpBuf(SUdpConn *pConn, uint32_t ip, uint16_t port) { SUdpBuf *pBuf = (SUdpBuf *)malloc(sizeof(SUdpBuf)); memset(pBuf, 0, sizeof(SUdpBuf)); @@ -661,7 +654,7 @@ SUdpBuf *taosCreateUdpBuf(SUdpConn *pConn, uint32_t ip, short port) { pBuf->destAdd.sin_family = AF_INET; pBuf->destAdd.sin_addr.s_addr = ip; - pBuf->destAdd.sin_port = (uint16_t)htons((uint16_t)port); + pBuf->destAdd.sin_port = (uint16_t)htons(port); taosInitMsgHdr(&(pBuf->msgHdr), &(pBuf->destAdd), RPC_MAX_UDP_PKTS); pBuf->signature = pBuf; taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer); @@ -672,7 
+665,7 @@ SUdpBuf *taosCreateUdpBuf(SUdpConn *pConn, uint32_t ip, short port) { return pBuf; } -int taosSendPacketViaTcp(uint32_t ip, short port, char *data, int dataLen, void *chandle) { +int taosSendPacketViaTcp(uint32_t ip, uint16_t port, char *data, int dataLen, void *chandle) { SUdpConn * pConn = (SUdpConn *)chandle; SUdpConnSet *pSet = (SUdpConnSet *)pConn->pSet; int code = -1, retLen, msgLen; @@ -689,13 +682,13 @@ int taosSendPacketViaTcp(uint32_t ip, short port, char *data, int dataLen, void SPacketInfo *pInfo = (SPacketInfo *)pHead->content; pInfo->handle = (uint64_t)data; - pInfo->port = (uint16_t)pSet->port; + pInfo->port = pSet->port; pInfo->msgLen = pHead->msgLen; msgLen = sizeof(STaosHeader) + sizeof(SPacketInfo); pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); code = taosSendUdpData(ip, port, buffer, msgLen, chandle); - tTrace("%s data from server will be sent via TCP:%d, msgType:%d, length:%d, handle:0x%x", pSet->label, pInfo->port, + tTrace("%s data from server will be sent via TCP:%hu, msgType:%d, length:%d, handle:0x%x", pSet->label, pInfo->port, pHead->msgType, htonl((uint32_t)pInfo->msgLen), pInfo->handle); if (code > 0) code = dataLen; } else { @@ -715,7 +708,7 @@ int taosSendPacketViaTcp(uint32_t ip, short port, char *data, int dataLen, void tinet_ntoa(ipstr, ip); int fd = taosOpenTcpClientSocket(ipstr, pConn->port, tsLocalIp); if (fd < 0) { - tError("%s failed to open TCP socket to:%s:%u to send packet", pSet->label, ipstr, pConn->port); + tError("%s failed to open TCP socket to:%s:%hu to send packet", pSet->label, ipstr, pConn->port); } else { SHandleViaTcp handleViaTcp; taosInitHandleViaTcp(&handleViaTcp, 0); @@ -743,7 +736,7 @@ int taosSendPacketViaTcp(uint32_t ip, short port, char *data, int dataLen, void return code; } -int taosSendUdpData(uint32_t ip, short port, char *data, int dataLen, void *chandle) { +int taosSendUdpData(uint32_t ip, uint16_t port, char *data, int dataLen, void *chandle) { SUdpConn *pConn = (SUdpConn *)chandle; SUdpBuf * pBuf; @@ -756,7 +749,7 @@ int taosSendUdpData(uint32_t ip, short port, char *data, int dataLen, void *chan memset(&destAdd, 0, sizeof(destAdd)); destAdd.sin_family = AF_INET; destAdd.sin_addr.s_addr = ip; - destAdd.sin_port = htons((uint16_t)port); + destAdd.sin_port = htons(port); int ret = (int)sendto(pConn->fd, data, (size_t)dataLen, 0, (struct sockaddr *)&destAdd, sizeof(destAdd)); tTrace("%s msg is sent to 0x%x:%hu len:%d ret:%d localPort:%hu chandle:0x%x", pConn->label, destAdd.sin_addr.s_addr, diff --git a/src/sdb/CMakeLists.txt b/src/sdb/CMakeLists.txt index 76b407fdedc06f0463baa6b6261c3e6bf4c7d29e..6b29eefde6c4792f691fae9f0d5c370751460c93 100644 --- a/src/sdb/CMakeLists.txt +++ b/src/sdb/CMakeLists.txt @@ -5,11 +5,11 @@ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc) INCLUDE_DIRECTORIES(inc) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(sdb ${SRC}) TARGET_LINK_LIBRARIES(sdb trpc) IF (TD_CLUSTER) TARGET_LINK_LIBRARIES(sdb sdb_cluster) ENDIF () -ENDIF () \ No newline at end of file +ENDIF () diff --git a/src/sdb/inc/sdbint.h b/src/sdb/inc/sdbint.h index 3327c1f7317cc6483346a7a7a7259b88a69ef6f7..c5b4f4e4aeccf6bd8664e067e7d74146e023b10b 100644 --- a/src/sdb/inc/sdbint.h +++ b/src/sdb/inc/sdbint.h @@ -127,7 +127,7 @@ typedef struct { } SMnodeStatus; typedef struct { - char dbId; + uint8_t dbId; char type; uint64_t version; short dataLen; diff --git a/src/sdb/src/hashstr.c b/src/sdb/src/hashstr.c index 
1a9a7fefb4f1f2eafd6aa485f6217811841ddbce..4b9aa3392853424618e574b03a50b17a51f2b320 100644 --- a/src/sdb/src/hashstr.c +++ b/src/sdb/src/hashstr.c @@ -13,13 +13,7 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include -#include +#include "os.h" #include "tsdb.h" #define MAX_STR_LEN 40 diff --git a/src/sdb/src/sdbEngine.c b/src/sdb/src/sdbEngine.c index 59e3d7e0398f0c1af175f867b7408fe3db545d84..0efa81866f3b683b4027662a6b3d015e74d391ce 100644 --- a/src/sdb/src/sdbEngine.c +++ b/src/sdb/src/sdbEngine.c @@ -13,19 +13,7 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "os.h" #include "sdb.h" #include "sdbint.h" @@ -36,7 +24,12 @@ extern char version[]; const int16_t sdbFileVersion = 0; int sdbExtConns = 0; + +#ifdef CLUSTER +int sdbMaster = 0; +#else int sdbMaster = 1; +#endif void *(*sdbInitIndexFp[])(int maxRows, int dataSize) = {sdbOpenStrHash, sdbOpenIntHash, sdbOpenIntHash}; @@ -110,7 +103,7 @@ int sdbOpenSdbFile(SSdbTable *pTable) { pTable->header.swVersion = swVersion.iversion; pTable->header.sdbFileVersion = sdbFileVersion; if (taosCalcChecksumAppend(0, (uint8_t *)(&pTable->header), size) < 0) { - sdbError("failed to get file header checksum, file: %s", pTable->fn); + sdbError("failed to get file header checksum, file:%s", pTable->fn); tclose(pTable->fd); return -1; } @@ -132,17 +125,17 @@ int sdbOpenSdbFile(SSdbTable *pTable) { ssize_t tsize = read(pTable->fd, &(pTable->header), size); if (tsize < size) { - sdbError("failed to read sdb file header, file: %s", pTable->fn); + sdbError("failed to read sdb file header, file:%s", pTable->fn); tclose(pTable->fd); return -1; } if (pTable->header.swVersion != swVersion.iversion) { - sdbWarn("sdb file %s version not match software version", pTable->fn); + sdbWarn("sdb file:%s version not match software version", pTable->fn); } if (!taosCheckChecksumWhole((uint8_t *)(&pTable->header), size)) { - sdbError("sdb file header is broken since checksum mismatch, file: %s", pTable->fn); + sdbError("sdb file header is broken since checksum mismatch, file:%s", pTable->fn); tclose(pTable->fd); return -1; } @@ -178,6 +171,7 @@ int sdbInitTableByFile(SSdbTable *pTable) { void * pMetaRow = NULL; int total_size = 0; int real_size = 0; + int maxAutoIndex = 0; oldId = pTable->id; if (sdbOpenSdbFile(pTable) < 0) return -1; @@ -185,17 +179,19 @@ int sdbInitTableByFile(SSdbTable *pTable) { total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); SRowHead *rowHead = (SRowHead *)malloc(total_size); if (rowHead == NULL) { - sdbError("failed to allocate row head memory, sdb: %s", pTable->name); + sdbError("failed to allocate row head memory, sdb:%s", pTable->name); return -1; } + sdbTrace("open sdb file:%s for read", pTable->fn); + // Loop to read sdb file row by row while (1) { memset(rowHead, 0, total_size); bytes = read(pTable->fd, rowHead, sizeof(SRowHead)); if (bytes < 0) { - sdbError("failed to read sdb file: %s", pTable->fn); + sdbError("failed to read sdb file:%s", pTable->fn); goto sdb_exit1; } @@ -208,8 +204,8 @@ int sdbInitTableByFile(SSdbTable *pTable) { } if (rowHead->rowSize < 0 || rowHead->rowSize > pTable->maxRowSize) { - sdbError("error row size in sdb file: %s rowSize: %d maxRowSize: %d", pTable->fn, rowHead->rowSize, - pTable->maxRowSize); + sdbError("error row size in sdb file:%s, id:%d rowSize:%d maxRowSize:%d", + pTable->fn, rowHead->id, 
rowHead->rowSize, pTable->maxRowSize); pTable->size += sizeof(SRowHead); continue; } @@ -220,13 +216,13 @@ int sdbInitTableByFile(SSdbTable *pTable) { bytes = read(pTable->fd, rowHead->data, rowHead->rowSize + sizeof(TSCKSUM)); if (bytes < rowHead->rowSize + sizeof(TSCKSUM)) { // TODO: Here may cause pTable->size not end of the file - sdbError("failed to read sdb file: %s id: %d rowSize: %d", pTable->fn, rowHead->id, rowHead->rowSize); + sdbError("failed to read sdb file:%s id:%d rowSize:%d", pTable->fn, rowHead->id, rowHead->rowSize); break; } real_size = sizeof(SRowHead) + rowHead->rowSize + sizeof(TSCKSUM); if (!taosCheckChecksumWhole((uint8_t *)rowHead, real_size)) { - sdbError("error sdb checksum, sdb: %s id: %d, skip", pTable->name, rowHead->id); + sdbError("error sdb checksum, sdb:%s id:%d, skip", pTable->name, rowHead->id); pTable->size += real_size; continue; } @@ -237,7 +233,7 @@ int sdbInitTableByFile(SSdbTable *pTable) { if (pMetaRow == NULL) { // New object if (rowHead->id < 0) { /* assert(0); */ - sdbError("error sdb negative id: %d, sdb: %s, skip", rowHead->id, pTable->name); + sdbError("error sdb negative id:%d, sdb:%s, skip", rowHead->id, pTable->name); } else { rowMeta.id = rowHead->id; // TODO: Get rid of the rowMeta.offset and rowSize @@ -245,10 +241,18 @@ int sdbInitTableByFile(SSdbTable *pTable) { rowMeta.rowSize = rowHead->rowSize; rowMeta.row = (*(pTable->appTool))(SDB_TYPE_DECODE, NULL, rowHead->data, rowHead->rowSize, NULL); (*sdbAddIndexFp[pTable->keyType])(pTable->iHandle, rowMeta.row, &rowMeta); - if (pTable->keyType == SDB_KEYTYPE_AUTO) pTable->autoIndex++; + if (pTable->keyType == SDB_KEYTYPE_AUTO) { + pTable->autoIndex++; + maxAutoIndex = MAX(maxAutoIndex, *(int32_t*)rowHead->data); + } pTable->numOfRows++; } } else { // already exists + if (pTable->keyType == SDB_KEYTYPE_AUTO) { + pTable->autoIndex++; + maxAutoIndex = MAX(maxAutoIndex, *(int32_t *) rowHead->data); + } + if (rowHead->id < 0) { // Delete the object (*sdbDeleteIndexFp[pTable->keyType])(pTable->iHandle, rowHead->data); (*(pTable->appTool))(SDB_TYPE_DESTROY, pMetaRow, NULL, 0, NULL); @@ -265,6 +269,10 @@ int sdbInitTableByFile(SSdbTable *pTable) { if (pTable->id < abs(rowHead->id)) pTable->id = abs(rowHead->id); } + if (pTable->keyType == SDB_KEYTYPE_AUTO) { + pTable->autoIndex = maxAutoIndex; + } + sdbVersion += (pTable->id - oldId); if (numOfDels > pTable->maxRows / 4) sdbSaveSnapShot(pTable); @@ -279,7 +287,7 @@ sdb_exit1: return -1; } -void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, char keyType, char *directory, +void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, uint8_t keyType, char *directory, void *(*appTool)(char, void *, char *, int, int *)) { SSdbTable *pTable = (SSdbTable *)malloc(sizeof(SSdbTable)); if (pTable == NULL) return NULL; @@ -351,10 +359,41 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { int real_size = 0; /* char action = SDB_TYPE_INSERT; */ - if (pTable == NULL) return -1; + if (pTable == NULL) { + sdbError("sdb table is null"); + return -1; + } if ((pTable->keyType != SDB_KEYTYPE_AUTO) || *((int64_t *)row)) - if (sdbGetRow(handle, row)) return -1; + if (sdbGetRow(handle, row)) { + if (strcmp(pTable->name, "mnode") == 0) { + /* + * The first mnode is created when the system first starts, so this insert may fail; + * see sdbPeer.c : sdbInitPeers + */ + pTable->id++; + sdbVersion++; + sdbPrint("table:%s, record:%s already exists, consider it successful, sdbVersion:%ld id:%d", + pTable->name, taosIpStr(*(int32_t *)row), 
sdbVersion, pTable->id); + return 0; + } else { + switch (pTable->keyType) { + case SDB_KEYTYPE_STRING: + sdbError("table:%s, failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, (char *)row, sdbVersion, pTable->id); + break; + case SDB_KEYTYPE_UINT32: //dnodes or mnodes + sdbError("table:%s, failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); + break; + case SDB_KEYTYPE_AUTO: + sdbError("table:%s, failed to insert record:%d sdbVersion:%ld id:%d", pTable->name, *(int32_t *)row, sdbVersion, pTable->id); + break; + default: + sdbError("table:%s, failed to insert record sdbVersion:%ld id:%d", pTable->name, sdbVersion, pTable->id); + break; + } + return -1; + } + } total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); SRowHead *rowHead = (SRowHead *)malloc(total_size); @@ -378,7 +417,7 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { pTable->id++; sdbVersion++; if (pTable->keyType == SDB_KEYTYPE_AUTO) { - // TODO: here need to change + // TODO:here need to change *((uint32_t *)pObj) = ++pTable->autoIndex; (*(pTable->appTool))(SDB_TYPE_ENCODE, pObj, rowHead->data, pTable->maxRowSize, &(rowHead->rowSize)); } @@ -388,7 +427,7 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { rowHead->delimiter = SDB_DELIMITER; rowHead->id = pTable->id; if (taosCalcChecksumAppend(0, (uint8_t *)rowHead, real_size) < 0) { - sdbError("failed to get checksum while inserting, sdb: %s", pTable->name); + sdbError("failed to get checksum while inserting, sdb:%s", pTable->name); pthread_mutex_unlock(&pTable->mutex); tfree(rowHead); return -1; @@ -413,24 +452,26 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { pTable->numOfRows++; switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace( - "table:%s, a record is inserted:%s, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", - pTable->name, (char *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); + sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + pTable->name, (char *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); + break; + case SDB_KEYTYPE_UINT32: //dnodes or mnodes + sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; - case SDB_KEYTYPE_UINT32: case SDB_KEYTYPE_AUTO: - sdbTrace( - "table:%s, a record is inserted:%d, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", - pTable->name, *(int32_t *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); + sdbTrace("table:%s, a record is inserted:%d, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + pTable->name, *(int32_t *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; default: - sdbTrace( - "table:%s, a record is inserted, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", - pTable->name, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); + sdbTrace("table:%s, a record is inserted, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + pTable->name, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; } id = rowMeta.id; + } else { + sdbError("table:%s, failed to insert record", pTable->name); } 
tfree(rowHead); @@ -482,7 +523,7 @@ int sdbDeleteRow(void *handle, void *row) { total_size = sizeof(SRowHead) + rowSize + sizeof(TSCKSUM); rowHead = (SRowHead *)malloc(total_size); if (rowHead == NULL) { - sdbError("failed to allocate row head memory, sdb: %s", pTable->name); + sdbError("failed to allocate row head memory, sdb:%s", pTable->name); return -1; } memset(rowHead, 0, total_size); @@ -498,7 +539,7 @@ int sdbDeleteRow(void *handle, void *row) { rowHead->id = -(pTable->id); memcpy(rowHead->data, row, rowSize); if (taosCalcChecksumAppend(0, (uint8_t *)rowHead, total_size) < 0) { - sdbError("failed to get checksum while inserting, sdb: %s", pTable->name); + sdbError("failed to get checksum while inserting, sdb:%s", pTable->name); pthread_mutex_unlock(&pTable->mutex); tfree(rowHead); return -1; @@ -510,19 +551,20 @@ int sdbDeleteRow(void *handle, void *row) { sdbFinishCommit(pTable); pTable->numOfRows--; - // TODO: Change the update list here + // TODO:Change the update list here sdbAddIntoUpdateList(pTable, SDB_TYPE_DELETE, pMetaRow); switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace( - "table:%s, a record is deleted:%s, sdbVersion:%ld id:%ld numOfRows:%d", - pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); + sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%ld id:%ld numOfRows:%d", + pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); + break; + case SDB_KEYTYPE_UINT32: //dnodes or mnodes + sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%ld id:%ld numOfRows:%d", + pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id, pTable->numOfRows); break; - case SDB_KEYTYPE_UINT32: case SDB_KEYTYPE_AUTO: - sdbTrace( - "table:%s, a record is deleted:%d, sdbVersion:%ld id:%ld numOfRows:%d", - pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); + sdbTrace("table:%s, a record is deleted:%d, sdbVersion:%ld id:%ld numOfRows:%d", + pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); break; default: sdbTrace("table:%s, a record is deleted, sdbVersion:%ld id:%ld numOfRows:%d", @@ -558,7 +600,24 @@ int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { if (pTable == NULL || row == NULL) return -1; pMeta = sdbGetRowMeta(handle, row); if (pMeta == NULL) { - sdbTrace("table:%s, record is not there, update failed", pTable->name); + switch (pTable->keyType) { + case SDB_KEYTYPE_STRING: + sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%ld id:%d", + pTable->name, (char *) row, sdbVersion, pTable->id); + break; + case SDB_KEYTYPE_UINT32: //dnodes or mnodes + sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%ld id:%d", + pTable->name, taosIpStr(*(int32_t *) row), sdbVersion, pTable->id); + break; + case SDB_KEYTYPE_AUTO: + sdbError("table:%s, failed to update record:%d, record is not there, sdbVersion:%ld id:%d", + pTable->name, *(int32_t *) row, sdbVersion, pTable->id); + break; + default: + sdbError("table:%s, failed to update record, record is not there, sdbVersion:%ld id:%d", + pTable->name, sdbVersion, pTable->id); + break; + } return -1; } @@ -568,7 +627,7 @@ int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); SRowHead *rowHead = (SRowHead *)malloc(total_size); if (rowHead == NULL) { - sdbError("failed to allocate row head memory, sdb: %s", pTable->name); + sdbError("failed to allocate row head memory, 
sdb:%s", pTable->name); return -1; } memset(rowHead, 0, total_size); @@ -597,7 +656,7 @@ int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { rowHead->delimiter = SDB_DELIMITER; rowHead->id = pTable->id; if (taosCalcChecksumAppend(0, (uint8_t *)rowHead, real_size) < 0) { - sdbError("failed to get checksum, sdb: %s id: %d", pTable->name, rowHead->id); + sdbError("failed to get checksum, sdb:%s id:%d", pTable->name, rowHead->id); pthread_mutex_unlock(&pTable->mutex); tfree(rowHead); return -1; @@ -615,15 +674,16 @@ int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace( - "table:%s, a record is updated:%s, sdbVersion:%ld id:%ld numOfRows:%d", - pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); + sdbTrace("table:%s, a record is updated:%s, sdbVersion:%ld id:%ld numOfRows:%d", + pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); + break; + case SDB_KEYTYPE_UINT32: //dnodes or mnodes + sdbTrace("table:%s, a record is updated:%s, sdbVersion:%ld id:%ld numOfRows:%d", + pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id, pTable->numOfRows); break; - case SDB_KEYTYPE_UINT32: case SDB_KEYTYPE_AUTO: - sdbTrace( - "table:%s, a record is updated:%d, sdbVersion:%ld id:%ld numOfRows:%d", - pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); + sdbTrace("table:%s, a record is updated:%d, sdbVersion:%ld id:%ld numOfRows:%d", + pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); break; default: sdbTrace("table:%s, a record is updated, sdbVersion:%ld id:%ld numOfRows:%d", pTable->name, sdbVersion, @@ -652,7 +712,7 @@ int sdbBatchUpdateRow(void *handle, void *row, int rowSize) { if (pTable == NULL || row == NULL || rowSize <= 0) return -1; pMeta = sdbGetRowMeta(handle, row); if (pMeta == NULL) { - sdbTrace("table: %s, record is not there, batch update failed", pTable->name); + sdbTrace("table:%s, record is not there, batch update failed", pTable->name); return -1; } @@ -662,7 +722,7 @@ int sdbBatchUpdateRow(void *handle, void *row, int rowSize) { total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); SRowHead *rowHead = (SRowHead *)malloc(total_size); if (rowHead == NULL) { - sdbError("failed to allocate row head memory, sdb: %s", pTable->name); + sdbError("failed to allocate row head memory, sdb:%s", pTable->name); return -1; } @@ -760,12 +820,14 @@ void sdbResetTable(SSdbTable *pTable) { return; } + sdbTrace("open sdb file:%s for update", pTable->fn); + while (1) { memset(rowHead, 0, total_size); bytes = read(pTable->fd, rowHead, sizeof(SRowHead)); if (bytes < 0) { - sdbError("failed to read sdb file: %s", pTable->fn); + sdbError("failed to read sdb file:%s", pTable->fn); tfree(rowHead); return; } @@ -779,7 +841,7 @@ void sdbResetTable(SSdbTable *pTable) { } if (rowHead->rowSize < 0 || rowHead->rowSize > pTable->maxRowSize) { - sdbError("error row size in sdb file: %s rowSize: %d maxRowSize: %d", pTable->fn, rowHead->rowSize, + sdbError("error row size in sdb file:%s rowSize:%d maxRowSize:%d", pTable->fn, rowHead->rowSize, pTable->maxRowSize); pTable->size += sizeof(SRowHead); continue; @@ -787,13 +849,13 @@ void sdbResetTable(SSdbTable *pTable) { bytes = read(pTable->fd, rowHead->data, rowHead->rowSize + sizeof(TSCKSUM)); if (bytes < rowHead->rowSize + sizeof(TSCKSUM)) { - sdbError("failed to read sdb file: %s id: %d rowSize: %d", pTable->fn, rowHead->id, rowHead->rowSize); + 
sdbError("failed to read sdb file:%s id:%d rowSize:%d", pTable->fn, rowHead->id, rowHead->rowSize); break; } real_size = sizeof(SRowHead) + rowHead->rowSize + sizeof(TSCKSUM); if (!taosCheckChecksumWhole((uint8_t *)rowHead, real_size)) { - sdbError("error sdb checksum, sdb: %s id: %d, skip", pTable->name, rowHead->id); + sdbError("error sdb checksum, sdb:%s id:%d, skip", pTable->name, rowHead->id); pTable->size += real_size; continue; } @@ -802,10 +864,10 @@ void sdbResetTable(SSdbTable *pTable) { pMetaRow = sdbGetRow(pTable, rowHead->data); if (pMetaRow == NULL) { // New object if (rowHead->id < 0) { - sdbError("error sdb negative id: %d, sdb: %s, skip", rowHead->id, pTable->name); + sdbError("error sdb negative id:%d, sdb:%s, skip", rowHead->id, pTable->name); } else { rowMeta.id = rowHead->id; - // TODO: Get rid of the rowMeta.offset and rowSize + // TODO:Get rid of the rowMeta.offset and rowSize rowMeta.offset = pTable->size; rowMeta.rowSize = rowHead->rowSize; rowMeta.row = (*(pTable->appTool))(SDB_TYPE_DECODE, NULL, rowHead->data, rowHead->rowSize, NULL); @@ -838,7 +900,7 @@ void sdbResetTable(SSdbTable *pTable) { sdbTrace("table:%s is updated, sdbVerion:%ld id:%ld", pTable->name, sdbVersion, pTable->id); } -// TODO: A problem here : use snapshot file to sync another node will cause +// TODO:A problem here :use snapshot file to sync another node will cause // problem void sdbSaveSnapShot(void *handle) { SSdbTable *pTable = (SSdbTable *)handle; @@ -868,7 +930,7 @@ void sdbSaveSnapShot(void *handle) { total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); SRowHead *rowHead = (SRowHead *)malloc(total_size); if (rowHead == NULL) { - sdbError("failed to allocate memory while saving SDB snapshot, sdb: %s", pTable->name); + sdbError("failed to allocate memory while saving SDB snapshot, sdb:%s", pTable->name); return; } memset(rowHead, 0, size); diff --git a/src/system/detail/CMakeLists.txt b/src/system/detail/CMakeLists.txt index 9233d6c71bddda7238b3fd351b59293ab957a546..95cce3dfe617464cb0cc442686a2802f2178cbbc 100644 --- a/src/system/detail/CMakeLists.txt +++ b/src/system/detail/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/modules/http/inc) diff --git a/src/system/detail/inc/mgmt.h b/src/system/detail/inc/mgmt.h index 1d1f325ec2db81edecf5bbac0aea064d4fd96c11..39a94f320bba4099d933929b599bbc70ab81b77f 100644 --- a/src/system/detail/inc/mgmt.h +++ b/src/system/detail/inc/mgmt.h @@ -20,11 +20,7 @@ extern "C" { #endif -#include -#include -#include -#include -#include +#include "os.h" #include "sdb.h" #include "tglobalcfg.h" @@ -34,7 +30,6 @@ extern "C" { #include "tmempool.h" #include "trpc.h" #include "tsdb.h" -#include "tsdb.h" #include "tskiplist.h" #include "tsocket.h" #include "ttime.h" @@ -50,17 +45,6 @@ extern int mgmtShellConns; extern int mgmtDnodeConns; extern char mgmtDirectory[]; -enum _TSDB_VG_STATUS { - TSDB_VG_STATUS_READY, - TSDB_VG_STATUS_IN_PROGRESS, - TSDB_VG_STATUS_COMMITLOG_INIT_FAILED, - TSDB_VG_STATUS_INIT_FAILED, - TSDB_VG_STATUS_FULL -}; - -enum _TSDB_DB_STATUS { TSDB_DB_STATUS_READY, TSDB_DB_STATUS_DROPPING, TSDB_DB_STATUS_DROP_FROM_SDB }; - -enum _TSDB_VN_STATUS { TSDB_VN_STATUS_READY, TSDB_VN_STATUS_DROPPING }; typedef struct { uint32_t privateIp; @@ -91,7 +75,7 @@ typedef struct { uint16_t slot; 
int32_t customScore; // config by user float lbScore; // calc in balance function - int16_t lbState; // set in balance function + int16_t lbStatus; // set in balance function int16_t lastAllocVnode; // increase while create vnode SVnodeLoad vload[TSDB_MAX_VNODES]; char reserved[16]; @@ -153,7 +137,7 @@ typedef struct _vg_obj { int32_t numOfMeters; int32_t lbIp; int32_t lbTime; - int8_t lbState; + int8_t lbStatus; char reserved[16]; char updateEnd[1]; struct _vg_obj *prev, *next; @@ -162,6 +146,9 @@ typedef struct _vg_obj { } SVgObj; typedef struct _db_obj { + /* + * this length will cause the storage structure to change, rollback + */ char name[TSDB_DB_NAME_LEN + 1]; int64_t createdTime; SDbCfg cfg; @@ -235,10 +222,12 @@ typedef struct _connObj { char superAuth : 1; // super user flag char writeAuth : 1; // write flag char killConnection : 1; // kill the connection flag + uint8_t usePublicIp : 1; // if the connection request is publicIp + uint8_t reserved : 4; uint32_t queryId; // query ID to be killed uint32_t streamId; // stream ID to be killed uint32_t ip; // shell IP - short port; // shell port + uint16_t port; // shell port void * thandle; SQList * pQList; // query list SSList * pSList; // stream list @@ -325,7 +314,7 @@ int mgmtUpdateDb(SDbObj *pDb); SDbObj *mgmtGetDb(char *db); SDbObj *mgmtGetDbByMeterId(char *db); int mgmtCreateDb(SAcctObj *pAcct, SCreateDbMsg *pCreate); -int mgmtDropDbByName(SAcctObj *pAcct, char *name); +int mgmtDropDbByName(SAcctObj *pAcct, char *name, short ignoreNotExists); int mgmtDropDb(SDbObj *pDb); /* void mgmtMonitorDbDrop(void *unused); */ void mgmtMonitorDbDrop(void *unused, void *unusedt); @@ -356,7 +345,7 @@ void mgmtCleanUpVgroups(); int mgmtInitMeters(); STabObj *mgmtGetMeter(char *meterId); STabObj *mgmtGetMeterInfo(char *src, char *tags[]); -int mgmtRetrieveMetricMeta(void *thandle, char **pStart, SMetricMetaMsg *pInfo); +int mgmtRetrieveMetricMeta(SConnObj *pConn, char **pStart, SMetricMetaMsg *pInfo); int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate); int mgmtDropMeter(SDbObj *pDb, char *meterId, int ignore); int mgmtAlterMeter(SDbObj *pDb, SAlterTableMsg *pAlter); @@ -423,15 +412,18 @@ int mgmtRetrieveScores(SShowObj *pShow, char *data, int rows, SConnObj *pConn); int grantGetGrantsMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); int grantRetrieveGrants(SShowObj *pShow, char *data, int rows, SConnObj *pConn); +int mgmtGetVnodeMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveVnodes(SShowObj *pShow, char *data, int rows, SConnObj *pConn); + // dnode balance api int mgmtInitBalance(); void mgmtCleanupBalance(); int mgmtAllocVnodes(SVgObj *pVgroup); void mgmtSetDnodeShellRemoving(SDnodeObj *pDnode); void mgmtSetDnodeUnRemove(SDnodeObj *pDnode); -void mgmtStartBalanceTimer(int mseconds); +void mgmtStartBalanceTimer(int64_t mseconds); void mgmtSetDnodeOfflineOnSdbChanged(); -void mgmtUpdateVgroupState(SVgObj *pVgroup, int lbState, int srcIp); +void mgmtUpdateVgroupState(SVgObj *pVgroup, int lbStatus, int srcIp); bool mgmtAddVnode(SVgObj *pVgroup, SDnodeObj *pSrcDnode, SDnodeObj *pDestDnode); void mgmtSetModuleInDnode(SDnodeObj *pDnode, int moduleType); diff --git a/src/system/detail/inc/mgmtBalance.h b/src/system/detail/inc/mgmtBalance.h index 4157458a7f3d864e576ed004ba416c4cf3b562ef..a97e7948940f7c5c9ddb2dae8370af84a82c0f34 100644 --- a/src/system/detail/inc/mgmtBalance.h +++ b/src/system/detail/inc/mgmtBalance.h @@ -20,25 +20,14 @@ extern "C" { #endif -#include -#include -#include +#include "os.h" 
#include "dnodeSystem.h" #include "mgmt.h" #include "tglobalcfg.h" -#include "tstatus.h" +#include "vnodeStatus.h" #include "ttime.h" -enum { - LB_DNODE_STATE_BALANCED, - LB_DNODE_STATE_BALANCING, - LB_DNODE_STATE_OFFLINE_REMOVING, - LB_DNODE_STATE_SHELL_REMOVING -}; - -enum { LB_VGROUP_STATE_READY, LB_VGROUP_STATE_UPDATE }; - void mgmtCreateDnodeOrderList(); void mgmtReleaseDnodeOrderList(); diff --git a/src/system/detail/inc/mgmtSystem.h b/src/system/detail/inc/mgmtSystem.h index 1262d7e834f705d9a67f8f61dff3b70f9793f954..44bb0331e1908ecd49b973b4ea0297d07f261cc0 100644 --- a/src/system/detail/inc/mgmtSystem.h +++ b/src/system/detail/inc/mgmtSystem.h @@ -20,8 +20,6 @@ extern "C" { #endif -#include - int mgmtInitRedirect(); void mgmtCleanUpRedirect(); diff --git a/src/system/detail/inc/mgmtUtil.h b/src/system/detail/inc/mgmtUtil.h index aecb229dba3024c712b82d8dafab7a0dbbf146e7..1f70485894e7e245193b9e432b9e40b126cdb1e6 100644 --- a/src/system/detail/inc/mgmtUtil.h +++ b/src/system/detail/inc/mgmtUtil.h @@ -30,11 +30,13 @@ char* mgmtMeterGetTag(STabObj* pMeter, int32_t col, SSchema* pTagColSchema); int32_t mgmtFindTagCol(STabObj * pMetric, const char * tagName); int32_t mgmtGetTagsLength(STabObj* pMetric, int32_t col); +bool mgmtCheckIsMonitorDB(char *db, char *monitordb); +int32_t mgmtCheckDBParams(SCreateDbMsg *pCreate); int32_t mgmtRetrieveMetersFromMetric(SMetricMetaMsg* pInfo, int32_t tableIndex, tQueryResultset* pRes); int32_t mgmtDoJoin(SMetricMetaMsg* pMetricMetaMsg, tQueryResultset* pRes); void mgmtReorganizeMetersInMetricMeta(SMetricMetaMsg* pInfo, int32_t index, tQueryResultset* pRes); -bool tSkipListNodeFilterCallback(struct tSkipListNode *pNode, void *param); +bool tSkipListNodeFilterCallback(const void *pNode, void *param); #endif //TBASE_MGMTUTIL_H diff --git a/src/system/detail/inc/vnode.h b/src/system/detail/inc/vnode.h index 4d2ebdfa354295d9206bcf395bb35f64a510d798..435184463b4a4dd7fccdbebc10b717d883609b26 100644 --- a/src/system/detail/inc/vnode.h +++ b/src/system/detail/inc/vnode.h @@ -20,11 +20,7 @@ extern "C" { #endif -#include -#include -#include -#include -#include +#include "os.h" #include "tglobalcfg.h" #include "tidpool.h" @@ -33,7 +29,6 @@ extern "C" { #include "trpc.h" #include "tsclient.h" #include "tsdb.h" -#include "tsdb.h" #include "tsocket.h" #include "ttime.h" #include "ttimer.h" @@ -69,15 +64,6 @@ enum _sync_cmd { TSDB_SYNC_CMD_REMOVE, }; -enum _meter_state { - TSDB_METER_STATE_READY = 0x00, - TSDB_METER_STATE_INSERT = 0x01, - TSDB_METER_STATE_IMPORTING = 0x02, - TSDB_METER_STATE_UPDATING = 0x04, - TSDB_METER_STATE_DELETING = 0x10, - TSDB_METER_STATE_DELETED = 0x18, -}; - typedef struct { int64_t offset : 48; int64_t length : 16; @@ -97,7 +83,7 @@ typedef struct { SVPeerDesc vpeers[TSDB_VNODES_SUPPORT]; SVnodePeer * peerInfo[TSDB_VNODES_SUPPORT]; char selfIndex; - char status; + char vnodeStatus; char accessState; // Vnode access state, Readable/Writable char syncStatus; char commitInProcess; @@ -211,26 +197,6 @@ typedef struct { char cont[]; } SVMsgHeader; -/* - * The value of QInfo.signature is used to denote that a query is executing, it isn't safe to release QInfo yet. - * The release operations will be blocked in a busy-waiting until the query operation reach a safepoint. - * Then it will reset the signature in a atomic operation, followed by release operation. - * Only the QInfo.signature == QInfo, this structure can be released safely. 
- */ -#define TSDB_QINFO_QUERY_FLAG 0x1 -#define TSDB_QINFO_RESET_SIG(x) ((x)->signature = (uint64_t)(x)) -#define TSDB_QINFO_SET_QUERY_FLAG(x) \ - __sync_val_compare_and_swap(&((x)->signature), (uint64_t)(x), TSDB_QINFO_QUERY_FLAG); - -// live lock: wait for query reaching a safe-point, release all resources -// belongs to this query -#define TSDB_WAIT_TO_SAFE_DROP_QINFO(x) \ - { \ - while (__sync_val_compare_and_swap(&((x)->signature), (x), 0) == TSDB_QINFO_QUERY_FLAG) { \ - taosMsleep(1); \ - } \ - } - struct tSQLBinaryExpr; typedef struct SColumnInfoEx { @@ -292,9 +258,7 @@ typedef struct SQuery { int16_t checkBufferInLoop; // check if the buffer is full during scan each block SLimitVal limit; int32_t rowSize; - int32_t dataRowSize; // row size of each loaded data from disk, the value is - // used for prepare buffer SSqlGroupbyExpr * pGroupbyExpr; SSqlFunctionExpr * pSelectExpr; SColumnInfoEx * colList; @@ -342,7 +306,7 @@ extern void * vnodeTmrCtrl; // read API extern int (*vnodeSearchKeyFunc[])(char *pValue, int num, TSKEY key, int order); -void *vnodeQueryInTimeRange(SMeterObj **pMeterObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *sqlExprs, +void *vnodeQueryOnSingleTable(SMeterObj **pMeterObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *sqlExprs, SQueryMeterMsg *pQueryMsg, int *code); void *vnodeQueryOnMultiMeters(SMeterObj **pMeterObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *pSqlExprs, @@ -358,7 +322,7 @@ bool vnodeIsValidVnodeCfg(SVnodeCfg *pCfg); int32_t vnodeGetResultSize(void *handle, int32_t *numOfRows); -int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows, int32_t *size); +int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows); int64_t vnodeGetOffsetVal(void *thandle); @@ -375,6 +339,8 @@ void vnodeFreeQInfo(void *, bool); void vnodeFreeQInfoInQueue(void *param); bool vnodeIsQInfoValid(void *param); +void vnodeDecRefCount(void *param); +void vnodeAddRefCount(void *param); int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQuery); diff --git a/src/system/detail/inc/vnodePeer.h b/src/system/detail/inc/vnodePeer.h index d44143e619b36c6faaf3dbf078bf5a9f92655fb4..4f17e66a70d8ffc0727021a17359e7f58161cff4 100644 --- a/src/system/detail/inc/vnodePeer.h +++ b/src/system/detail/inc/vnodePeer.h @@ -16,7 +16,7 @@ #ifndef TDENGINE_VNODEPEER_H #define TDENGINE_VNODEPEER_H -#include +#include "os.h" #ifdef __cplusplus extern "C" { diff --git a/src/system/detail/inc/vnodeQueryImpl.h b/src/system/detail/inc/vnodeQueryImpl.h index c00af3b8e981ab20c1d033d1032b1b05a2452f0d..2002483f03136fcc65d0bb1727ac169d12473db7 100644 --- a/src/system/detail/inc/vnodeQueryImpl.h +++ b/src/system/detail/inc/vnodeQueryImpl.h @@ -20,15 +20,20 @@ extern "C" { #endif -#include -#include +#include "os.h" #include "ihash.h" #define GET_QINFO_ADDR(x) ((char*)(x)-offsetof(SQInfo, query)) #define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0) +/* + * set the output buffer page size is 16k + * The page size should be sufficient for at least one output result or intermediate result. + * Some intermediate results may be extremely large, such as top/bottom(100) query. 
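
The macros removed above implemented the old teardown protocol for a running query: the `signature` field was flipped with compare-and-swap and the dropper busy-waited (`taosMsleep(1)` in a loop) until the query reached a safe point. The new `vnodeAddRefCount`/`vnodeDecRefCount` declarations, together with the `refCount` field added to `_qinfo` later in this patch ("when the value is 0, it can be released safely"), replace that with plain reference counting. A hedged sketch of such a counter using the same GCC `__sync` builtins the old macros relied on; the struct and free helper are stand-ins, not the actual vnode implementation:

```c
#include <stdint.h>
#include <stdlib.h>

typedef struct QInfoLite {
  int32_t refCount;   /* 0 means no owner or reader still holds the object */
  /* ... query state ... */
} QInfoLite;

static void qinfoFree(QInfoLite *q) { free(q); }  /* hypothetical release helper */

static void qinfoAddRef(QInfoLite *q) {
  __sync_add_and_fetch(&q->refCount, 1);
}

static void qinfoDecRef(QInfoLite *q) {
  /* Whoever drops the count to zero frees the object; no busy-waiting needed. */
  if (__sync_sub_and_fetch(&q->refCount, 1) == 0) {
    qinfoFree(q);
  }
}
```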
+ */ #define DEFAULT_INTERN_BUF_SIZE 16384L + #define INIT_ALLOCATE_DISK_PAGES 60L #define DEFAULT_DATA_FILE_MAPPING_PAGES 2L #define DEFAULT_DATA_FILE_MMAP_WINDOW_SIZE (DEFAULT_DATA_FILE_MAPPING_PAGES * DEFAULT_INTERN_BUF_SIZE) @@ -112,7 +117,7 @@ typedef enum { #define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN) typedef int (*__block_search_fn_t)(char* data, int num, int64_t key, int order); -typedef int32_t (*__read_data_fn_t)(int fd, SQInfo* pQInfo, SQueryFileInfo* pQueryFile, char* buf, uint64_t offset, +typedef int32_t (*__read_data_fn_t)(int fd, SQInfo* pQInfo, SQueryFilesInfo* pQueryFile, char* buf, uint64_t offset, int32_t size); static FORCE_INLINE SMeterObj* getMeterObj(void* hashHandle, int32_t sid) { @@ -161,7 +166,7 @@ void pointInterpSupporterDestroy(SPointInterpoSupporter* pPointInterpSupport); void pointInterpSupporterSetData(SQInfo* pQInfo, SPointInterpoSupporter* pPointInterpSupport); int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv* pRuntimeEnv, SPositionInfo* position); -void doCloseAllOpenedResults(SMeterQuerySupportObj* pSupporter); +int32_t doCloseAllOpenedResults(SMeterQuerySupportObj* pSupporter); void disableFunctForSuppleScan(SQueryRuntimeEnv* pRuntimeEnv, int32_t order); void enableFunctForMasterScan(SQueryRuntimeEnv* pRuntimeEnv, int32_t order); @@ -175,7 +180,7 @@ void queryOnBlock(SMeterQuerySupportObj* pSupporter, int64_t* primaryKeys, int32 SBlockInfo* pBlockBasicInfo, SMeterDataInfo* pDataHeadInfoEx, SField* pFields, __block_search_fn_t searchFn); -SMeterDataInfo** vnodeFilterQualifiedMeters(SQInfo* pQInfo, int32_t vid, SQueryFileInfo* pQueryFileInfo, +SMeterDataInfo** vnodeFilterQualifiedMeters(SQInfo* pQInfo, int32_t vid, int32_t fileIndex, tSidSet* pSidSet, SMeterDataInfo* pMeterDataInfo, int32_t* numOfMeters); int32_t vnodeGetVnodeHeaderFileIdx(int32_t* fid, SQueryRuntimeEnv* pRuntimeEnv, int32_t order); @@ -186,15 +191,16 @@ void freeMeterBlockInfoEx(SMeterDataBlockInfoEx* pDataBlockInfoEx, int32_t len); void setExecutionContext(SMeterQuerySupportObj* pSupporter, SOutputRes* outputRes, int32_t meterIdx, int32_t groupIdx, SMeterQueryInfo* sqinfo); -void setIntervalQueryExecutionContext(SMeterQuerySupportObj* pSupporter, int32_t meterIdx, SMeterQueryInfo* sqinfo); +int32_t setIntervalQueryExecutionContext(SMeterQuerySupportObj* pSupporter, int32_t meterIdx, SMeterQueryInfo* sqinfo); int64_t getQueryStartPositionInCache(SQueryRuntimeEnv* pRuntimeEnv, int32_t* slot, int32_t* pos, bool ignoreQueryRange); int64_t getNextAccessedKeyInData(SQuery* pQuery, int64_t* pPrimaryCol, SBlockInfo* pBlockInfo, int32_t blockStatus); uint32_t getDataBlocksForMeters(SMeterQuerySupportObj* pSupporter, SQuery* pQuery, char* pHeaderData, - int32_t numOfMeters, SQueryFileInfo* pQueryFileInfo, SMeterDataInfo** pMeterDataInfo); -int32_t LoadDatablockOnDemand(SCompBlock* pBlock, SField** pFields, int8_t* blkStatus, SQueryRuntimeEnv* pRuntimeEnv, + int32_t numOfMeters, const char* filePath, SMeterDataInfo** pMeterDataInfo); +int32_t LoadDatablockOnDemand(SCompBlock* pBlock, SField** pFields, uint8_t* blkStatus, SQueryRuntimeEnv* pRuntimeEnv, int32_t fileIdx, int32_t slotIdx, __block_search_fn_t searchFn, bool onDemand); +char *vnodeGetHeaderFileData(SQueryRuntimeEnv *pRuntimeEnv, int32_t vnodeId, int32_t fileIndex); /** * Create SMeterQueryInfo. 
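
The comment added at the top of this vnodeQueryImpl.h hunk explains the 16 KB choice for `DEFAULT_INTERN_BUF_SIZE`: every output or intermediate result must fit in one buffer page, and functions such as top/bottom(100) keep on the order of a hundred candidate rows as their intermediate state. A rough fit check under an assumed, simplified per-entry layout; the real intermediate result carries more bookkeeping per row:

```c
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_INTERN_BUF_SIZE 16384L

/* Assumed per-row footprint of a top/bottom(100) intermediate result:
 * one timestamp plus one value per kept row. */
typedef struct { int64_t ts; double val; } TopBotEntryLite;

int main(void) {
  size_t interm = 100 * sizeof(TopBotEntryLite);  /* top/bottom(100) candidates */
  printf("intermediate result ~%zu bytes, page %ld bytes, fits: %s\n",
         interm, DEFAULT_INTERN_BUF_SIZE,
         interm <= DEFAULT_INTERN_BUF_SIZE ? "yes" : "no");
  return 0;
}
```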
@@ -224,11 +230,11 @@ void changeMeterQueryInfoForSuppleQuery(SMeterQueryInfo *pMeterQueryInfo, TSKEY /** * add the new allocated disk page to meter query info * the new allocated disk page is used to keep the intermediate (interval) results - * + * @param pQuery * @param pMeterQueryInfo * @param pSupporter */ -tFilePage* addDataPageForMeterQueryInfo(SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportObj *pSupporter); +tFilePage* addDataPageForMeterQueryInfo(SQuery* pQuery, SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportObj *pSupporter); /** * save the query range data into SMeterQueryInfo diff --git a/src/system/detail/inc/vnodeRead.h b/src/system/detail/inc/vnodeRead.h index 20f49354f2fee5ab1c7d027ff9437a18c54942f2..8011595e4183915767e34d4fc9e3a98a2a36bcfb 100644 --- a/src/system/detail/inc/vnodeRead.h +++ b/src/system/detail/inc/vnodeRead.h @@ -20,7 +20,7 @@ extern "C" { #endif -#include +#include "os.h" #include "tinterpolation.h" #include "vnodeTagMgmt.h" @@ -40,6 +40,7 @@ typedef struct SQueryLoadBlockInfo { int32_t fileId; int32_t slotIdx; int32_t sid; + bool tsLoaded; // if timestamp column of current block is loaded or not } SQueryLoadBlockInfo; typedef struct SQueryLoadCompBlockInfo { @@ -47,29 +48,13 @@ typedef struct SQueryLoadCompBlockInfo { int32_t fileId; int32_t fileListIndex; } SQueryLoadCompBlockInfo; + /* * the header file info for one vnode */ -typedef struct SQueryFileInfo { - int32_t fileID; /* file id */ - char headerFilePath[256]; /* full file name */ - char dataFilePath[256]; - char lastFilePath[256]; - int32_t defaultMappingSize; /* default mapping size */ - - int32_t headerFd; /* file handler */ - char* pHeaderFileData; /* mmap header files */ - size_t headFileSize; - int32_t dataFd; - char* pDataFileData; - size_t dataFileSize; - uint64_t dtFileMappingOffset; - - int32_t lastFd; - size_t lastFileSize; - uint64_t lastFileMappingOffset; - -} SQueryFileInfo; +typedef struct SHeaderFileInfo { + int32_t fileID; // file id +} SHeaderFileInfo; typedef struct SQueryCostSummary { double cacheTimeUs; @@ -106,45 +91,56 @@ typedef struct SOutputRes { SResultInfo* resultInfo; } SOutputRes; +/* + * header files info, avoid to iterate the directory, the data is acquired + * during in query preparation function + */ +typedef struct SQueryFilesInfo { + SHeaderFileInfo* pFileInfo; + uint32_t numOfFiles; // the total available number of files for this virtual node during query execution + int32_t current; // the memory mapped header file, NOTE: only one header file can be mmap. 
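
vnodeRead.h trades the old per-file `SQueryFileInfo` (an mmap plus three fixed 256-byte path buffers for every data file touched by a query) for `SHeaderFileInfo`, which records only the file id, and `SQueryFilesInfo`, whose fields continue below and which keeps exactly one header file memory-mapped at a time (the `current` index). A minimal sketch of switching that single mapping, assuming POSIX mmap and a caller that tracks the previous mapping; the helper itself is hypothetical, not the function the patch adds:

```c
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Hypothetical helper: drop the currently mapped header file (if any) and
 * map the header file at headerPath read-only in its place. */
static char *switchHeaderFile(const char *headerPath, char *oldData, size_t oldSize,
                              int *fd, size_t *newSize) {
  if (oldData != NULL) munmap(oldData, oldSize);
  if (*fd >= 0) close(*fd);

  *fd = open(headerPath, O_RDONLY);
  if (*fd < 0) return NULL;

  struct stat st;
  if (fstat(*fd, &st) != 0) return NULL;
  *newSize = (size_t)st.st_size;

  char *p = (char *)mmap(NULL, *newSize, PROT_READ, MAP_PRIVATE, *fd, 0);
  return (p == MAP_FAILED) ? NULL : p;
}
```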
+ int32_t vnodeId; + + int32_t headerFd; // header file fd + char* pHeaderFileData; // mmap header files + int64_t headFileSize; + int32_t dataFd; + int32_t lastFd; + + char headerFilePath[PATH_MAX]; // current opened header file name + char dataFilePath[PATH_MAX]; // current opened data file name + char lastFilePath[PATH_MAX]; // current opened last file path + char dbFilePathPrefix[PATH_MAX]; +} SQueryFilesInfo; + typedef struct RuntimeEnvironment { - SPositionInfo startPos; /* the start position, used for secondary/third iteration */ - SPositionInfo endPos; /* the last access position in query, served as the start pos of reversed order query */ - SPositionInfo nextPos; /* start position of the next scan */ - SData* colDataBuffer[TSDB_MAX_COLUMNS]; - SResultInfo* resultInfo; - - // Indicate if data block is loaded, the block is first/last/internal block - int8_t blockStatus; - int32_t unzipBufSize; - SData* primaryColBuffer; - char* unzipBuffer; - char* secondaryUnzipBuffer; - SQuery* pQuery; - SMeterObj* pMeterObj; - SQLFunctionCtx* pCtx; - SQueryLoadBlockInfo loadBlockInfo; /* record current block load information */ + SPositionInfo startPos; /* the start position, used for secondary/third iteration */ + SPositionInfo endPos; /* the last access position in query, served as the start pos of reversed order query */ + SPositionInfo nextPos; /* start position of the next scan */ + SData* colDataBuffer[TSDB_MAX_COLUMNS]; + SResultInfo* resultInfo; + uint8_t blockStatus; // Indicate if data block is loaded, the block is first/last/internal block + int32_t unzipBufSize; + SData* primaryColBuffer; + char* unzipBuffer; + char* secondaryUnzipBuffer; + SQuery* pQuery; + SMeterObj* pMeterObj; + SQLFunctionCtx* pCtx; + SQueryLoadBlockInfo loadBlockInfo; /* record current block load information */ SQueryLoadCompBlockInfo loadCompBlockInfo; /* record current compblock information in SQuery */ - - /* - * header files info, avoid to iterate the directory, the data is acquired - * during in query preparation function - */ - SQueryFileInfo* pHeaderFiles; - uint32_t numOfFiles; /* number of files of one vnode during query execution */ - - int16_t numOfRowsPerPage; - int16_t offset[TSDB_MAX_COLUMNS]; - - int16_t scanFlag; /* denotes reversed scan of data or not */ - SInterpolationInfo interpoInfo; - SData** pInterpoBuf; - SOutputRes* pResult; // reference to SQuerySupporter->pResult - void* hashList; - int32_t usedIndex; // assigned SOutputRes in list - - STSBuf* pTSBuf; - STSCursor cur; - SQueryCostSummary summary; + SQueryFilesInfo vnodeFileInfo; + int16_t numOfRowsPerPage; + int16_t offset[TSDB_MAX_COLUMNS]; + int16_t scanFlag; // denotes reversed scan of data or not + SInterpolationInfo interpoInfo; + SData** pInterpoBuf; + SOutputRes* pResult; // reference to SQuerySupporter->pResult + void* hashList; + int32_t usedIndex; // assigned SOutputRes in list + STSBuf* pTSBuf; + STSCursor cur; + SQueryCostSummary summary; } SQueryRuntimeEnv; /* intermediate result during multimeter query involves interval */ @@ -219,14 +215,12 @@ typedef struct SMeterQuerySupportObj { SMeterDataInfo* pMeterDataInfo; - TSKEY* tsList; - int32_t tsNum; - + TSKEY* tsList; } SMeterQuerySupportObj; typedef struct _qinfo { - uint64_t signature; - + uint64_t signature; + int32_t refCount; // QInfo reference count, when the value is 0, it can be released safely char user[TSDB_METER_ID_LEN + 1]; char sql[TSDB_SHOW_SQL_LEN]; uint8_t stream; @@ -236,24 +230,21 @@ typedef struct _qinfo { int64_t useconds; int killed; struct _qinfo *prev, 
*next; + SQuery query; + int num; + int totalPoints; + int pointsRead; + int pointsReturned; + int pointsInterpo; + int code; + char bufIndex; + char changed; + char over; + SMeterObj* pObj; + sem_t dataReady; - SQuery query; - int num; - int totalPoints; - int pointsRead; - int pointsReturned; - int pointsInterpo; - int code; - char bufIndex; - char changed; - char over; - SMeterObj* pObj; - - int (*fp)(SMeterObj*, SQuery*); - - sem_t dataReady; SMeterQuerySupportObj* pMeterQuerySupporter; - + int (*fp)(SMeterObj*, SQuery*); } SQInfo; int32_t vnodeQuerySingleMeterPrepare(SQInfo* pQInfo, SMeterObj* pMeterObj, SMeterQuerySupportObj* pSMultiMeterObj, diff --git a/src/system/detail/inc/vnodeShell.h b/src/system/detail/inc/vnodeShell.h index d0194a1765fc55d859c9ec645a1ca247f55ed79b..25646fbb1a88e2bd971d90b35fa8ecaf4cc42374 100644 --- a/src/system/detail/inc/vnodeShell.h +++ b/src/system/detail/inc/vnodeShell.h @@ -20,13 +20,13 @@ extern "C" { #endif -#include +#include "os.h" typedef struct { int sid; int vnode; uint32_t ip; - short port; + uint16_t port; int count; // track the number of imports int code; // track the code of imports int numOfTotalPoints; // track the total number of points imported diff --git a/src/system/detail/inc/vnodeStatus.h b/src/system/detail/inc/vnodeStatus.h new file mode 100644 index 0000000000000000000000000000000000000000..1a28d67e98c815ed7a5b3efd1072cb89c3c231b9 --- /dev/null +++ b/src/system/detail/inc/vnodeStatus.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
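
The new vnodeStatus.h that starts here gathers the status enums previously scattered across headers (vgroup, db, vnode, sync, drop, dnode, load balance, stream, and the table-state bit flags) and declares `taosGet...StatusStr` helpers so the log messages later in this patch can print names instead of raw numbers. A sketch of how one such helper is commonly written; the function name and the exact strings are assumptions, not the project's actual implementation:

```c
#include <stdint.h>
#include "vnodeStatus.h"

/* Illustrative switch-based mapping in the spirit of taosGetVnodeStatusStr();
 * the strings returned by the real helper may differ. */
const char *vnodeStatusToStr(int32_t vnodeStatus) {
  switch (vnodeStatus) {
    case TSDB_VN_STATUS_OFFLINE:  return "offline";
    case TSDB_VN_STATUS_CREATING: return "creating";
    case TSDB_VN_STATUS_UNSYNCED: return "unsynced";
    case TSDB_VN_STATUS_SLAVE:    return "slave";
    case TSDB_VN_STATUS_MASTER:   return "master";
    case TSDB_VN_STATUS_CLOSING:  return "closing";
    case TSDB_VN_STATUS_DELETING: return "deleting";
    default:                      return "undefined";
  }
}
```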
+ */ + +#ifndef TDENGINE_TSTATUS_H +#define TDENGINE_TSTATUS_H + +#ifdef __cplusplus +extern "C" { +#endif + +enum _TSDB_VG_STATUS { + TSDB_VG_STATUS_READY, + TSDB_VG_STATUS_IN_PROGRESS, + TSDB_VG_STATUS_COMMITLOG_INIT_FAILED, + TSDB_VG_STATUS_INIT_FAILED, + TSDB_VG_STATUS_FULL +}; + +enum _TSDB_DB_STATUS { + TSDB_DB_STATUS_READY, + TSDB_DB_STATUS_DROPPING, + TSDB_DB_STATUS_DROP_FROM_SDB +}; + +enum _TSDB_VN_STATUS { + TSDB_VN_STATUS_OFFLINE, + TSDB_VN_STATUS_CREATING, + TSDB_VN_STATUS_UNSYNCED, + TSDB_VN_STATUS_SLAVE, + TSDB_VN_STATUS_MASTER, + TSDB_VN_STATUS_CLOSING, + TSDB_VN_STATUS_DELETING, +}; + +enum _TSDB_VN_SYNC_STATUS { + TSDB_VN_SYNC_STATUS_INIT, + TSDB_VN_SYNC_STATUS_SYNCING, + TSDB_VN_SYNC_STATUS_SYNC_CACHE, + TSDB_VN_SYNC_STATUS_SYNC_FILE +}; + +enum _TSDB_VN_DROP_STATUS { + TSDB_VN_DROP_STATUS_READY, + TSDB_VN_DROP_STATUS_DROPPING +}; + +enum _TSDB_DN_STATUS { + TSDB_DN_STATUS_OFFLINE, + TSDB_DN_STATUS_READY +}; + +enum _TSDB_DN_LB_STATUS { + TSDB_DN_LB_STATUS_BALANCED, + TSDB_DN_LB_STATUS_BALANCING, + TSDB_DN_LB_STATUS_OFFLINE_REMOVING, + TSDB_DN_LB_STATE_SHELL_REMOVING +}; + +enum _TSDB_VG_LB_STATUS { + TSDB_VG_LB_STATUS_READY, + TSDB_VG_LB_STATUS_UPDATE +}; + +enum _TSDB_VN_STREAM_STATUS { + TSDB_VN_STREAM_STATUS_STOP, + TSDB_VN_STREAM_STATUS_START +}; + +enum TSDB_TABLE_STATUS { + TSDB_METER_STATE_READY = 0x00, + TSDB_METER_STATE_INSERTING = 0x01, + TSDB_METER_STATE_IMPORTING = 0x02, + TSDB_METER_STATE_UPDATING = 0x04, + TSDB_METER_STATE_DROPPING = 0x10, + TSDB_METER_STATE_DROPPED = 0x18, +}; + +const char* taosGetVgroupStatusStr(int32_t vgroupStatus); +const char* taosGetDbStatusStr(int32_t dbStatus); +const char* taosGetVnodeStatusStr(int32_t vnodeStatus); +const char* taosGetVnodeSyncStatusStr(int32_t vnodeSyncStatus); +const char* taosGetVnodeDropStatusStr(int32_t dropping); +const char* taosGetDnodeStatusStr(int32_t dnodeStatus); +const char* taosGetDnodeLbStatusStr(int32_t dnodeBalanceStatus); +const char* taosGetVgroupLbStatusStr(int32_t vglbStatus); +const char* taosGetVnodeStreamStatusStr(int32_t vnodeStreamStatus); + +const char* taosGetTableStatusStr(int32_t tableStatus); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSTATUS_H diff --git a/src/system/detail/inc/vnodeStore.h b/src/system/detail/inc/vnodeStore.h index d4eedd4ce00cfd9ff5f52b9740c99221d1e049ec..638bcb54bb40f7621a8712591775e5f4e676e704 100644 --- a/src/system/detail/inc/vnodeStore.h +++ b/src/system/detail/inc/vnodeStore.h @@ -16,8 +16,6 @@ #ifndef TDENGINE_VNODESTORE_H #define TDENGINE_VNODESTORE_H -#include - #ifdef __cplusplus extern "C" { #endif diff --git a/src/system/detail/inc/vnodeSystem.h b/src/system/detail/inc/vnodeSystem.h index e69b0b9f1e38f919819199473839edd1246ae776..e436288fc149cb5381b1e7f583d9450fd8772f5d 100644 --- a/src/system/detail/inc/vnodeSystem.h +++ b/src/system/detail/inc/vnodeSystem.h @@ -16,8 +16,6 @@ #ifndef TDENGINE_VNODESYSTEM_H #define TDENGINE_VNODESYSTEM_H -#include - #ifdef __cplusplus extern "C" { #endif diff --git a/src/system/detail/inc/vnodeUtil.h b/src/system/detail/inc/vnodeUtil.h index 870579466013cb192f403f7e15c67c8f81fa668b..b0f573ba2d466b084ce750e2e5d368cca61d353e 100644 --- a/src/system/detail/inc/vnodeUtil.h +++ b/src/system/detail/inc/vnodeUtil.h @@ -81,6 +81,8 @@ int32_t vnodeSetMeterState(SMeterObj* pMeterObj, int32_t state); void vnodeClearMeterState(SMeterObj* pMeterObj, int32_t state); bool vnodeIsMeterState(SMeterObj* pMeterObj, int32_t state); void vnodeSetMeterDeleting(SMeterObj* pMeterObj); +int32_t 
vnodeSetMeterInsertImportStateEx(SMeterObj* pObj, int32_t st); + bool vnodeIsSafeToDeleteMeter(SVnodeObj* pVnode, int32_t sid); void vnodeFreeColumnInfo(SColumnInfo* pColumnInfo); bool isGroupbyNormalCol(SSqlGroupbyExpr* pExpr); diff --git a/src/system/detail/src/dnodeMgmt.c b/src/system/detail/src/dnodeMgmt.c index 3a6c9e50a8636034983056bc5cd65da05de59478..9842e0dad6eab3355c914a34484a58861a88d132 100644 --- a/src/system/detail/src/dnodeMgmt.c +++ b/src/system/detail/src/dnodeMgmt.c @@ -14,9 +14,8 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include + +#include "os.h" #include "dnodeSystem.h" #include "taosmsg.h" @@ -27,6 +26,7 @@ #include "vnodeMgmt.h" #include "vnodeSystem.h" #include "vnodeUtil.h" +#include "vnodeStatus.h" SMgmtObj mgmtObj; extern uint64_t tsCreatedTime; @@ -105,18 +105,18 @@ int vnodeProcessCreateMeterRequest(char *pMsg, int msgLen, SMgmtObj *pObj) { if (vid >= TSDB_MAX_VNODES || vid < 0) { dError("vid:%d, vnode is out of range", vid); - code = TSDB_CODE_INVALID_SESSION_ID; + code = TSDB_CODE_INVALID_VNODE_ID; goto _over; } pVnode = vnodeList + vid; if (pVnode->cfg.maxSessions <= 0) { dError("vid:%d, not activated", vid); - code = TSDB_CODE_NOT_ACTIVE_SESSION; + code = TSDB_CODE_NOT_ACTIVE_VNODE; goto _over; } - if (pVnode->syncStatus == TSDB_SSTATUS_SYNCING) { + if (pVnode->syncStatus == TSDB_VN_SYNC_STATUS_SYNCING) { code = vnodeSaveCreateMsgIntoQueue(pVnode, pMsg, msgLen); dTrace("vid:%d, create msg is saved into sync queue", vid); } else { @@ -141,27 +141,27 @@ int vnodeProcessAlterStreamRequest(char *pMsg, int msgLen, SMgmtObj *pObj) { if (vid >= TSDB_MAX_VNODES || vid < 0) { dError("vid:%d, vnode is out of range", vid); - code = TSDB_CODE_INVALID_SESSION_ID; + code = TSDB_CODE_INVALID_VNODE_ID; goto _over; } pVnode = vnodeList + vid; if (pVnode->cfg.maxSessions <= 0 || pVnode->pCachePool == NULL) { dError("vid:%d is not activated yet", pAlter->vnode); - code = TSDB_CODE_INVALID_SESSION_ID; + code = TSDB_CODE_NOT_ACTIVE_VNODE; goto _over; } if (pAlter->sid >= pVnode->cfg.maxSessions || pAlter->sid < 0) { dError("vid:%d sid:%d uid:%ld, sid is out of range", pAlter->vnode, pAlter->sid, pAlter->uid); - code = TSDB_CODE_INVALID_SESSION_ID; + code = TSDB_CODE_INVALID_TABLE_ID; goto _over; } SMeterObj *pMeterObj = vnodeList[vid].meterList[sid]; if (pMeterObj == NULL || sid != pMeterObj->sid || vid != pMeterObj->vnode) { - dError("vid:%d sid:%d, no active session", vid, sid); - code = TSDB_CODE_NOT_ACTIVE_SESSION; + dError("vid:%d sid:%d, not active table", vid, sid); + code = TSDB_CODE_NOT_ACTIVE_TABLE; goto _over; } @@ -195,7 +195,7 @@ int vnodeProcessCreateMeterMsg(char *pMsg, int msgLen) { if (pCreate->vnode >= TSDB_MAX_VNODES || pCreate->vnode < 0) { dError("vid:%d is out of range", pCreate->vnode); - code = TSDB_CODE_INVALID_SESSION_ID; + code = TSDB_CODE_INVALID_VNODE_ID; goto _create_over; } @@ -203,13 +203,13 @@ int vnodeProcessCreateMeterMsg(char *pMsg, int msgLen) { if (pVnode->pCachePool == NULL) { dError("vid:%d is not activated yet", pCreate->vnode); vnodeSendVpeerCfgMsg(pCreate->vnode); - code = TSDB_CODE_NOT_ACTIVE_SESSION; + code = TSDB_CODE_NOT_ACTIVE_VNODE; goto _create_over; } if (pCreate->sid >= pVnode->cfg.maxSessions || pCreate->sid < 0) { dError("vid:%d sid:%d id:%s, sid is out of range", pCreate->vnode, pCreate->sid, pCreate->meterId); - code = TSDB_CODE_INVALID_SESSION_ID; + code = TSDB_CODE_INVALID_TABLE_ID; goto _create_over; } @@ -331,7 +331,7 @@ int vnodeProcessVPeerCfg(char *msg, int msgLen, SMgmtObj *pMgmtObj) { return -1; } - if 
(vnodeList[vnode].status == TSDB_STATUS_CREATING) { + if (vnodeList[vnode].vnodeStatus == TSDB_VN_STATUS_CREATING) { dTrace("vid:%d, vnode is still under creating", vnode); return 0; } @@ -350,34 +350,53 @@ int vnodeProcessVPeerCfg(char *msg, int msgLen, SMgmtObj *pMgmtObj) { pCfg->rowsInFileBlock = htonl(pCfg->rowsInFileBlock); if (pCfg->replications > 0) { - dTrace("vid:%d, vpeer cfg received, replica:%d session:%d, vnodeList replica:%d session:%d", - vnode, pCfg->replications, pCfg->maxSessions, vnodeList[vnode].cfg.replications, vnodeList[vnode].cfg.maxSessions); + dPrint("vid:%d, vpeer cfg received, replica:%d session:%d, vnodeList replica:%d session:%d, acct:%s db:%s", + vnode, pCfg->replications, pCfg->maxSessions, vnodeList[vnode].cfg.replications, vnodeList[vnode].cfg.maxSessions, + pCfg->acct, pCfg->db); for (i = 0; i < pCfg->replications; ++i) { pMsg->vpeerDesc[i].vnode = htonl(pMsg->vpeerDesc[i].vnode); pMsg->vpeerDesc[i].ip = htonl(pMsg->vpeerDesc[i].ip); - dTrace("vid:%d, vpeer:%d ip:0x%x vid:%d ", vnode, i, pMsg->vpeerDesc[i].ip, pMsg->vpeerDesc[i].vnode); + dPrint("vid:%d, vpeer:%d ip:0x%x vid:%d ", vnode, i, pMsg->vpeerDesc[i].ip, pMsg->vpeerDesc[i].vnode); } } if (vnodeList[vnode].cfg.maxSessions == 0) { + dPrint("vid:%d, vnode is empty", vnode); if (pCfg->maxSessions > 0) { - return vnodeCreateVnode(vnode, pCfg, pMsg->vpeerDesc); + if (vnodeList[vnode].vnodeStatus == TSDB_VN_STATUS_OFFLINE) { + dPrint("vid:%d, status:%s, start to create vnode", vnode, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); + return vnodeCreateVnode(vnode, pCfg, pMsg->vpeerDesc); + } else { + dPrint("vid:%d, status:%s, cannot preform create vnode operation", vnode, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); + return TSDB_CODE_INVALID_VNODE_STATUS; + } } } else { + dPrint("vid:%d, vnode is not empty", vnode); if (pCfg->maxSessions > 0) { - if (pCfg->maxSessions != vnodeList[vnode].cfg.maxSessions) { - vnodeCleanUpOneVnode(vnode); - } - - vnodeConfigVPeers(vnode, pCfg->replications, pMsg->vpeerDesc); - vnodeSaveVnodeCfg(vnode, pCfg, pMsg->vpeerDesc); + if (vnodeList[vnode].vnodeStatus == TSDB_VN_STATUS_DELETING) { + dPrint("vid:%d, status:%s, wait vnode delete finished", vnode, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); + } else { + dPrint("vid:%d, status:%s, start to update vnode", vnode, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); - if (pCfg->maxSessions != vnodeList[vnode].cfg.maxSessions) { - vnodeUpdateHeadFile(vnode, vnodeList[vnode].cfg.maxSessions, pCfg->maxSessions); - vnodeList[vnode].cfg.maxSessions = pCfg->maxSessions; - vnodeOpenVnode(vnode); + if (pCfg->maxSessions != vnodeList[vnode].cfg.maxSessions) { + vnodeCleanUpOneVnode(vnode); + } + + vnodeConfigVPeers(vnode, pCfg->replications, pMsg->vpeerDesc); + vnodeSaveVnodeCfg(vnode, pCfg, pMsg->vpeerDesc); + + /* + if (pCfg->maxSessions != vnodeList[vnode].cfg.maxSessions) { + vnodeUpdateHeadFile(vnode, vnodeList[vnode].cfg.maxSessions, pCfg->maxSessions); + vnodeList[vnode].cfg.maxSessions = pCfg->maxSessions; + vnodeOpenVnode(vnode); + } + */ } + return 0; } else { + dPrint("vid:%d, status:%s, start to delete vnode", vnode, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); vnodeRemoveVnode(vnode); } } @@ -435,11 +454,11 @@ int vnodeProcessFreeVnodeRequest(char *pMsg, int msgLen, SMgmtObj *pMgmtObj) { pFree->vnode = htons(pFree->vnode); if (pFree->vnode < 0 || pFree->vnode >= TSDB_MAX_VNODES) { - dWarn("vid:%d out of range", pFree->vnode); + dWarn("vid:%d, out of range", pFree->vnode); return 
-1; } - dTrace("vid:%d receive free vnode message", pFree->vnode); + dTrace("vid:%d, receive free vnode message", pFree->vnode); int32_t code = vnodeRemoveVnode(pFree->vnode); assert(code == TSDB_CODE_SUCCESS || code == TSDB_CODE_ACTION_IN_PROGRESS); diff --git a/src/system/detail/src/dnodeService.c b/src/system/detail/src/dnodeService.c index 86d3b4a795a789aebf51ab2e529737f55811b815..742efb7b6a72856095759d7bf8262f50103185f8 100644 --- a/src/system/detail/src/dnodeService.c +++ b/src/system/detail/src/dnodeService.c @@ -14,20 +14,8 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include + +#include "os.h" #include "dnodeSystem.h" #include "tglobalcfg.h" @@ -67,12 +55,28 @@ int main(int argc, char *argv[]) { exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-V") == 0) { - printf("version: %s compatible_version: %s\n", version, compatible_version); + char *versionStr = tsIsCluster ? "enterprise" : "community"; + printf("%s version: %s compatible_version: %s\n", versionStr, version, compatible_version); printf("gitinfo: %s\n", gitinfo); + printf("gitinfoI: %s\n", gitinfoOfInternal); printf("buildinfo: %s\n", buildinfo); return 0; } else if (strcmp(argv[i], "-k") == 0) { dnodeParseParameterK(); +#ifdef TAOS_MEM_CHECK + } else if (strcmp(argv[i], "--alloc-random-fail") == 0) { + if ((i < argc - 1) && (argv[i+1][0] != '-')) { + taosSetAllocMode(TAOS_ALLOC_MODE_RANDOM_FAIL, argv[++i], true); + } else { + taosSetAllocMode(TAOS_ALLOC_MODE_RANDOM_FAIL, NULL, true); + } + } else if (strcmp(argv[i], "--detect-mem-leak") == 0) { + if ((i < argc - 1) && (argv[i+1][0] != '-')) { + taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, argv[++i], true); + } else { + taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, NULL, true); + } +#endif } } diff --git a/src/system/detail/src/dnodeSystem.c b/src/system/detail/src/dnodeSystem.c index a0fdf95fc8f9fdafb729f271017fbf510ec2db39..49e9a37590996921b1784c56e5edca4246fd2bd0 100644 --- a/src/system/detail/src/dnodeSystem.c +++ b/src/system/detail/src/dnodeSystem.c @@ -14,11 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include +#include "os.h" #include "mgmt.h" #include "vnode.h" @@ -30,11 +26,8 @@ #include "tglobalcfg.h" #include "vnode.h" -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Woverflow" - -SModule tsModule[TSDB_MOD_MAX]; -uint32_t tsModuleStatus; +SModule tsModule[TSDB_MOD_MAX] = {0}; +uint32_t tsModuleStatus = 0; pthread_mutex_t dmutex; extern int vnodeSelectReqNum; extern int vnodeInsertReqNum; @@ -140,8 +133,12 @@ int dnodeInitSystem() { return -1; } + vnodeInitMgmtIp(); + tsPrintGlobalConfig(); - dPrint("Server IP address is:%s", tsInternalIp); + dPrint("Server IP address is:%s", tsPrivateIp); + + taosSetCoreDump(); signal(SIGPIPE, SIG_IGN); @@ -216,8 +213,6 @@ void dnodeResetSystem() { void dnodeCountRequest(SCountInfo *info) { httpGetReqCount(&info->httpReqNum); - info->selectReqNum = __sync_fetch_and_and(&vnodeSelectReqNum, 0); - info->insertReqNum = __sync_fetch_and_and(&vnodeInsertReqNum, 0); + info->selectReqNum = atomic_exchange_32(&vnodeSelectReqNum, 0); + info->insertReqNum = atomic_exchange_32(&vnodeInsertReqNum, 0); } - -#pragma GCC diagnostic pop \ No newline at end of file diff --git a/src/system/detail/src/mgmtAcct.c b/src/system/detail/src/mgmtAcct.c index dac67518e9225c0966e26985e4c4395f44b1bd43..e5f52b17f963dfeaf915c21aeedf61e9655d8cd1 100644 --- a/src/system/detail/src/mgmtAcct.c +++ 
b/src/system/detail/src/mgmtAcct.c @@ -14,7 +14,7 @@ */ #define _DEFAULT_SOURCE -#include +#include "os.h" #include "mgmt.h" #include "tschemautil.h" diff --git a/src/system/detail/src/mgmtConn.c b/src/system/detail/src/mgmtConn.c index b3fb24de6b19963348666cffc05c2745d47639a5..b440a1042e30756f50481f63afb5d82c37c5afc7 100644 --- a/src/system/detail/src/mgmtConn.c +++ b/src/system/detail/src/mgmtConn.c @@ -14,8 +14,10 @@ */ #define _DEFAULT_SOURCE + +#include "os.h" + #include "mgmt.h" -#include #include "taosmsg.h" #include "tschemautil.h" @@ -23,7 +25,7 @@ typedef struct { char user[TSDB_METER_ID_LEN]; uint64_t stime; uint32_t ip; - short port; + uint16_t port; } SConnInfo; typedef struct { @@ -46,7 +48,7 @@ int mgmtGetConns(SShowObj *pShow, SConnObj *pConn) { pConn = pAcct->pConn; SConnInfo *pConnInfo = pConnShow->connInfo; - while (pConn) { + while (pConn && pConn->pUser) { strcpy(pConnInfo->user, pConn->pUser->user); pConnInfo->ip = pConn->ip; pConnInfo->port = pConn->port; diff --git a/src/system/detail/src/mgmtDb.c b/src/system/detail/src/mgmtDb.c index ae4c5bed7ce86b712c0886b98b9cf2e5118899f6..fb449f4761279c1d452092ce5f8f23a505bfa7e7 100644 --- a/src/system/detail/src/mgmtDb.c +++ b/src/system/detail/src/mgmtDb.c @@ -14,10 +14,13 @@ */ #define _DEFAULT_SOURCE +#include "os.h" + #include "mgmt.h" -#include #include "mgmtBalance.h" +#include "mgmtUtil.h" #include "tschemautil.h" +#include "vnodeStatus.h" void *dbSdb = NULL; int tsDbUpdateSize; @@ -51,8 +54,8 @@ void mgmtDbActionInit() { } void *mgmtDbAction(char action, void *row, char *str, int size, int *ssize) { - if (mgmtDbActionFp[action] != NULL) { - return (*(mgmtDbActionFp[action]))(row, str, size, ssize); + if (mgmtDbActionFp[(uint8_t)action] != NULL) { + return (*(mgmtDbActionFp[(uint8_t)action]))(row, str, size, ssize); } return NULL; } @@ -137,91 +140,31 @@ int mgmtCheckDbParams(SCreateDbMsg *pCreate) { if (pCreate->replications < 0) pCreate->replications = tsReplications; // if (pCreate->rowsInFileBlock < 0) pCreate->rowsInFileBlock = tsRowsInFileBlock; // if (pCreate->cacheNumOfBlocks.fraction < 0) pCreate->cacheNumOfBlocks.fraction = tsAverageCacheBlocks; // - //-1 for balance - -#ifdef CLUSTER - if (pCreate->replications > TSDB_VNODES_SUPPORT - 1) pCreate->replications = TSDB_VNODES_SUPPORT - 1; -#else - pCreate->replications = 1; -#endif - - if (pCreate->commitLog < 0 || pCreate->commitLog > 1) { - mTrace("invalid db option commitLog: %d", pCreate->commitLog); - return TSDB_CODE_INVALID_OPTION; - } - if (pCreate->replications < TSDB_REPLICA_MIN_NUM || pCreate->replications > TSDB_REPLICA_MAX_NUM) { - mTrace("invalid db option replications: %d", pCreate->replications); - return TSDB_CODE_INVALID_OPTION; - } - - if (pCreate->daysPerFile < TSDB_FILE_MIN_PARTITION_RANGE || pCreate->daysPerFile > TSDB_FILE_MAX_PARTITION_RANGE) { - mTrace("invalid db option daysPerFile: %d valid range: %d--%d", pCreate->daysPerFile, TSDB_FILE_MIN_PARTITION_RANGE, - TSDB_FILE_MAX_PARTITION_RANGE); - return TSDB_CODE_INVALID_OPTION; - } - - if (pCreate->daysToKeep1 > pCreate->daysToKeep2 || pCreate->daysToKeep2 > pCreate->daysToKeep) { - mTrace("invalid db option daystokeep1: %d, daystokeep2: %d, daystokeep: %d", pCreate->daysToKeep1, - pCreate->daysToKeep2, pCreate->daysToKeep); - return TSDB_CODE_INVALID_OPTION; - } - - if (pCreate->daysToKeep1 < TSDB_FILE_MIN_PARTITION_RANGE || pCreate->daysToKeep1 < pCreate->daysPerFile) { - mTrace("invalid db option daystokeep: %d", pCreate->daysToKeep); - return TSDB_CODE_INVALID_OPTION; - } - if 
(pCreate->rowsInFileBlock < TSDB_MIN_ROWS_IN_FILEBLOCK || pCreate->rowsInFileBlock > TSDB_MAX_ROWS_IN_FILEBLOCK) { - mTrace("invalid db option rowsInFileBlock: %d valid range: %d--%d", pCreate->rowsInFileBlock, - TSDB_MIN_ROWS_IN_FILEBLOCK, TSDB_MAX_ROWS_IN_FILEBLOCK); - return TSDB_CODE_INVALID_OPTION; - } - if (pCreate->cacheBlockSize < TSDB_MIN_CACHE_BLOCK_SIZE || pCreate->cacheBlockSize > TSDB_MAX_CACHE_BLOCK_SIZE) { - mTrace("invalid db option cacheBlockSize: %d valid range: %d--%d", pCreate->cacheBlockSize, - TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MAX_CACHE_BLOCK_SIZE); - return TSDB_CODE_INVALID_OPTION; - } - if (pCreate->maxSessions < TSDB_MIN_TABLES_PER_VNODE || pCreate->maxSessions > TSDB_MAX_TABLES_PER_VNODE) { - mTrace("invalid db option maxSessions: %d valid range: %d--%d", pCreate->maxSessions, TSDB_MIN_TABLES_PER_VNODE, - TSDB_MAX_TABLES_PER_VNODE); - return TSDB_CODE_INVALID_OPTION; - } - - if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO) { - mTrace("invalid db option timePrecision: %d valid value: %d,%d", pCreate->precision, TSDB_TIME_PRECISION_MILLI, - TSDB_TIME_PRECISION_MICRO); + if (mgmtCheckDBParams(pCreate) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_OPTION; } - - if (pCreate->cacheNumOfBlocks.fraction < 0 || pCreate->cacheNumOfBlocks.fraction > TSDB_MAX_AVG_BLOCKS) { - mTrace("invalid db option ablocks: %d valid value: %d,%d", pCreate->precision, 0, TSDB_MAX_AVG_BLOCKS); - return TSDB_CODE_INVALID_OPTION; - } else { - pCreate->cacheNumOfBlocks.totalBlocks = (int32_t)(pCreate->cacheNumOfBlocks.fraction * pCreate->maxSessions); - } - + + pCreate->cacheNumOfBlocks.totalBlocks = (int32_t)(pCreate->cacheNumOfBlocks.fraction * pCreate->maxSessions); + if (pCreate->cacheNumOfBlocks.totalBlocks > TSDB_MAX_CACHE_BLOCKS) { - mTrace("invalid db option cacheNumOfBlocks: %d valid range: %d", pCreate->cacheNumOfBlocks.totalBlocks, - TSDB_MAX_CACHE_BLOCKS); + mTrace("invalid db option cacheNumOfBlocks: %d valid range: [%d, %d]", pCreate->cacheNumOfBlocks.totalBlocks, + TSDB_MIN_CACHE_BLOCKS, TSDB_MAX_CACHE_BLOCKS); return TSDB_CODE_INVALID_OPTION; } - if (pCreate->commitTime < TSDB_MIN_COMMIT_TIME_INTERVAL || pCreate->commitTime > TSDB_MAX_COMMIT_TIME_INTERVAL) { - mTrace("invalid db option commitTime: %d valid range: %d-%d", pCreate->commitTime, TSDB_MIN_COMMIT_TIME_INTERVAL, - TSDB_MAX_COMMIT_TIME_INTERVAL); - return TSDB_CODE_INVALID_OPTION; - } - if (pCreate->compression > TSDB_MAX_COMPRESSION_LEVEL) { - mTrace("invalid db option compression: %d", pCreate->compression, TSDB_MIN_COMMIT_TIME_INTERVAL, - TSDB_MAX_COMMIT_TIME_INTERVAL); - return TSDB_CODE_INVALID_OPTION; + // calculate the blocks per table + if (pCreate->blocksPerMeter < 0) { + pCreate->blocksPerMeter = pCreate->cacheNumOfBlocks.totalBlocks / 4; } - - if (pCreate->blocksPerMeter < 0) pCreate->blocksPerMeter = pCreate->cacheNumOfBlocks.totalBlocks / 4; + if (pCreate->blocksPerMeter > pCreate->cacheNumOfBlocks.totalBlocks * 3 / 4) { pCreate->blocksPerMeter = pCreate->cacheNumOfBlocks.totalBlocks * 3 / 4; } - if (pCreate->blocksPerMeter < 4) pCreate->blocksPerMeter = 4; + + if (pCreate->blocksPerMeter < TSDB_MIN_AVG_BLOCKS) { + pCreate->blocksPerMeter = TSDB_MIN_AVG_BLOCKS; + } pCreate->maxSessions++; @@ -280,8 +223,8 @@ int mgmtSetDbDropping(SDbObj *pDb) { if (pDnode == NULL) continue; SVnodeLoad *pVload = &pDnode->vload[pVnodeGid->vnode]; - if (pVload->dropStatus != TSDB_VN_STATUS_DROPPING) { - pVload->dropStatus = TSDB_VN_STATUS_DROPPING; + if 
(pVload->dropStatus != TSDB_VN_DROP_STATUS_DROPPING) { + pVload->dropStatus = TSDB_VN_DROP_STATUS_DROPPING; mPrint("dnode:%s vnode:%d db:%s set to dropping status", taosIpStr(pDnode->privateIp), pVnodeGid->vnode, pDb->name); if (mgmtUpdateDnode(pDnode) < 0) { @@ -314,10 +257,10 @@ bool mgmtCheckDropDbFinished(SDbObj *pDb) { SDnodeObj *pDnode = mgmtGetDnode(pVnodeGid->ip); if (pDnode == NULL) continue; - if (pDnode->status == TSDB_STATUS_OFFLINE) continue; + if (pDnode->status == TSDB_DN_STATUS_OFFLINE) continue; SVnodeLoad *pVload = &pDnode->vload[pVnodeGid->vnode]; - if (pVload->dropStatus == TSDB_VN_STATUS_DROPPING) { + if (pVload->dropStatus == TSDB_VN_DROP_STATUS_DROPPING) { mTrace("dnode:%s, vnode:%d db:%s wait dropping", taosIpStr(pDnode->privateIp), pVnodeGid->vnode, pDb->name); return false; } @@ -367,16 +310,18 @@ int mgmtDropDb(SDbObj *pDb) { } } -int mgmtDropDbByName(SAcctObj *pAcct, char *name) { +int mgmtDropDbByName(SAcctObj *pAcct, char *name, short ignoreNotExists) { SDbObj *pDb; pDb = (SDbObj *)sdbGetRow(dbSdb, name); if (pDb == NULL) { + if (ignoreNotExists) return TSDB_CODE_SUCCESS; mWarn("db:%s is not there", name); - // return TSDB_CODE_INVALID_DB; - return 0; + return TSDB_CODE_INVALID_DB; } - if (taosCheckDbName(pDb->name, tsMonitorDbName)) return TSDB_CODE_MONITOR_DB_FORBEIDDEN; + if (mgmtCheckIsMonitorDB(pDb->name, tsMonitorDbName)) { + return TSDB_CODE_MONITOR_DB_FORBEIDDEN; + } return mgmtDropDb(pDb); } @@ -441,7 +386,7 @@ int mgmtAlterDb(SAcctObj *pAcct, SAlterDbMsg *pAlter) { SVgObj *pVgroup = pDb->pHead; while (pVgroup != NULL) { - mgmtUpdateVgroupState(pVgroup, LB_VGROUP_STATE_UPDATE, 0); + mgmtUpdateVgroupState(pVgroup, TSDB_VG_LB_STATUS_UPDATE, 0); if (oldReplicaNum < pDb->cfg.replications) { if (!mgmtAddVnode(pVgroup, NULL, NULL)) { mWarn("db:%s vgroup:%d not enough dnode to add vnode", pAlter->db, pVgroup->vgId); diff --git a/src/system/detail/src/mgmtDnode.c b/src/system/detail/src/mgmtDnode.c index 36e3a415955bd76a0b76759935590c0846730719..2c96b9a3c0e99a4d014bd981a0f2cdd9fddab596 100644 --- a/src/system/detail/src/mgmtDnode.c +++ b/src/system/detail/src/mgmtDnode.c @@ -15,14 +15,12 @@ #define _DEFAULT_SOURCE -#include -#include -#include +#include "os.h" #include "dnodeSystem.h" #include "mgmt.h" #include "tschemautil.h" -#include "tstatus.h" +#include "vnodeStatus.h" bool mgmtCheckModuleInDnode(SDnodeObj *pDnode, int moduleType); int mgmtGetDnodesNum(); @@ -45,29 +43,30 @@ void mgmtSetDnodeMaxVnodes(SDnodeObj *pDnode) { pDnode->openVnodes = 0; #ifdef CLUSTER - pDnode->status = TSDB_STATUS_OFFLINE; + pDnode->status = TSDB_DN_STATUS_OFFLINE; #else - pDnode->status = TSDB_STATUS_READY; + pDnode->status = TSDB_DN_STATUS_READY; #endif } void mgmtCalcNumOfFreeVnodes(SDnodeObj *pDnode) { int totalVnodes = 0; + mTrace("dnode:%s, begin calc free vnodes", taosIpStr(pDnode->privateIp)); for (int i = 0; i < pDnode->numOfVnodes; ++i) { SVnodeLoad *pVload = pDnode->vload + i; if (pVload->vgId != 0) { - mTrace("dnode:%s, calc free vnodes, exist vnode:%d, vgroup:%d, state:%d %s, dropstate:%d %s, syncstatus:%d %s", - taosIpStr(pDnode->privateIp), i, pVload->vgId, - pVload->status, sdbDnodeStatusStr[pVload->status], - pVload->dropStatus, sdbVnodeDropStateStr[pVload->dropStatus], - pVload->syncStatus, sdbVnodeSyncStatusStr[pVload->syncStatus]); + mTrace("%d-dnode:%s, calc free vnodes, exist vnode:%d, vgroup:%d, state:%d %s, dropstate:%d %s, syncstatus:%d %s", + totalVnodes, taosIpStr(pDnode->privateIp), i, pVload->vgId, + pVload->status, 
taosGetVnodeStatusStr(pVload->status), + pVload->dropStatus, taosGetVnodeDropStatusStr(pVload->dropStatus), + pVload->syncStatus, taosGetVnodeSyncStatusStr(pVload->syncStatus)); totalVnodes++; } } pDnode->numOfFreeVnodes = pDnode->numOfVnodes - totalVnodes; - mTrace("dnode:%s, calc free vnodes, numOfVnodes:%d, numOfFreeVnodes:%d, totalVnodes:%d", + mTrace("dnode:%s, numOfVnodes:%d, numOfFreeVnodes:%d, totalVnodes:%d", taosIpStr(pDnode->privateIp), pDnode->numOfVnodes, pDnode->numOfFreeVnodes, totalVnodes); } @@ -198,11 +197,11 @@ int mgmtRetrieveDnodes(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - strcpy(pWrite, sdbDnodeStatusStr[pDnode->status]); + strcpy(pWrite, taosGetDnodeStatusStr(pDnode->status) ); cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - strcpy(pWrite, sdbDnodeBalanceStateStr[pDnode->lbState]); + strcpy(pWrite, taosGetDnodeLbStatusStr(pDnode->lbStatus)); cols++; tinet_ntoa(ipstr, pDnode->publicIp); @@ -294,7 +293,7 @@ int mgmtRetrieveModules(SShowObj *pShow, char *data, int rows, SConnObj *pConn) cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - strcpy(pWrite, sdbDnodeStatusStr[pDnode->status]); + strcpy(pWrite, taosGetDnodeStatusStr(pDnode->status) ); cols++; numOfRows++; @@ -389,3 +388,120 @@ int mgmtRetrieveConfigs(SShowObj *pShow, char *data, int rows, SConnObj *pConn) pShow->numOfReads += numOfRows; return numOfRows; } + +int mgmtGetVnodeMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { + int cols = 0; + + if (strcmp(pConn->pAcct->user, "root") != 0) return TSDB_CODE_NO_RIGHTS; + + SSchema *pSchema = tsGetSchema(pMeta); + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "vnode"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "vgid"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 12; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "status"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 12; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "sync status"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + + // TODO: if other thread drop dnode ???? 
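
The new vnode meta/retrieve handlers (apparently backing a show-vnodes style command) reuse the column-major result layout of the other show commands: for each column, `rows` cells of `bytes[col]` are laid out back to back, so a cell lives at `data + offset[col] * rows + bytes[col] * rowIndex`, with `offset` built as a running sum of the previous columns' byte widths. A small self-contained illustration of that addressing:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Column-major layout used by the show/retrieve buffers: all cells of column 0
 * come first (rows * bytes[0]), then all cells of column 1, and so on. */
int main(void) {
  enum { ROWS = 4, COLS = 2 };
  int32_t bytes[COLS]  = { 4, 12 };  /* e.g. "vnode" (int) and "status" (binary 12) */
  int32_t offset[COLS] = { 0 };
  for (int i = 1; i < COLS; ++i) offset[i] = offset[i - 1] + bytes[i - 1];

  char data[ROWS * (4 + 12)] = { 0 };

  int row = 2;                       /* fill the third row */
  int32_t vnode = 7;
  memcpy(data + offset[0] * ROWS + bytes[0] * row, &vnode, sizeof(vnode));
  strcpy(data + offset[1] * ROWS + bytes[1] * row, "master");

  int32_t readBack;
  memcpy(&readBack, data + offset[0] * ROWS + bytes[0] * row, sizeof(readBack));
  printf("row %d: vnode=%d status=%s\n", row, readBack,
         data + offset[1] * ROWS + bytes[1] * row);
  return 0;
}
```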
+ SDnodeObj *pDnode = NULL; + if (pShow->payloadLen > 0 ) { + uint32_t ip = ip2uint(pShow->payload); + pDnode = mgmtGetDnode(ip); + if (NULL == pDnode) { + return TSDB_CODE_NODE_OFFLINE; + } + + pShow->numOfRows = pDnode->openVnodes; + pShow->pNode = pDnode; + + } else { + while (true) { + pShow->pNode = mgmtGetNextDnode(pShow, (SDnodeObj **)&pDnode); + if (pDnode == NULL) break; + pShow->numOfRows += pDnode->openVnodes; + + if (0 == pShow->numOfRows) return TSDB_CODE_NODE_OFFLINE; + } + + pShow->pNode = NULL; + } + + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + + return 0; +} + +int mgmtRetrieveVnodes(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { + int numOfRows = 0; + SDnodeObj *pDnode = NULL; + char * pWrite; + int cols = 0; + + if (0 == rows) return 0; + + if (pShow->payloadLen) { + // output the vnodes info of the designated dnode. And output all vnodes of this dnode, instead of rows (max 100) + pDnode = (SDnodeObj *)(pShow->pNode); + if (pDnode != NULL) { + SVnodeLoad* pVnode; + for (int i = 0 ; i < TSDB_MAX_VNODES; i++) { + pVnode = &pDnode->vload[i]; + if (0 == pVnode->vgId) { + continue; + } + + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(uint32_t *)pWrite = pVnode->vnode; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(uint32_t *)pWrite = pVnode->vgId; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, taosGetVnodeStatusStr(pVnode->status)); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, taosGetVnodeSyncStatusStr(pVnode->syncStatus)); + cols++; + + numOfRows++; + } + } + } else { + // TODO: output all vnodes of all dnodes + numOfRows = 0; + } + + pShow->numOfReads += numOfRows; + return numOfRows; +} + + diff --git a/src/system/detail/src/mgmtDnodeInt.c b/src/system/detail/src/mgmtDnodeInt.c index 2b7fe3cf442e3e7aab2a10af0e974692fb886e77..6b6571b06c713bd9b6c41dab690652cf1c6fd639 100644 --- a/src/system/detail/src/mgmtDnodeInt.c +++ b/src/system/detail/src/mgmtDnodeInt.c @@ -14,8 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include +#include "os.h" #include "dnodeSystem.h" #include "mgmt.h" @@ -44,6 +43,11 @@ int mgmtProcessMeterCfgMsg(char *cont, int contLen, SDnodeObj *pObj) { SMeterCfgMsg *pCfg = (SMeterCfgMsg *)cont; SVgObj * pVgroup; + if (!sdbMaster) { + taosSendSimpleRspToDnode(pObj, TSDB_MSG_TYPE_METER_CFG_RSP, TSDB_CODE_REDIRECT); + return 0; + } + int vnode = htonl(pCfg->vnode); int sid = htonl(pCfg->sid); @@ -52,6 +56,7 @@ int mgmtProcessMeterCfgMsg(char *cont, int contLen, SDnodeObj *pObj) { taosSendSimpleRspToDnode(pObj, TSDB_MSG_TYPE_METER_CFG_RSP, TSDB_CODE_SERV_OUT_OF_MEMORY); return 0; } + pMsg = pStart; if (vnode < pObj->numOfVnodes) { @@ -88,10 +93,18 @@ int mgmtProcessVpeerCfgMsg(char *cont, int contLen, SDnodeObj *pObj) { SVpeerCfgMsg *pCfg = (SVpeerCfgMsg *)cont; SVgObj * pVgroup = NULL; + if (!sdbMaster) { + taosSendSimpleRspToDnode(pObj, TSDB_MSG_TYPE_VPEER_CFG_RSP, TSDB_CODE_REDIRECT); + return 0; + } + int vnode = htonl(pCfg->vnode); pStart = taosBuildRspMsgToDnode(pObj, TSDB_MSG_TYPE_VPEER_CFG_RSP); - if (pStart == NULL) return 0; + if (pStart == NULL) { + taosSendSimpleRspToDnode(pObj, TSDB_MSG_TYPE_VPEER_CFG_RSP, TSDB_CODE_SERV_OUT_OF_MEMORY); + return 0; + } pMsg = pStart; if (vnode < pObj->numOfVnodes) pVgroup = mgmtGetVgroup(pObj->vload[vnode].vgId); @@ -100,10 +113,10 @@ int 
mgmtProcessVpeerCfgMsg(char *cont, int contLen, SDnodeObj *pObj) { *pMsg = 0; pMsg++; pMsg = mgmtBuildVpeersIe(pMsg, pVgroup, vnode); - mTrace("dnode:%s, vnode:%d, vgroup:%d, send create meter msg, code:%d", taosIpStr(pObj->privateIp), vnode, pVgroup->vgId, *pMsg); + mTrace("dnode:%s, vnode:%d, vgroup:%d, send create vnode msg, code:%d", taosIpStr(pObj->privateIp), vnode, pVgroup->vgId, *pMsg); } else { mTrace("dnode:%s, vnode:%d, no vgroup info, vgroup:%d", taosIpStr(pObj->privateIp), vnode, pObj->vload[vnode].vgId); - *pMsg = TSDB_CODE_INVALID_VALUE; + *pMsg = TSDB_CODE_NOT_ACTIVE_VNODE; pMsg++; *(int32_t *)pMsg = htonl(vnode); pMsg += sizeof(int32_t); @@ -122,14 +135,20 @@ int mgmtProcessFreeVnodeRsp(char *msg, int msgLen, SDnodeObj *pObj) { return 0; int mgmtProcessVPeersRsp(char *msg, int msgLen, SDnodeObj *pObj) { STaosRsp *pRsp = (STaosRsp *)msg; + if (!sdbMaster) { + taosSendSimpleRspToDnode(pObj, TSDB_MSG_TYPE_VPEERS_RSP, TSDB_CODE_REDIRECT); + return 0; + } + SDbObj *pDb = mgmtGetDb(pRsp->more); if (!pDb) { - mError("dnode:%s, db not find, code:%d", taosIpStr(pObj->privateIp), pRsp->code); + mError("dnode:%s, db:%s not find, code:%d", taosIpStr(pObj->privateIp), pRsp->more, pRsp->code); return 0; } if (pDb->vgStatus != TSDB_VG_STATUS_IN_PROGRESS) { - mTrace("dnode:%s, db:%s vpeer rsp already disposed, code:%d", taosIpStr(pObj->privateIp), pRsp->more, pRsp->code); + mTrace("dnode:%s, db:%s vpeer rsp already disposed, vgroup status:%s code:%d", + taosIpStr(pObj->privateIp), pRsp->more, taosGetVgroupStatusStr(pDb->vgStatus), pRsp->code); return 0; } @@ -141,10 +160,11 @@ int mgmtProcessVPeersRsp(char *msg, int msgLen, SDnodeObj *pObj) { if (pRsp->code == TSDB_CODE_VG_COMMITLOG_INIT_FAILED) { pDb->vgStatus = TSDB_VG_STATUS_COMMITLOG_INIT_FAILED; + mError("dnode:%s, db:%s vgroup commit log init failed, code:%d", taosIpStr(pObj->privateIp), pRsp->more, pRsp->code); } else { pDb->vgStatus = TSDB_VG_STATUS_INIT_FAILED; + mError("dnode:%s, db:%s vgroup init failed, code:%d", taosIpStr(pObj->privateIp), pRsp->more, pRsp->code); } - mError("dnode:%s, db:%s vgroup create failed, code:%d", taosIpStr(pObj->privateIp), pRsp->more, pRsp->code); return 0; } @@ -332,7 +352,6 @@ char *mgmtBuildVpeersIe(char *pMsg, SVgObj *pVgroup, int vnode) { pCfg->replications = (char)pVgroup->numOfVnodes; pCfg->rowsInFileBlock = htonl(pCfg->rowsInFileBlock); -#ifdef CLUSTER SVPeerDesc *vpeerDesc = pVPeers->vpeerDesc; pMsg = (char *)(pVPeers->vpeerDesc); @@ -342,7 +361,6 @@ char *mgmtBuildVpeersIe(char *pMsg, SVgObj *pVgroup, int vnode) { vpeerDesc[j].vnode = htonl(pVgroup->vnodeGid[j].vnode); pMsg += sizeof(SVPeerDesc); } -#endif return pMsg; } diff --git a/src/system/detail/src/mgmtMeter.c b/src/system/detail/src/mgmtMeter.c index bccf9a06c8652cecf6e0ef9b884479523358c614..6ff9448c09caf49a6d26bb32d22c1eb4ffa98b5e 100644 --- a/src/system/detail/src/mgmtMeter.c +++ b/src/system/detail/src/mgmtMeter.c @@ -14,10 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include +#include "os.h" #include "mgmt.h" #include "mgmtUtil.h" @@ -30,6 +27,7 @@ #include "tsqlfunction.h" #include "ttime.h" #include "vnodeTagMgmt.h" +#include "vnodeStatus.h" extern int64_t sdbVersion; @@ -235,6 +233,10 @@ void *mgmtMeterActionDelete(void *row, char *str, int size, int *ssize) { pMeter = (STabObj *)row; if (mgmtIsNormalMeter(pMeter)) { + if (pMeter->gid.vgId == 0) { + return NULL; + } + pVgroup = mgmtGetVgroup(pMeter->gid.vgId); if (pVgroup == NULL) { mError("id:%s not in vgroup:%d", pMeter->meterId, pMeter->gid.vgId); 
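
Several dnode-facing handlers in mgmtDnodeInt.c now begin with an `sdbMaster` check and answer with `TSDB_CODE_REDIRECT` when this mgmt node is not the sdb master, instead of serving possibly stale metadata. A sketch of that guard factored into a helper; the wrapper name is invented, and the types and response helper are assumed to come from the surrounding mgmt headers:

```c
#include <stdbool.h>

extern bool sdbMaster;  /* true only on the mgmt node that currently owns the sdb */

/* Hypothetical wrapper: sends a redirect and returns true when this node
 * must not answer the request itself; mirrors the checks added in the patch. */
static bool redirectIfNotMaster(SDnodeObj *pObj, int rspMsgType) {
  if (!sdbMaster) {
    taosSendSimpleRspToDnode(pObj, rspMsgType, TSDB_CODE_REDIRECT);
    return true;
  }
  return false;
}
```

A handler would then start with `if (redirectIfNotMaster(pObj, TSDB_MSG_TYPE_METER_CFG_RSP)) return 0;` before touching any sdb state.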
@@ -416,8 +418,8 @@ void *mgmtMeterActionAfterBatchUpdate(void *row, char *str, int size, int *ssize } void *mgmtMeterAction(char action, void *row, char *str, int size, int *ssize) { - if (mgmtMeterActionFp[action] != NULL) { - return (*(mgmtMeterActionFp[action]))(row, str, size, ssize); + if (mgmtMeterActionFp[(uint8_t)action] != NULL) { + return (*(mgmtMeterActionFp[(uint8_t)action]))(row, str, size, ssize); } return NULL; } @@ -428,6 +430,7 @@ void mgmtAddMeterStatisticToAcct(STabObj *pMeter, SAcctObj *pAcct) { int mgmtInitMeters() { void * pNode = NULL; + void * pLastNode = NULL; SVgObj * pVgroup = NULL; STabObj * pMeter = NULL; STabObj * pMetric = NULL; @@ -453,21 +456,47 @@ int mgmtInitMeters() { pNode = NULL; while (1) { + pLastNode = pNode; pNode = sdbFetchRow(meterSdb, pNode, (void **)&pMeter); if (pMeter == NULL) break; pDb = mgmtGetDbByMeterId(pMeter->meterId); if (pDb == NULL) { - mError("failed to get db: %s", pMeter->meterId); + mError("meter:%s, failed to get db, discard it", pMeter->meterId, pMeter->gid.vgId, pMeter->gid.sid); + pMeter->gid.vgId = 0; + sdbDeleteRow(meterSdb, pMeter); + pNode = pLastNode; continue; } if (mgmtIsNormalMeter(pMeter)) { pVgroup = mgmtGetVgroup(pMeter->gid.vgId); - if (pVgroup == NULL || pVgroup->meterList == NULL) { - mError("failed to get vgroup:%i", pMeter->gid.vgId); + + if (pVgroup == NULL) { + mError("meter:%s, failed to get vgroup:%d sid:%d, discard it", pMeter->meterId, pMeter->gid.vgId, pMeter->gid.sid); + pMeter->gid.vgId = 0; + sdbDeleteRow(meterSdb, pMeter); + pNode = pLastNode; continue; } + + if (strcmp(pVgroup->dbName, pDb->name) != 0) { + mError("meter:%s, db:%s not match with vgroup:%d db:%s sid:%d, discard it", + pMeter->meterId, pDb->name, pMeter->gid.vgId, pVgroup->dbName, pMeter->gid.sid); + pMeter->gid.vgId = 0; + sdbDeleteRow(meterSdb, pMeter); + pNode = pLastNode; + continue; + } + + if ( pVgroup->meterList == NULL) { + mError("meter:%s, vgroup:%d meterlist is null", pMeter->meterId, pMeter->gid.vgId); + pMeter->gid.vgId = 0; + sdbDeleteRow(meterSdb, pMeter); + pNode = pLastNode; + continue; + } + pVgroup->meterList[pMeter->gid.sid] = pMeter; taosIdPoolMarkStatus(pVgroup->idPool, pMeter->gid.sid, 1); @@ -505,7 +534,7 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { int numOfTables = sdbGetNumOfRows(meterSdb); if (numOfTables >= tsMaxTables) { - mWarn("numOfTables:%d, exceed tsMaxTables:%d", numOfTables, tsMaxTables); + mError("table:%s, numOfTables:%d exceed maxTables:%d", pCreate->meterId, numOfTables, tsMaxTables); return TSDB_CODE_TOO_MANY_TABLES; } @@ -513,6 +542,7 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { assert(pAcct != NULL); int code = mgmtCheckMeterLimit(pAcct, pCreate); if (code != 0) { + mError("table:%s, exceed the limit", pCreate->meterId); return code; } @@ -536,6 +566,7 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { char *pTagData = (char *)pCreate->schema; // it is a tag key pMetric = mgmtGetMeter(pTagData); if (pMetric == NULL) { + mError("table:%s, corresponding super table does not exist", pCreate->meterId); return TSDB_CODE_INVALID_TABLE; } @@ -548,6 +579,7 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { pMeter->schema = (char *)malloc(size); if (pMeter->schema == NULL) { mgmtDestroyMeter(pMeter); + mError("table:%s, corresponding super table schema is null", pCreate->meterId); return TSDB_CODE_INVALID_TABLE; } memset(pMeter->schema, 0, size); @@ -559,13 +591,13 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { 
pMeter->pTagData = pMeter->schema; pMeter->nextColId = pMetric->nextColId; memcpy(pMeter->pTagData, pTagData, size); - } else { int numOfCols = pCreate->numOfColumns + pCreate->numOfTags; size = numOfCols * sizeof(SSchema) + pCreate->sqlLen; pMeter->schema = (char *)malloc(size); if (pMeter->schema == NULL) { mgmtDestroyMeter(pMeter); + mError("table:%s, no schema input", pCreate->meterId); return TSDB_CODE_SERV_OUT_OF_MEMORY; } memset(pMeter->schema, 0, size); @@ -586,7 +618,7 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { pMeter->pSql = pMeter->schema + numOfCols * sizeof(SSchema); memcpy(pMeter->pSql, (char *)(pCreate->schema) + numOfCols * sizeof(SSchema), pCreate->sqlLen); pMeter->pSql[pCreate->sqlLen - 1] = 0; - mTrace("stream sql len:%d, sql:%s", pCreate->sqlLen, pMeter->pSql); + mTrace("table:%s, stream sql len:%d sql:%s", pCreate->meterId, pCreate->sqlLen, pMeter->pSql); } else { if (pCreate->numOfTags > 0) { pMeter->meterType = TSDB_METER_METRIC; @@ -599,13 +631,14 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { pMeter->createdTime = taosGetTimestampMs(); strcpy(pMeter->meterId, pCreate->meterId); if (pthread_rwlock_init(&pMeter->rwLock, NULL)) { - mError("Failed to init meter lock"); + mError("table:%s, failed to init meter lock", pCreate->meterId); mgmtDestroyMeter(pMeter); - return TSDB_CODE_OTHERS; + return TSDB_CODE_FAILED_TO_LOCK_RESOURCES; } code = mgmtCheckMeterGrant(pCreate, pMeter); if (code != 0) { + mError("table:%s, grant expired", pCreate->meterId); return code; } @@ -614,21 +647,25 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { if (pDb->vgStatus == TSDB_VG_STATUS_IN_PROGRESS) { mgmtDestroyMeter(pMeter); + //mTrace("table:%s, vgroup in creating progress", pCreate->meterId); return TSDB_CODE_ACTION_IN_PROGRESS; } if (pDb->vgStatus == TSDB_VG_STATUS_FULL) { mgmtDestroyMeter(pMeter); + mError("table:%s, vgroup is full", pCreate->meterId); return TSDB_CODE_NO_ENOUGH_DNODES; } if (pDb->vgStatus == TSDB_VG_STATUS_COMMITLOG_INIT_FAILED) { mgmtDestroyMeter(pMeter); + mError("table:%s, commit log init failed", pCreate->meterId); return TSDB_CODE_VG_COMMITLOG_INIT_FAILED; } if (pDb->vgStatus == TSDB_VG_STATUS_INIT_FAILED) { mgmtDestroyMeter(pMeter); + mError("table:%s, vgroup init failed", pCreate->meterId); return TSDB_CODE_VG_INIT_FAILED; } @@ -636,12 +673,13 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { pDb->vgStatus = TSDB_VG_STATUS_IN_PROGRESS; mgmtCreateVgroup(pDb); mgmtDestroyMeter(pMeter); + mTrace("table:%s, vgroup malloced, wait for create progress finished", pCreate->meterId); return TSDB_CODE_ACTION_IN_PROGRESS; } int sid = taosAllocateId(pVgroup->idPool); if (sid < 0) { - mWarn("db:%s, vgroup:%d, run out of ID, num:%d", pDb->name, pVgroup->vgId, taosIdPoolNumOfUsed(pVgroup->idPool)); + mWarn("table:%s, vgroup:%d run out of ID, num:%d", pCreate->meterId, pVgroup->vgId, taosIdPoolNumOfUsed(pVgroup->idPool)); pDb->vgStatus = TSDB_VG_STATUS_IN_PROGRESS; mgmtCreateVgroup(pDb); mgmtDestroyMeter(pMeter); @@ -653,18 +691,21 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { pMeter->uid = (((uint64_t)pMeter->gid.vgId) << 40) + ((((uint64_t)pMeter->gid.sid) & ((1ul << 24) - 1ul)) << 16) + ((uint64_t)sdbVersion & ((1ul << 16) - 1ul)); - mTrace("meter:%s, create meter in vgroup, vgId:%d, sid:%d, vnode:%d, uid:%d", - pMeter->meterId, pVgroup->vgId, sid, pVgroup->vnodeGid[0].vnode, pMeter->uid); + mTrace("table:%s, create table in vgroup, vgId:%d sid:%d vnode:%d uid:%llu db:%s", + pMeter->meterId, 
pVgroup->vgId, sid, pVgroup->vnodeGid[0].vnode, pMeter->uid, pDb->name); } else { pMeter->uid = (((uint64_t)pMeter->createdTime) << 16) + ((uint64_t)sdbVersion & ((1ul << 16) - 1ul)); } - if (sdbInsertRow(meterSdb, pMeter, 0) < 0) return TSDB_CODE_SDB_ERROR; + if (sdbInsertRow(meterSdb, pMeter, 0) < 0) { + mError("table:%s, update sdb error", pCreate->meterId); + return TSDB_CODE_SDB_ERROR; + } // send create message to the selected vnode servers if (pCreate->numOfTags == 0) { - mTrace("meter:%s, send msg to dnode, vgId:%d, sid:%d, vnode:%d, dbname:%s", - pMeter->meterId, pMeter->gid.vgId, pMeter->gid.sid, pVgroup->vnodeGid[0].vnode, pDb->name); + mTrace("table:%s, send create table msg to dnode, vgId:%d, sid:%d, vnode:%d", + pMeter->meterId, pMeter->gid.vgId, pMeter->gid.sid, pVgroup->vnodeGid[0].vnode); grantAddTimeSeries(pMeter->numOfColumns - 1); mgmtSendCreateMsgToVgroup(pMeter, pVgroup); @@ -688,8 +729,10 @@ int mgmtDropMeter(SDbObj *pDb, char *meterId, int ignore) { pAcct = mgmtGetAcct(pDb->cfg.acct); - // 0.sys - if (taosCheckDbName(pDb->name, tsMonitorDbName)) return TSDB_CODE_MONITOR_DB_FORBEIDDEN; + // 0.log + if (mgmtCheckIsMonitorDB(pDb->name, tsMonitorDbName)) { + return TSDB_CODE_MONITOR_DB_FORBEIDDEN; + } if (mgmtIsNormalMeter(pMeter)) { return dropMeterImp(pDb, pMeter, pAcct); @@ -719,8 +762,8 @@ int mgmtAlterMeter(SDbObj *pDb, SAlterTableMsg *pAlter) { return TSDB_CODE_INVALID_TABLE; } - // 0.sys - if (taosCheckDbName(pDb->name, tsMonitorDbName)) return TSDB_CODE_MONITOR_DB_FORBEIDDEN; + // 0.log + if (mgmtCheckIsMonitorDB(pDb->name, tsMonitorDbName)) return TSDB_CODE_MONITOR_DB_FORBEIDDEN; if (pAlter->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) { if (!mgmtIsNormalMeter(pMeter) || !mgmtMeterCreateFromMetric(pMeter)) { @@ -833,6 +876,7 @@ static void removeMeterFromMetricIndex(STabObj *pMetric, STabObj *pMeter) { } } + tSkipListDestroyKey(&key); if (num != 0) { free(pRes); } @@ -881,7 +925,10 @@ void mgmtCleanUpMeters() { sdbCloseTable(meterSdb); } int mgmtGetMeterMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { int cols = 0; - if (pConn->pDb == NULL) return TSDB_CODE_DB_NOT_SELECTED; + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + + if (pDb == NULL) return TSDB_CODE_DB_NOT_SELECTED; SSchema *pSchema = tsGetSchema(pMeta); @@ -916,7 +963,7 @@ int mgmtGetMeterMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; // pShow->numOfRows = sdbGetNumOfRows (meterSdb); - pShow->numOfRows = pConn->pDb->numOfTables; + pShow->numOfRows = pDb->numOfTables; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; return 0; @@ -937,10 +984,32 @@ SSchema *mgmtGetMeterSchema(STabObj *pMeter) { return (SSchema *)pMetric->schema; } +static int32_t mgmtSerializeTagValue(char* pMsg, STabObj* pMeter, int16_t* tagsId, int32_t numOfTags) { + int32_t offset = 0; + + for (int32_t j = 0; j < numOfTags; ++j) { + if (tagsId[j] == TSDB_TBNAME_COLUMN_INDEX) { // handle the table name tags + char name[TSDB_METER_NAME_LEN] = {0}; + extractTableName(pMeter->meterId, name); + + memcpy(pMsg + offset, name, TSDB_METER_NAME_LEN); + offset += TSDB_METER_NAME_LEN; + } else { + SSchema s = {0}; + char * tag = mgmtMeterGetTag(pMeter, tagsId[j], &s); + + memcpy(pMsg + offset, tag, (size_t)s.bytes); + offset += s.bytes; + } + } + + return offset; +} + /* * serialize SVnodeSidList to byte array */ -static char *mgmtBuildMetricMetaMsg(STabObj *pMeter, int32_t *ovgId, 
SVnodeSidList **pList, SMetricMeta *pMeta, +static char *mgmtBuildMetricMetaMsg(SConnObj *pConn, STabObj *pMeter, int32_t *ovgId, SVnodeSidList **pList, SMetricMeta *pMeta, int32_t tagLen, int16_t numOfTags, int16_t *tagsId, int32_t maxNumOfMeters, char *pMsg) { if (pMeter->gid.vgId != *ovgId || ((*pList) != NULL && (*pList)->numOfSids >= maxNumOfMeters)) { @@ -949,7 +1018,6 @@ static char *mgmtBuildMetricMetaMsg(STabObj *pMeter, int32_t *ovgId, SVnodeSidLi * 1. the query msg may be larger than 64k, * 2. the following meters belong to different vnodes */ - (*pList) = (SVnodeSidList *)pMsg; (*pList)->numOfSids = 0; (*pList)->index = 0; @@ -957,8 +1025,13 @@ static char *mgmtBuildMetricMetaMsg(STabObj *pMeter, int32_t *ovgId, SVnodeSidLi SVgObj *pVgroup = mgmtGetVgroup(pMeter->gid.vgId); for (int i = 0; i < TSDB_VNODES_SUPPORT; ++i) { - (*pList)->vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; - (*pList)->vpeerDesc[i].vnode = pVgroup->vnodeGid[i].vnode; + if (pConn->usePublicIp) { + (*pList)->vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; + (*pList)->vpeerDesc[i].vnode = pVgroup->vnodeGid[i].vnode; + } else { + (*pList)->vpeerDesc[i].ip = pVgroup->vnodeGid[i].ip; + (*pList)->vpeerDesc[i].vnode = pVgroup->vnodeGid[i].vnode; + } } pMsg += sizeof(SVnodeSidList); @@ -968,29 +1041,15 @@ static char *mgmtBuildMetricMetaMsg(STabObj *pMeter, int32_t *ovgId, SVnodeSidLi (*pList)->numOfSids++; SMeterSidExtInfo *pSMeterTagInfo = (SMeterSidExtInfo *)pMsg; - pSMeterTagInfo->sid = pMeter->gid.sid; + pSMeterTagInfo->sid = htonl(pMeter->gid.sid); + pSMeterTagInfo->uid = htobe64(pMeter->uid); + pMsg += sizeof(SMeterSidExtInfo); - int32_t offset = 0; - for (int32_t j = 0; j < numOfTags; ++j) { - if (tagsId[j] == -1) { - char name[TSDB_METER_NAME_LEN] = {0}; - extractMeterName(pMeter->meterId, name); - - memcpy(pMsg + offset, name, TSDB_METER_NAME_LEN); - offset += TSDB_METER_NAME_LEN; - } else { - SSchema s = {0}; - char * tag = mgmtMeterGetTag(pMeter, tagsId[j], &s); - - memcpy(pMsg + offset, tag, (size_t)s.bytes); - offset += s.bytes; - } - } - - pMsg += offset; + int32_t offset = mgmtSerializeTagValue(pMsg, pMeter, tagsId, numOfTags); assert(offset == tagLen); - + + pMsg += offset; return pMsg; } @@ -1047,18 +1106,21 @@ static SMetricMetaElemMsg *doConvertMetricMetaMsg(SMetricMetaMsg *pMetricMetaMsg pElem->groupbyTagColumnList = htonl(pElem->groupbyTagColumnList); - int16_t *groupColIds = (int16_t*) (((char *)pMetricMetaMsg) + pElem->groupbyTagColumnList); + SColIndexEx *groupColIds = (SColIndexEx*) (((char *)pMetricMetaMsg) + pElem->groupbyTagColumnList); for (int32_t i = 0; i < pElem->numOfGroupCols; ++i) { - groupColIds[i] = htons(groupColIds[i]); + groupColIds[i].colId = htons(groupColIds[i].colId); + groupColIds[i].colIdx = htons(groupColIds[i].colIdx); + groupColIds[i].flag = htons(groupColIds[i].flag); + groupColIds[i].colIdxInBuf = 0; } return pElem; } -static int32_t mgmtBuildMetricMetaRspMsg(void *thandle, SMetricMetaMsg *pMetricMetaMsg, tQueryResultset *pResult, +static int32_t mgmtBuildMetricMetaRspMsg(SConnObj *pConn, SMetricMetaMsg *pMetricMetaMsg, tQueryResultset *pResult, char **pStart, int32_t *tagLen, int32_t rspMsgSize, int32_t maxTablePerVnode, int32_t code) { - *pStart = taosBuildRspMsgWithSize(thandle, TSDB_MSG_TYPE_METRIC_META_RSP, rspMsgSize); + *pStart = taosBuildRspMsgWithSize(pConn->thandle, TSDB_MSG_TYPE_METRIC_META_RSP, rspMsgSize); if (*pStart == NULL) { return 0; } @@ -1096,7 +1158,7 @@ static int32_t mgmtBuildMetricMetaRspMsg(void *thandle, SMetricMetaMsg *pMetricM for 
(int32_t i = 0; i < pResult[j].num; ++i) { STabObj *pMeter = pResult[j].pRes[i]; - pMsg = mgmtBuildMetricMetaMsg(pMeter, &ovgId, &pList, pMeta, tagLen[j], pElem->numOfTags, pElem->tagCols, + pMsg = mgmtBuildMetricMetaMsg(pConn, pMeter, &ovgId, &pList, pMeta, tagLen[j], pElem->numOfTags, pElem->tagCols, maxTablePerVnode, pMsg); } @@ -1112,7 +1174,7 @@ static int32_t mgmtBuildMetricMetaRspMsg(void *thandle, SMetricMetaMsg *pMetricM return msgLen; } -int mgmtRetrieveMetricMeta(void *thandle, char **pStart, SMetricMetaMsg *pMetricMetaMsg) { +int mgmtRetrieveMetricMeta(SConnObj *pConn, char **pStart, SMetricMetaMsg *pMetricMetaMsg) { /* * naive method: Do not limit the maximum number of meters in each * vnode(subquery), split the result according to vnodes @@ -1159,12 +1221,9 @@ int mgmtRetrieveMetricMeta(void *thandle, char **pStart, SMetricMetaMsg *pMetric #endif if (ret == TSDB_CODE_SUCCESS) { + // todo opt performance for (int32_t i = 0; i < pMetricMetaMsg->numOfMeters; ++i) { ret = mgmtRetrieveMetersFromMetric(pMetricMetaMsg, i, &result[i]); - // todo opt performance - // if (result[i].num <= 0) {//no result - // } else if (result[i].num < 10) { - // } } } @@ -1186,8 +1245,7 @@ int mgmtRetrieveMetricMeta(void *thandle, char **pStart, SMetricMetaMsg *pMetric msgLen = 512; } - msgLen = mgmtBuildMetricMetaRspMsg(thandle, pMetricMetaMsg, result, pStart, tagLen, msgLen, maxMetersPerVNodeForQuery, - ret); + msgLen = mgmtBuildMetricMetaRspMsg(pConn, pMetricMetaMsg, result, pStart, tagLen, msgLen, maxMetersPerVNodeForQuery, ret); for (int32_t i = 0; i < pMetricMetaMsg->numOfMeters; ++i) { tQueryResultClean(&result[i]); @@ -1208,8 +1266,12 @@ int mgmtRetrieveMeters(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { int numOfRead = 0; char prefix[20] = {0}; - if (pConn->pDb == NULL) return 0; - strcpy(prefix, pConn->pDb->name); + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + + if (pDb == NULL) return 0; + + strcpy(prefix, pDb->name); strcat(prefix, TS_PATH_DELIMITER); prefixLen = strlen(prefix); @@ -1229,7 +1291,7 @@ int mgmtRetrieveMeters(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { memset(meterName, 0, tListLen(meterName)); // pattern compare for meter name - extractMeterName(pMeter->meterId, meterName); + extractTableName(pMeter->meterId, meterName); if (pShow->payloadLen > 0 && patternMatch(pShow->payload, meterName, TSDB_METER_NAME_LEN, &info) != TSDB_PATTERN_MATCH) @@ -1251,7 +1313,7 @@ int mgmtRetrieveMeters(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; if (pMeter->pTagData) { - extractMeterName(pMeter->pTagData, pWrite); + extractTableName(pMeter->pTagData, pWrite); } cols++; @@ -1269,7 +1331,10 @@ int mgmtRetrieveMeters(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { int mgmtGetMetricMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { int cols = 0; - if (pConn->pDb == NULL) return TSDB_CODE_DB_NOT_SELECTED; + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + + if (pDb == NULL) return TSDB_CODE_DB_NOT_SELECTED; SSchema *pSchema = tsGetSchema(pMeta); @@ -1309,8 +1374,8 @@ int mgmtGetMetricMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { pShow->offset[0] = 0; for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; - pShow->numOfRows = pConn->pDb->numOfMetrics; - pShow->pNode = pConn->pDb->pMetric; + pShow->numOfRows = pDb->numOfMetrics; + pShow->pNode 
= pDb->pMetric; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; return 0; @@ -1332,7 +1397,7 @@ int mgmtRetrieveMetrics(SShowObj *pShow, char *data, int rows, SConnObj *pConn) pShow->pNode = (void *)pMetric->next; memset(metricName, 0, tListLen(metricName)); - extractMeterName(pMetric->meterId, metricName); + extractTableName(pMetric->meterId, metricName); if (pShow->payloadLen > 0 && patternMatch(pShow->payload, metricName, TSDB_METER_NAME_LEN, &info) != TSDB_PATTERN_MATCH) @@ -1341,7 +1406,7 @@ int mgmtRetrieveMetrics(SShowObj *pShow, char *data, int rows, SConnObj *pConn) cols = 0; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - extractMeterName(pMetric->meterId, pWrite); + extractTableName(pMetric->meterId, pWrite); cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; diff --git a/src/system/detail/src/mgmtProfile.c b/src/system/detail/src/mgmtProfile.c index e641739e31c9e8498e12c6b6cab1525c8a66791e..e7dbeaaa254da098dcdac5a15b6b0feccb5f32f2 100644 --- a/src/system/detail/src/mgmtProfile.c +++ b/src/system/detail/src/mgmtProfile.c @@ -14,15 +14,16 @@ */ #define _DEFAULT_SOURCE +#include "os.h" + #include "mgmt.h" -#include #include "mgmtProfile.h" #include "taosmsg.h" #include "tschemautil.h" typedef struct { uint32_t ip; - short port; + uint16_t port; char user[TSDB_METER_ID_LEN]; } SCDesc; @@ -179,7 +180,7 @@ int mgmtKillQuery(char *qidstr, SConnObj *pConn) { chr = strchr(temp, ':'); if (chr == NULL) goto _error; *chr = 0; - short port = htons(atoi(temp)); + uint16_t port = htons(atoi(temp)); temp = chr + 1; uint32_t queryId = atoi(temp); @@ -447,7 +448,7 @@ int mgmtKillStream(char *qidstr, SConnObj *pConn) { chr = strchr(temp, ':'); if (chr == NULL) goto _error; *chr = 0; - short port = htons(atoi(temp)); + uint16_t port = htons(atoi(temp)); temp = chr + 1; uint32_t streamId = atoi(temp); diff --git a/src/system/detail/src/mgmtShell.c b/src/system/detail/src/mgmtShell.c index 44b9ff286c6c84dcdf4fea4cb9496632d170859d..50d972d6e0600e05a1b4fa8ff12f8aff3b581119 100644 --- a/src/system/detail/src/mgmtShell.c +++ b/src/system/detail/src/mgmtShell.c @@ -14,19 +14,14 @@ */ #define _DEFAULT_SOURCE -#include +#include "os.h" #include "dnodeSystem.h" #include "mgmt.h" #include "mgmtProfile.h" #include "taosmsg.h" #include "tlog.h" - -#pragma GCC diagnostic push - -#pragma GCC diagnostic ignored "-Woverflow" -#pragma GCC diagnostic ignored "-Wpointer-sign" -#pragma GCC diagnostic ignored "-Wint-conversion" +#include "vnodeStatus.h" #define MAX_LEN_OF_METER_META (sizeof(SMultiMeterMeta) + sizeof(SSchema) * TSDB_MAX_COLUMNS + sizeof(SSchema) * TSDB_MAX_TAGS + TSDB_MAX_TAGS_LEN) @@ -78,11 +73,8 @@ int mgmtInitShell() { if (numOfThreads < 1) numOfThreads = 1; memset(&rpcInit, 0, sizeof(rpcInit)); -#ifdef CLUSTER - rpcInit.localIp = tsInternalIp; -#else - rpcInit.localIp = "0.0.0.0"; -#endif + + rpcInit.localIp = tsAnyIp ? 
"0.0.0.0" : tsPrivateIp;; rpcInit.localPort = tsMgmtShellPort; rpcInit.label = "MND-shell"; rpcInit.numOfThreads = numOfThreads; @@ -189,8 +181,11 @@ int mgmtProcessMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { int size = sizeof(STaosHeader) + sizeof(STaosRsp) + sizeof(SMeterMeta) + sizeof(SSchema) * TSDB_MAX_COLUMNS + sizeof(SSchema) * TSDB_MAX_TAGS + TSDB_MAX_TAGS_LEN + TSDB_EXTRA_PAYLOAD_SIZE; + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + // todo db check should be extracted - if (pConn->pDb == NULL || (pConn->pDb != NULL && pConn->pDb->dropStatus != TSDB_DB_STATUS_READY)) { + if (pDb == NULL || (pDb != NULL && pDb->dropStatus != TSDB_DB_STATUS_READY)) { if ((pStart = mgmtAllocMsg(pConn, size, &pMsg, &pRsp)) == NULL) { taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_METERINFO_RSP, TSDB_CODE_SERV_OUT_OF_MEMORY); @@ -223,10 +218,10 @@ int mgmtProcessMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { SDbObj* pMeterDb = mgmtGetDbByMeterId(pCreateMsg->meterId); mTrace("meter:%s, pConnDb:%p, pConnDbName:%s, pMeterDb:%p, pMeterDbName:%s", - pCreateMsg->meterId, pConn->pDb, pConn->pDb->name, pMeterDb, pMeterDb->name); - assert(pConn->pDb == pMeterDb); + pCreateMsg->meterId, pDb, pDb->name, pMeterDb, pMeterDb->name); + assert(pDb == pMeterDb); - int32_t code = mgmtCreateMeter(pConn->pDb, pCreateMsg); + int32_t code = mgmtCreateMeter(pDb, pCreateMsg); char stableName[TSDB_METER_ID_LEN] = {0}; strncpy(stableName, pInfo->tags, TSDB_METER_ID_LEN); @@ -256,7 +251,7 @@ int mgmtProcessMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { } if (pMeterObj == NULL) { - if (pConn->pDb) + if (pDb) pRsp->code = TSDB_CODE_INVALID_TABLE; else pRsp->code = TSDB_CODE_DB_NOT_SELECTED; @@ -274,7 +269,7 @@ int mgmtProcessMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { pMeta->vgid = htonl(pMeterObj->gid.vgId); pMeta->sversion = htons(pMeterObj->sversion); - pMeta->precision = pConn->pDb->cfg.precision; + pMeta->precision = pDb->cfg.precision; pMeta->numOfTags = pMeterObj->numOfTags; pMeta->numOfColumns = htons(pMeterObj->numOfColumns); @@ -313,8 +308,13 @@ int mgmtProcessMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { goto _exit_code; } for (int i = 0; i < TSDB_VNODES_SUPPORT; ++i) { - pMeta->vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; - pMeta->vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + if (pConn->usePublicIp) { + pMeta->vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; + pMeta->vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + } else { + pMeta->vpeerDesc[i].ip = pVgroup->vnodeGid[i].ip; + pMeta->vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + } } } } @@ -452,8 +452,13 @@ int mgmtProcessMultiMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { } for (int i = 0; i < TSDB_VNODES_SUPPORT; ++i) { - pMeta->meta.vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; - pMeta->meta.vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + if (pConn->usePublicIp) { + pMeta->meta.vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; + pMeta->meta.vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + } else { + pMeta->meta.vpeerDesc[i].ip = pVgroup->vnodeGid[i].ip; + pMeta->meta.vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + } } } } @@ -505,7 +510,10 @@ int mgmtProcessMetricMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { SMetricMetaElemMsg *pElem = (SMetricMetaElemMsg *)(((char *)pMetricMetaMsg) + pMetricMetaMsg->metaElem[0]); pMetric = mgmtGetMeter(pElem->meterId); - if (pMetric == NULL || (pConn->pDb != NULL 
&& pConn->pDb->dropStatus != TSDB_DB_STATUS_READY)) { + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + + if (pMetric == NULL || (pDb != NULL && pDb->dropStatus != TSDB_DB_STATUS_READY)) { pStart = taosBuildRspMsg(pConn->thandle, TSDB_MSG_TYPE_METRIC_META_RSP); if (pStart == NULL) { taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_METRIC_META_RSP, TSDB_CODE_SERV_OUT_OF_MEMORY); @@ -514,7 +522,7 @@ int mgmtProcessMetricMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { pMsg = pStart; pRsp = (STaosRsp *)pMsg; - if (pConn->pDb) + if (pDb) pRsp->code = TSDB_CODE_INVALID_TABLE; else pRsp->code = TSDB_CODE_DB_NOT_SELECTED; @@ -522,7 +530,7 @@ int mgmtProcessMetricMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { msgLen = pMsg - pStart; } else { - msgLen = mgmtRetrieveMetricMeta(pConn->thandle, &pStart, pMetricMetaMsg); + msgLen = mgmtRetrieveMetricMeta(pConn, &pStart, pMetricMetaMsg); if (msgLen <= 0) { taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_METRIC_META_RSP, TSDB_CODE_SERV_OUT_OF_MEMORY); return 0; @@ -670,77 +678,164 @@ int mgmtProcessAlterUserMsg(char *pMsg, int msgLen, SConnObj *pConn) { SAlterUserMsg *pAlter = (SAlterUserMsg *)pMsg; int code = 0; SUserObj * pUser; + SUserObj * pOperUser; if (mgmtCheckRedirectMsg(pConn, TSDB_MSG_TYPE_ALTER_USER_RSP) != 0) { return 0; } pUser = mgmtGetUser(pAlter->user); + pOperUser = mgmtGetUser(pConn->pUser->user); + if (pUser == NULL) { taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_ALTER_USER_RSP, TSDB_CODE_INVALID_USER); return 0; } - if (strcmp(pUser->user, "monitor") == 0 || strcmp(pUser->user, "stream") == 0) { + if (pOperUser == NULL) { + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_ALTER_USER_RSP, TSDB_CODE_INVALID_USER); + return 0; + } + + if (strcmp(pUser->user, "monitor") == 0 || (strcmp(pUser->user + 1, pUser->acct) == 0 && pUser->user[0] == '_')) { code = TSDB_CODE_NO_RIGHTS; - } else if ((strcmp(pUser->user, pConn->pUser->user) == 0) || - ((strcmp(pUser->acct, pConn->pAcct->user) == 0) && pConn->superAuth) || - (strcmp(pConn->pUser->user, "root") == 0)) { - if ((pAlter->flag & TSDB_ALTER_USER_PASSWD) != 0) { + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_ALTER_USER_RSP, code); + return 0; + } + + if ((pAlter->flag & TSDB_ALTER_USER_PASSWD) != 0) { + bool hasRight = false; + if (strcmp(pOperUser->user, "root") == 0) { + hasRight = true; + } else if (strcmp(pUser->user, pOperUser->user) == 0) { + hasRight = true; + } else if (pOperUser->superAuth) { + if (strcmp(pUser->user, "root") == 0) { + hasRight = false; + } else if (strcmp(pOperUser->acct, pUser->acct) != 0) { + hasRight = false; + } else { + hasRight = true; + } + } + + if (hasRight) { memset(pUser->pass, 0, sizeof(pUser->pass)); taosEncryptPass(pAlter->pass, strlen(pAlter->pass), pUser->pass); + code = mgmtUpdateUser(pUser); + mLPrint("user:%s password is altered by %s, code:%d", pAlter->user, pConn->pUser->user, code); + } else { + code = TSDB_CODE_NO_RIGHTS; } - if ((pAlter->flag & TSDB_ALTER_USER_PRIVILEGES) != 0) { - if (pAlter->privilege == 1) { // super - pUser->superAuth = 1; - pUser->writeAuth = 1; - } - if (pAlter->privilege == 2) { // read - pUser->superAuth = 0; - pUser->writeAuth = 0; + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_ALTER_USER_RSP, code); + return 0; + } + + if ((pAlter->flag & TSDB_ALTER_USER_PRIVILEGES) != 0) { + bool hasRight = false; + if (strcmp(pUser->user, "root") == 0) { + hasRight = false; + } else if (strcmp(pOperUser->user, "root") == 0) { + hasRight = true; + } else if (strcmp(pUser->user, 
pOperUser->user) == 0) { + hasRight = false; + } else if (pOperUser->superAuth) { + if (strcmp(pUser->user, "root") == 0) { + hasRight = false; + } else if (strcmp(pOperUser->acct, pUser->acct) != 0) { + hasRight = false; + } else { + hasRight = true; } - if (pAlter->privilege == 3) { // write - pUser->superAuth = 0; - pUser->writeAuth = 1; + } + + if (hasRight) { + if ((pAlter->flag & TSDB_ALTER_USER_PRIVILEGES) != 0) { + if (pAlter->privilege == 1) { // super + pUser->superAuth = 1; + pUser->writeAuth = 1; + } + if (pAlter->privilege == 2) { // read + pUser->superAuth = 0; + pUser->writeAuth = 0; + } + if (pAlter->privilege == 3) { // write + pUser->superAuth = 0; + pUser->writeAuth = 1; + } } + code = mgmtUpdateUser(pUser); + mLPrint("user:%s privilege is altered by %s, code:%d", pAlter->user, pConn->pUser->user, code); + } else { + code = TSDB_CODE_NO_RIGHTS; } - code = mgmtUpdateUser(pUser); - mLPrint("user:%s is altered by %s", pAlter->user, pConn->pUser->user); - } else { - code = TSDB_CODE_NO_RIGHTS; + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_ALTER_USER_RSP, code); + return 0; } + code = TSDB_CODE_NO_RIGHTS; taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_ALTER_USER_RSP, code); - return 0; } int mgmtProcessDropUserMsg(char *pMsg, int msgLen, SConnObj *pConn) { SDropUserMsg *pDrop = (SDropUserMsg *)pMsg; int code = 0; + SUserObj * pUser; + SUserObj * pOperUser; if (mgmtCheckRedirectMsg(pConn, TSDB_MSG_TYPE_DROP_USER_RSP) != 0) { return 0; } - if (strcmp(pConn->pUser->user, pDrop->user) == 0) { - code = TSDB_CODE_NO_RIGHTS; - } else if (strcmp(pDrop->user, "monitor") == 0 || strcmp(pDrop->user, "stream") == 0) { + pUser = mgmtGetUser(pDrop->user); + pOperUser = mgmtGetUser(pConn->pUser->user); + + if (pUser == NULL) { + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_USER_RSP, TSDB_CODE_INVALID_USER); + return 0; + } + + if (pOperUser == NULL) { + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_USER_RSP, TSDB_CODE_INVALID_USER); + return 0; + } + + if (strcmp(pUser->user, "monitor") == 0 || (strcmp(pUser->user + 1, pUser->acct) == 0 && pUser->user[0] == '_')) { code = TSDB_CODE_NO_RIGHTS; - } else { - if (pConn->superAuth) { - code = mgmtDropUser(pConn->pAcct, pDrop->user); - if (code == 0) { - mLPrint("user:%s is dropped by %s", pDrop->user, pConn->pUser->user); - } + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_USER_RSP, code); + return 0; + } + + bool hasRight = false; + if (strcmp(pUser->user, "root") == 0) { + hasRight = false; + } else if (strcmp(pOperUser->user, "root") == 0) { + hasRight = true; + } else if (strcmp(pUser->user, pOperUser->user) == 0) { + hasRight = false; + } else if (pOperUser->superAuth) { + if (strcmp(pUser->user, "root") == 0) { + hasRight = false; + } else if (strcmp(pOperUser->acct, pUser->acct) != 0) { + hasRight = false; } else { - code = TSDB_CODE_NO_RIGHTS; + hasRight = true; } } - taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_USER_RSP, code); + if (hasRight) { + code = mgmtDropUser(pConn->pAcct, pDrop->user); + if (code == 0) { + mLPrint("user:%s is dropped by %s", pDrop->user, pConn->pUser->user); + } + } else { + code = TSDB_CODE_NO_RIGHTS; + } + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_USER_RSP, code); return 0; } @@ -755,7 +850,7 @@ int mgmtProcessDropDbMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (!pConn->writeAuth) { code = TSDB_CODE_NO_RIGHTS; } else { - code = mgmtDropDbByName(pConn->pAcct, pDrop->db); + code = mgmtDropDbByName(pConn->pAcct, pDrop->db, pDrop->ignoreNotExists); if (code == 0) 
{ mLPrint("DB:%s is dropped by %s", pDrop->db, pConn->pUser->user); } @@ -781,12 +876,14 @@ int (*mgmtGetMetaFp[])(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) = { mgmtGetAcctMeta, mgmtGetUserMeta, mgmtGetDbMeta, mgmtGetMeterMeta, mgmtGetDnodeMeta, mgmtGetMnodeMeta, mgmtGetVgroupMeta, mgmtGetMetricMeta, mgmtGetModuleMeta, mgmtGetQueryMeta, mgmtGetStreamMeta, mgmtGetConfigMeta, mgmtGetConnsMeta, mgmtGetScoresMeta, grantGetGrantsMeta, + mgmtGetVnodeMeta, }; int (*mgmtRetrieveFp[])(SShowObj *pShow, char *data, int rows, SConnObj *pConn) = { mgmtRetrieveAccts, mgmtRetrieveUsers, mgmtRetrieveDbs, mgmtRetrieveMeters, mgmtRetrieveDnodes, mgmtRetrieveMnodes, mgmtRetrieveVgroups, mgmtRetrieveMetrics, mgmtRetrieveModules, mgmtRetrieveQueries, mgmtRetrieveStreams, mgmtRetrieveConfigs, mgmtRetrieveConns, mgmtRetrieveScores, grantRetrieveGrants, + mgmtRetrieveVnodes, }; int mgmtProcessShowMsg(char *pMsg, int msgLen, SConnObj *pConn) { @@ -831,11 +928,11 @@ int mgmtProcessShowMsg(char *pMsg, int msgLen, SConnObj *pConn) { pShowRsp->qhandle = (uint64_t)pShow; // qhandle; pConn->qhandle = pShowRsp->qhandle; - code = (*mgmtGetMetaFp[pShowMsg->type])(&pShowRsp->meterMeta, pShow, pConn); + code = (*mgmtGetMetaFp[(uint8_t)pShowMsg->type])(&pShowRsp->meterMeta, pShow, pConn); if (code == 0) { pMsg += sizeof(SShowRspMsg) + sizeof(SSchema) * pShow->numOfColumns; } else { - mError("pShow:%p, type:%d %s, failed to get Meta, code:%d", pShow, pShowMsg->type, taosMsg[pShowMsg->type], code); + mError("pShow:%p, type:%d %s, failed to get Meta, code:%d", pShow, pShowMsg->type, taosMsg[(uint8_t)pShowMsg->type], code); free(pShow); } } @@ -873,7 +970,7 @@ int mgmtProcessRetrieveMsg(char *pMsg, int msgLen, SConnObj *pConn) { taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, TSDB_CODE_MEMORY_CORRUPTED); return -1; } else { - if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) == 0) { + if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) { rowsToRead = pShow->numOfRows - pShow->numOfReads; } @@ -905,8 +1002,8 @@ int mgmtProcessRetrieveMsg(char *pMsg, int msgLen, SConnObj *pConn) { pMsg = pRsp->data; // if free flag is set, client wants to clean the resources - if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) == 0) - rowsRead = (*mgmtRetrieveFp[pShow->type])(pShow, pRsp->data, rowsToRead, pConn); + if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) + rowsRead = (*mgmtRetrieveFp[(uint8_t)pShow->type])(pShow, pRsp->data, rowsToRead, pConn); if (rowsRead < 0) { rowsRead = 0; @@ -922,8 +1019,8 @@ int mgmtProcessRetrieveMsg(char *pMsg, int msgLen, SConnObj *pConn) { taosSendMsgToPeer(pConn->thandle, pStart, msgLen); if (rowsToRead == 0) { - int64_t oldSign = __sync_val_compare_and_swap(&pShow->signature, (uint64_t)pShow, 0); - if (oldSign != (uint64_t)pShow) { + uintptr_t oldSign = (uintptr_t)atomic_val_compare_exchange_ptr(&pShow->signature, pShow, 0); + if (oldSign != (uintptr_t)pShow) { return msgLen; } // pShow->signature = 0; @@ -957,17 +1054,33 @@ int mgmtProcessCreateTableMsg(char *pMsg, int msgLen, SConnObj *pConn) { pSchema++; } - if (pConn->pDb) { - code = mgmtCreateMeter(pConn->pDb, pCreate); - if (code == 0) { - mTrace("meter:%s is created by %s", pCreate->meterId, pConn->pUser->user); - // mLPrint("meter:%s is created by %s", pCreate->meterId, pConn->pUser->user); - } + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + + if (pDb) { + code = mgmtCreateMeter(pDb, pCreate); } else { code = 
TSDB_CODE_DB_NOT_SELECTED; } } + if (code == 1) { + //mTrace("table:%s, wait vgroup create finish", pCreate->meterId, code); + } else if (code != TSDB_CODE_SUCCESS) { + if (code == TSDB_CODE_TABLE_ALREADY_EXIST) { // table already created when the second attempt to create table + + STabObj* pMeter = mgmtGetMeter(pCreate->meterId); + assert(pMeter != NULL); + + mWarn("table:%s, table already created, failed to create table, ts:%lld, code:%d", pCreate->meterId, + pMeter->createdTime, code); + } else { // other errors + mError("table:%s, failed to create table, code:%d", pCreate->meterId, code); + } + } else { + mTrace("table:%s, table is created by %s", pCreate->meterId, pConn->pUser->user); + } + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_TABLE_RSP, code); return 0; @@ -984,7 +1097,10 @@ int mgmtProcessDropTableMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (!pConn->writeAuth) { code = TSDB_CODE_NO_RIGHTS; } else { - code = mgmtDropMeter(pConn->pDb, pDrop->meterId, pDrop->igNotExists); + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + + code = mgmtDropMeter(pDb, pDrop->meterId, pDrop->igNotExists); if (code == 0) { mTrace("meter:%s is dropped by user:%s", pDrop->meterId, pConn->pUser->user); // mLPrint("meter:%s is dropped by user:%s", pDrop->meterId, pConn->pUser->user); @@ -1014,12 +1130,15 @@ int mgmtProcessAlterTableMsg(char *pMsg, int msgLen, SConnObj *pConn) { mError("meter:%s error numOfCols:%d in alter table", pAlter->meterId, pAlter->numOfCols); code = TSDB_CODE_APP_ERROR; } else { - if (pConn->pDb) { + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + + if (pDb) { for (int32_t i = 0; i < pAlter->numOfCols; ++i) { pAlter->schema[i].bytes = htons(pAlter->schema[i].bytes); } - code = mgmtAlterMeter(pConn->pDb, pAlter); + code = mgmtAlterMeter(pDb, pAlter); if (code == 0) { mLPrint("meter:%s is altered by %s", pAlter->meterId, pConn->pUser->user); } @@ -1078,10 +1197,17 @@ int mgmtProcessHeartBeatMsg(char *cont, int contLen, SConnObj *pConn) { pHBRsp->killConnection = pConn->killConnection; #ifdef CLUSTER - int size = pSdbPublicIpList->numOfIps * 4; - pHBRsp->ipList.numOfIps = pSdbPublicIpList->numOfIps; - memcpy(pHBRsp->ipList.ip, pSdbPublicIpList->ip, size); - pMsg += sizeof(SHeartBeatRsp) + size; + if (pConn->usePublicIp) { + int size = pSdbPublicIpList->numOfIps * 4; + pHBRsp->ipList.numOfIps = pSdbPublicIpList->numOfIps; + memcpy(pHBRsp->ipList.ip, pSdbPublicIpList->ip, size); + pMsg += sizeof(SHeartBeatRsp) + size; + } else { + int size = pSdbIpList->numOfIps * 4; + pHBRsp->ipList.numOfIps = pSdbIpList->numOfIps; + memcpy(pHBRsp->ipList.ip, pSdbIpList->ip, size); + pMsg += sizeof(SHeartBeatRsp) + size; + } #else pMsg += sizeof(SHeartBeatRsp); #endif @@ -1093,11 +1219,11 @@ int mgmtProcessHeartBeatMsg(char *cont, int contLen, SConnObj *pConn) { } void mgmtEstablishConn(SConnObj *pConn) { - __sync_fetch_and_add(&mgmtShellConns, 1); - __sync_fetch_and_add(&sdbExtConns, 1); + atomic_fetch_add_32(&mgmtShellConns, 1); + atomic_fetch_add_32(&sdbExtConns, 1); pConn->stime = taosGetTimestampMs(); - if (strcmp(pConn->pUser->user, "root") == 0 || strcmp(pConn->pUser->user, pConn->pAcct->user) == 0) { + if (strcmp(pConn->pUser->user, "root") == 0) { pConn->superAuth = 1; pConn->writeAuth = 1; } else { @@ -1108,8 +1234,9 @@ void mgmtEstablishConn(SConnObj *pConn) { } } - uint32_t temp; - taosGetRpcConnInfo(pConn->thandle, &temp, &pConn->ip, &pConn->port, &temp, &temp); + int32_t tempint32; + uint32_t 
tempuint32; + taosGetRpcConnInfo(pConn->thandle, &tempuint32, &pConn->ip, &pConn->port, &tempint32, &tempint32); mgmtAddConnIntoAcct(pConn); } @@ -1140,7 +1267,7 @@ int mgmtProcessConnectMsg(char *pMsg, int msgLen, SConnObj *pConn) { SAcctObj * pAcct = NULL; SUserObj * pUser = NULL; SDbObj * pDb = NULL; - char dbName[TSDB_METER_ID_LEN]; + char dbName[256] = {0}; pConnectMsg = (SConnectMsg *)pMsg; @@ -1157,8 +1284,13 @@ int mgmtProcessConnectMsg(char *pMsg, int msgLen, SConnObj *pConn) { pAcct = mgmtGetAcct(pUser->acct); + code = taosCheckVersion(pConnectMsg->clientVersion, version, 3); + if (code != 0) { + mError("invalid client version:%s", pConnectMsg->clientVersion); + goto _rsp; + } + if (pConnectMsg->db[0]) { - memset(dbName, 0, sizeof(dbName)); sprintf(dbName, "%x%s%s", pAcct->acctId, TS_PATH_DELIMITER, pConnectMsg->db); pDb = mgmtGetDb(dbName); if (pDb == NULL) { @@ -1169,8 +1301,8 @@ int mgmtProcessConnectMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (pConn->pAcct) { mgmtRemoveConnFromAcct(pConn); - __sync_fetch_and_sub(&mgmtShellConns, 1); - __sync_fetch_and_sub(&sdbExtConns, 1); + atomic_fetch_sub_32(&mgmtShellConns, 1); + atomic_fetch_sub_32(&sdbExtConns, 1); } code = 0; @@ -1178,7 +1310,7 @@ int mgmtProcessConnectMsg(char *pMsg, int msgLen, SConnObj *pConn) { pConn->pDb = pDb; pConn->pUser = pUser; mgmtEstablishConn(pConn); - + _rsp: pStart = taosBuildRspMsgWithSize(pConn->thandle, TSDB_MSG_TYPE_CONNECT_RSP, 128); if (pStart == NULL) return 0; @@ -1198,7 +1330,11 @@ _rsp: #ifdef CLUSTER int size = pSdbPublicIpList->numOfIps * 4 + sizeof(SIpList); - memcpy(pMsg, pSdbPublicIpList, size); + if (pConn->usePublicIp) { + memcpy(pMsg, pSdbPublicIpList, size); + } else { + memcpy(pMsg, pSdbIpList, size); + } pMsg += size; #endif @@ -1228,8 +1364,8 @@ void *mgmtProcessMsgFromShell(char *msg, void *ahandle, void *thandle) { if (msg == NULL) { if (pConn) { mgmtRemoveConnFromAcct(pConn); - __sync_fetch_and_sub(&mgmtShellConns, 1); - __sync_fetch_and_sub(&sdbExtConns, 1); + atomic_fetch_sub_32(&mgmtShellConns, 1); + atomic_fetch_sub_32(&sdbExtConns, 1); mTrace("connection from %s is closed", pConn->pUser->user); memset(pConn, 0, sizeof(SConnObj)); } @@ -1248,6 +1384,9 @@ void *mgmtProcessMsgFromShell(char *msg, void *ahandle, void *thandle) { pConn = connList + pMsg->destId; pConn->thandle = thandle; strcpy(pConn->user, pMsg->meterId); + pConn->usePublicIp = (pMsg->destIp == tsPublicIpInt ? 
1 : 0); + mTrace("pConn:%p is rebuild, destIp:%s publicIp:%s usePublicIp:%u", + pConn, taosIpStr(pMsg->destIp), taosIpStr(tsPublicIpInt), pConn->usePublicIp); } if (pMsg->msgType == TSDB_MSG_TYPE_CONNECT) { @@ -1259,13 +1398,12 @@ void *mgmtProcessMsgFromShell(char *msg, void *ahandle, void *thandle) { if (pConn->pUser) { pConn->pAcct = mgmtGetAcct(pConn->pUser->acct); mgmtEstablishConn(pConn); - mTrace("login from:%x:%d", pConn->ip, htons(pConn->port)); + mTrace("login from:%x:%hu", pConn->ip, htons(pConn->port)); } } if (pConn->pAcct) { - if (pConn->pDb == NULL || - strncmp(pConn->pDb->name, pHead->db, tListLen(pConn->pDb->name)) != 0) { + if (pConn->pDb == NULL || strncmp(pConn->pDb->name, pHead->db, tListLen(pConn->pDb->name)) != 0) { pConn->pDb = mgmtGetDb(pHead->db); } @@ -1339,5 +1477,3 @@ void mgmtInitProcessShellMsg() { mgmtProcessShellMsg[TSDB_MSG_TYPE_KILL_STREAM] = mgmtProcessKillStreamMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_KILL_CONNECTION] = mgmtProcessKillConnectionMsg; } - -#pragma GCC diagnostic pop diff --git a/src/system/detail/src/mgmtSupertableQuery.c b/src/system/detail/src/mgmtSupertableQuery.c index 31f3e29425b1842722d8eca374b0a5bf7b4fb784..f83ffd42477c17c8c20b67aed0956fd1e15737eb 100644 --- a/src/system/detail/src/mgmtSupertableQuery.c +++ b/src/system/detail/src/mgmtSupertableQuery.c @@ -14,10 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include +#include "os.h" #include "mgmt.h" #include "mgmtUtil.h" @@ -59,7 +56,7 @@ static int32_t tabObjVGIDComparator(const void* pLeft, const void* pRight) { // monotonic inc in memory address static int32_t tabObjPointerComparator(const void* pLeft, const void* pRight) { - int64_t ret = (int64_t)pLeft - (int64_t)pRight; + int64_t ret = (*(STabObj**)(pLeft))->uid - (*(STabObj**)(pRight))->uid; if (ret == 0) { return 0; } else { @@ -103,6 +100,32 @@ static int32_t tabObjResultComparator(const void* p1, const void* p2, void* para return 0; } +/** + * update the tag order index according to the tags column index. The tags column index needs to be checked one-by-one, + * since the normal columns may be passed to server for handling the group by on status column. + * + * @param pMetricMetaMsg + * @param tableIndex + * @param pOrderIndexInfo + * @param numOfTags + */ +static void mgmtUpdateOrderTagColIndex(SMetricMetaMsg* pMetricMetaMsg, int32_t tableIndex, tOrderIdx* pOrderIndexInfo, + int32_t numOfTags) { + SMetricMetaElemMsg* pElem = (SMetricMetaElemMsg*)((char*)pMetricMetaMsg + pMetricMetaMsg->metaElem[tableIndex]); + SColIndexEx* groupColumnList = (SColIndexEx*)((char*)pMetricMetaMsg + pElem->groupbyTagColumnList); + + int32_t numOfGroupbyTags = 0; + for (int32_t i = 0; i < pElem->numOfGroupCols; ++i) { + if (groupColumnList[i].flag == TSDB_COL_TAG) { // ignore this column if it is not a tag column. 
+ pOrderIndexInfo->pData[numOfGroupbyTags++] = groupColumnList[i].colIdx; + + assert(groupColumnList[i].colIdx < numOfTags); + } + } + + pOrderIndexInfo->numOfOrderedCols = numOfGroupbyTags; +} + // todo merge sort function with losertree used void mgmtReorganizeMetersInMetricMeta(SMetricMetaMsg* pMetricMetaMsg, int32_t tableIndex, tQueryResultset* pRes) { if (pRes->num <= 0) { // no result, no need to pagination @@ -125,13 +148,9 @@ void mgmtReorganizeMetersInMetricMeta(SMetricMetaMsg* pMetricMetaMsg, int32_t ta int32_t* startPos = NULL; int32_t numOfSubset = 1; - - if (pElem->numOfGroupCols > 0) { - SColIndexEx* groupColumnList = (SColIndexEx*)((char*)pMetricMetaMsg + pElem->groupbyTagColumnList); - for (int32_t i = 0; i < pElem->numOfGroupCols; ++i) { - descriptor->orderIdx.pData[i] = groupColumnList[i].colIdx; - } - + + mgmtUpdateOrderTagColIndex(pMetricMetaMsg, tableIndex, &descriptor->orderIdx, pMetric->numOfTags); + if (descriptor->orderIdx.numOfOrderedCols > 0) { tQSortEx(pRes->pRes, POINTER_BYTES, 0, pRes->num - 1, descriptor, tabObjResultComparator); startPos = calculateSubGroup(pRes->pRes, pRes->num, &numOfSubset, descriptor, tabObjResultComparator); } else { @@ -199,14 +218,14 @@ static bool mgmtTablenameFilterCallback(tSkipListNode* pNode, void* param) { // pattern compare for meter name STabObj* pMeterObj = (STabObj*)pNode->pData; - extractMeterName(pMeterObj->meterId, name); + extractTableName(pMeterObj->meterId, name); return patternMatch(pSupporter->pattern, name, TSDB_METER_ID_LEN, &pSupporter->info) == TSDB_PATTERN_MATCH; } static void mgmtRetrieveFromLikeOptr(tQueryResultset* pRes, const char* str, STabObj* pMetric) { SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; - SMeterNameFilterSupporter supporter = {info, str}; + SMeterNameFilterSupporter supporter = {info, (char*) str}; pRes->num = tSkipListIterateList(pMetric->pSkipList, (tSkipListNode***)&pRes->pRes, mgmtTablenameFilterCallback, &supporter); @@ -233,7 +252,7 @@ static void mgmtFilterByTableNameCond(tQueryResultset* pRes, char* condStr, int3 free(str); } -static bool mgmtJoinFilterCallback(tSkipListNode* pNode, void* param) { +UNUSED_FUNC static bool mgmtJoinFilterCallback(tSkipListNode* pNode, void* param) { SJoinSupporter* pSupporter = (SJoinSupporter*)param; SSchema s = {0}; @@ -430,11 +449,11 @@ static tQueryResultset* doNestedLoopIntersect(tQueryResultset* pRes1, tQueryResu } static tQueryResultset* doSortIntersect(tQueryResultset* pRes1, tQueryResultset* pRes2) { - size_t sizePtr = sizeof(void*); - + size_t sizePtr = sizeof(void *); + qsort(pRes1->pRes, pRes1->num, sizePtr, tabObjPointerComparator); qsort(pRes2->pRes, pRes2->num, sizePtr, tabObjPointerComparator); - + int32_t i = 0; int32_t j = 0; @@ -642,7 +661,8 @@ static void getTagColumnInfo(SSyntaxTreeFilterSupporter* pSupporter, SSchema* pS } } -void filterPrepare(tSQLBinaryExpr* pExpr, void* param) { +void filterPrepare(void* expr, void* param) { + tSQLBinaryExpr *pExpr = (tSQLBinaryExpr*) expr; if (pExpr->info != NULL) { return; } @@ -694,7 +714,9 @@ static int32_t mgmtFilterMeterByIndex(STabObj* pMetric, tQueryResultset* pRes, c return TSDB_CODE_OPS_NOT_SUPPORT; } else { // query according to the binary expression SSyntaxTreeFilterSupporter s = {.pTagSchema = pTagSchema, .numOfTags = pMetric->numOfTags}; - SBinaryFilterSupp supp = {.fp = tSkipListNodeFilterCallback, .setupInfoFn = filterPrepare, .pExtInfo = &s}; + SBinaryFilterSupp supp = {.fp = (__result_filter_fn_t)tSkipListNodeFilterCallback, + .setupInfoFn = 
(__do_filter_suppl_fn_t)filterPrepare, + .pExtInfo = &s}; tSQLBinaryExprTraverse(pExpr, pMetric->pSkipList, pRes, &supp); tSQLBinaryExprDestroy(&pExpr, tSQLListTraverseDestroyInfo); @@ -784,22 +806,25 @@ int mgmtRetrieveMetersFromMetric(SMetricMetaMsg* pMsg, int32_t tableIndex, tQuer } // todo refactor!!!!! -static char* getTagValueFromMeter(STabObj* pMeter, int32_t offset, void* param) { +static char* getTagValueFromMeter(STabObj* pMeter, int32_t offset, int32_t len, char* param) { if (offset == TSDB_TBNAME_COLUMN_INDEX) { - extractMeterName(pMeter->meterId, param); - return param; + extractTableName(pMeter->meterId, param); } else { - char* tags = pMeter->pTagData + TSDB_METER_ID_LEN; // tag start position - return (tags + offset); + char* tags = pMeter->pTagData + offset + TSDB_METER_ID_LEN; // tag start position + memcpy(param, tags, len); // make sure the value is null-terminated string } + + return param; } -bool tSkipListNodeFilterCallback(tSkipListNode* pNode, void* param) { +bool tSkipListNodeFilterCallback(const void* pNode, void* param) { + tQueryInfo* pInfo = (tQueryInfo*)param; - STabObj* pMeter = (STabObj*)pNode->pData; + STabObj* pMeter = (STabObj*)(((tSkipListNode*)pNode)->pData); - char name[TSDB_METER_NAME_LEN + 1] = {0}; - char* val = getTagValueFromMeter(pMeter, pInfo->offset, name); + char buf[TSDB_MAX_TAGS_LEN] = {0}; + + char* val = getTagValueFromMeter(pMeter, pInfo->offset, pInfo->sch.bytes, buf); int8_t type = pInfo->sch.type; int32_t ret = 0; diff --git a/src/system/detail/src/mgmtSystem.c b/src/system/detail/src/mgmtSystem.c index bb05c35e9be40fcb338395fb7f1086d99aff4ac1..375e100a83668f0288fb25fc05f689b3e52b0c91 100644 --- a/src/system/detail/src/mgmtSystem.c +++ b/src/system/detail/src/mgmtSystem.c @@ -14,16 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "os.h" #include "dnodeSystem.h" #include "mgmt.h" diff --git a/src/system/detail/src/mgmtUser.c b/src/system/detail/src/mgmtUser.c index 83e619841b18b44aeac999ba1d60c5a8672e1531..89b83e3553f26a3d2cc755709086d8f5c4fa7f3f 100644 --- a/src/system/detail/src/mgmtUser.c +++ b/src/system/detail/src/mgmtUser.c @@ -14,7 +14,7 @@ */ #define _DEFAULT_SOURCE -#include +#include "os.h" #include "mgmt.h" #include "tschemautil.h" @@ -54,8 +54,8 @@ void mgmtUserActionInit() { } void *mgmtUserAction(char action, void *row, char *str, int size, int *ssize) { - if (mgmtUserActionFp[action] != NULL) { - return (*(mgmtUserActionFp[action]))(row, str, size, ssize); + if (mgmtUserActionFp[(uint8_t)action] != NULL) { + return (*(mgmtUserActionFp[(uint8_t)action]))(row, str, size, ssize); } return NULL; } diff --git a/src/system/detail/src/mgmtUtil.c b/src/system/detail/src/mgmtUtil.c index a427771e0f900c3a19bb13dbc45897b3190d7f15..98978767ce73b957a12bebbc945f606bad8ec086 100644 --- a/src/system/detail/src/mgmtUtil.c +++ b/src/system/detail/src/mgmtUtil.c @@ -14,10 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include +#include "os.h" #include "mgmt.h" #include "mgmtUtil.h" @@ -87,3 +84,84 @@ int32_t mgmtGetTagsLength(STabObj* pMetric, int32_t col) { // length before col return len; } + +bool mgmtCheckIsMonitorDB(char *db, char *monitordb) { + char dbName[TSDB_DB_NAME_LEN + 1] = {0}; + extractDBName(db, dbName); + + size_t len = strlen(dbName); + return (strncasecmp(dbName, monitordb, len) == 0 && len == strlen(monitordb)); +} + +int32_t mgmtCheckDBParams(SCreateDbMsg *pCreate) { + if (pCreate->commitLog < 0 || 
pCreate->commitLog > 1) {
+    mError("invalid db option commitLog: %d, only 0 or 1 allowed", pCreate->commitLog);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->replications < TSDB_REPLICA_MIN_NUM || pCreate->replications > TSDB_REPLICA_MAX_NUM) {
+    mError("invalid db option replications: %d valid range: [%d, %d]", pCreate->replications, TSDB_REPLICA_MIN_NUM,
+           TSDB_REPLICA_MAX_NUM);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->daysPerFile < TSDB_FILE_MIN_PARTITION_RANGE || pCreate->daysPerFile > TSDB_FILE_MAX_PARTITION_RANGE) {
+    mError("invalid db option daysPerFile: %d valid range: [%d, %d]", pCreate->daysPerFile, TSDB_FILE_MIN_PARTITION_RANGE,
+           TSDB_FILE_MAX_PARTITION_RANGE);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->daysToKeep1 > pCreate->daysToKeep2 || pCreate->daysToKeep2 > pCreate->daysToKeep) {
+    mError("invalid db option daystokeep1: %d, daystokeep2: %d, daystokeep: %d", pCreate->daysToKeep1,
+           pCreate->daysToKeep2, pCreate->daysToKeep);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->daysToKeep1 < TSDB_FILE_MIN_PARTITION_RANGE || pCreate->daysToKeep1 < pCreate->daysPerFile) {
+    mError("invalid db option daystokeep: %d", pCreate->daysToKeep);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->rowsInFileBlock < TSDB_MIN_ROWS_IN_FILEBLOCK || pCreate->rowsInFileBlock > TSDB_MAX_ROWS_IN_FILEBLOCK) {
+    mError("invalid db option rowsInFileBlock: %d valid range: [%d, %d]", pCreate->rowsInFileBlock,
+           TSDB_MIN_ROWS_IN_FILEBLOCK, TSDB_MAX_ROWS_IN_FILEBLOCK);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->cacheBlockSize < TSDB_MIN_CACHE_BLOCK_SIZE || pCreate->cacheBlockSize > TSDB_MAX_CACHE_BLOCK_SIZE) {
+    mError("invalid db option cacheBlockSize: %d valid range: [%d, %d]", pCreate->cacheBlockSize,
+           TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MAX_CACHE_BLOCK_SIZE);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->maxSessions < TSDB_MIN_TABLES_PER_VNODE || pCreate->maxSessions > TSDB_MAX_TABLES_PER_VNODE) {
+    mError("invalid db option maxSessions: %d valid range: [%d, %d]", pCreate->maxSessions, TSDB_MIN_TABLES_PER_VNODE,
+           TSDB_MAX_TABLES_PER_VNODE);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO) {
+    mError("invalid db option timePrecision: %d valid value: [%d, %d]", pCreate->precision, TSDB_TIME_PRECISION_MILLI,
+           TSDB_TIME_PRECISION_MICRO);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->cacheNumOfBlocks.fraction < TSDB_MIN_AVG_BLOCKS || pCreate->cacheNumOfBlocks.fraction > TSDB_MAX_AVG_BLOCKS) {
+    mError("invalid db option ablocks: %f valid value: [%d, %d]", pCreate->cacheNumOfBlocks.fraction, 0, TSDB_MAX_AVG_BLOCKS);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->commitTime < TSDB_MIN_COMMIT_TIME_INTERVAL || pCreate->commitTime > TSDB_MAX_COMMIT_TIME_INTERVAL) {
+    mError("invalid db option commitTime: %d valid range: [%d, %d]", pCreate->commitTime, TSDB_MIN_COMMIT_TIME_INTERVAL,
+           TSDB_MAX_COMMIT_TIME_INTERVAL);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  if (pCreate->compression < TSDB_MIN_COMPRESSION_LEVEL || pCreate->compression > TSDB_MAX_COMPRESSION_LEVEL) {
+    mError("invalid db option compression: %d valid range: [%d, %d]", pCreate->compression, TSDB_MIN_COMPRESSION_LEVEL,
+           TSDB_MAX_COMPRESSION_LEVEL);
+    return TSDB_CODE_INVALID_OPTION;
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
diff --git a/src/system/detail/src/mgmtVgroup.c b/src/system/detail/src/mgmtVgroup.c
index
5858be54cab7e5394d994427d5105f62f8331e02..c9052c094b737842ce6ceca73e44ca237cbc8ad8 100644 --- a/src/system/detail/src/mgmtVgroup.c +++ b/src/system/detail/src/mgmtVgroup.c @@ -14,11 +14,12 @@ */ #define _DEFAULT_SOURCE -#include +#include "os.h" #include "mgmt.h" #include "tschemautil.h" #include "tlog.h" +#include "vnodeStatus.h" void * vgSdb = NULL; int tsVgUpdateSize; @@ -39,6 +40,7 @@ void *mgmtVgroupActionAfterBatchUpdate(void *row, char *str, int size, int *ssiz void *mgmtVgroupActionReset(void *row, char *str, int size, int *ssize); void *mgmtVgroupActionDestroy(void *row, char *str, int size, int *ssize); bool mgmtCheckVnodeReady(SDnodeObj *pDnode, SVgObj *pVgroup, SVnodeGid *pVnode); +char *mgmtGetVnodeStatus(SVgObj *pVgroup, SVnodeGid *pVnode); void mgmtVgroupActionInit() { mgmtVgroupActionFp[SDB_TYPE_INSERT] = mgmtVgroupActionInsert; @@ -54,8 +56,8 @@ void mgmtVgroupActionInit() { } void *mgmtVgroupAction(char action, void *row, char *str, int size, int *ssize) { - if (mgmtVgroupActionFp[action] != NULL) { - return (*(mgmtVgroupActionFp[action]))(row, str, size, ssize); + if (mgmtVgroupActionFp[(uint8_t)action] != NULL) { + return (*(mgmtVgroupActionFp[(uint8_t)action]))(row, str, size, ssize); } return NULL; } @@ -101,13 +103,17 @@ int mgmtInitVgroups() { } taosIdPoolReinit(pVgroup->idPool); -#ifdef CLUSTER - if (pVgroup->vnodeGid[0].publicIp == 0) { - pVgroup->vnodeGid[0].publicIp = inet_addr(tsPublicIp); - pVgroup->vnodeGid[0].ip = inet_addr(tsPrivateIp); - sdbUpdateRow(vgSdb, pVgroup, tsVgUpdateSize, 1); + + if (tsIsCluster) { + /* + * Upgrade from open source version to cluster version for the first time + */ + if (pVgroup->vnodeGid[0].publicIp == 0) { + pVgroup->vnodeGid[0].publicIp = inet_addr(tsPublicIp); + pVgroup->vnodeGid[0].ip = inet_addr(tsPrivateIp); + sdbUpdateRow(vgSdb, pVgroup, tsVgUpdateSize, 1); + } } -#endif mgmtSetDnodeVgid(pVgroup->vnodeGid, pVgroup->numOfVnodes, pVgroup->vgId); } @@ -123,7 +129,7 @@ void mgmtProcessVgTimer(void *handle, void *tmrId) { if (pDb == NULL) return; if (pDb->vgStatus > TSDB_VG_STATUS_IN_PROGRESS) { - mTrace("db:%s, set vgstatus from %d to %d", pDb->name, pDb->vgStatus, TSDB_VG_STATUS_READY); + mTrace("db:%s, set vgroup status from %d to ready", pDb->name, pDb->vgStatus); pDb->vgStatus = TSDB_VG_STATUS_READY; } @@ -143,7 +149,7 @@ SVgObj *mgmtCreateVgroup(SDbObj *pDb) { // based on load balance, create a new one if (mgmtAllocVnodes(pVgroup) != 0) { - mError("no enough free dnode"); + mError("db:%s, no enough free dnode to alloc %d vnodes", pDb->name, pVgroup->numOfVnodes); free(pVgroup); pDb->vgStatus = TSDB_VG_STATUS_FULL; taosTmrReset(mgmtProcessVgTimer, 5000, pDb, mgmtTmr, &pDb->vgTimer); @@ -152,9 +158,9 @@ SVgObj *mgmtCreateVgroup(SDbObj *pDb) { sdbInsertRow(vgSdb, pVgroup, 0); - mTrace("vgroup:%d, db:%s replica:%d is created", pVgroup->vgId, pDb->name, pVgroup->numOfVnodes); + mTrace("vgroup:%d, vgroup is created, db:%s replica:%d", pVgroup->vgId, pDb->name, pVgroup->numOfVnodes); for (int i = 0; i < pVgroup->numOfVnodes; ++i) - mTrace("dnode:%s, vgroup:%d, vnode:%d is created", taosIpStr(pVgroup->vnodeGid[i].ip), pVgroup->vgId, pVgroup->vnodeGid[i].vnode); + mTrace("vgroup:%d, dnode:%s vnode:%d is created", pVgroup->vgId, taosIpStr(pVgroup->vnodeGid[i].ip), pVgroup->vnodeGid[i].vnode); mgmtSendVPeersMsg(pVgroup); @@ -206,7 +212,10 @@ void mgmtCleanUpVgroups() { sdbCloseTable(vgSdb); } int mgmtGetVgroupMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { int cols = 0; - if (pConn->pDb == NULL) return 
TSDB_CODE_DB_NOT_SELECTED; + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + + if (pDb == NULL) return TSDB_CODE_DB_NOT_SELECTED; SSchema *pSchema = tsGetSchema(pMeta); @@ -229,7 +238,7 @@ int mgmtGetVgroupMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { cols++; int maxReplica = 0; - SVgObj *pVgroup = pConn->pDb->pHead; + SVgObj *pVgroup = pDb->pHead; while (pVgroup != NULL) { maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica; pVgroup = pVgroup->next; @@ -267,8 +276,8 @@ int mgmtGetVgroupMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { pShow->offset[0] = 0; for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; - pShow->numOfRows = pConn->pDb->numOfVgroups; - pShow->pNode = pConn->pDb->pHead; + pShow->numOfRows = pDb->numOfVgroups; + pShow->pNode = pDb->pHead; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; return 0; @@ -282,7 +291,12 @@ int mgmtRetrieveVgroups(SShowObj *pShow, char *data, int rows, SConnObj *pConn) char ipstr[20]; int maxReplica = 0; - pVgroup = pConn->pDb->pHead; + + SDbObj *pDb = NULL; + if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + assert(pDb != NULL); + + pVgroup = pDb->pHead; while (pVgroup != NULL) { maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica; pVgroup = pVgroup->next; @@ -305,7 +319,7 @@ int mgmtRetrieveVgroups(SShowObj *pShow, char *data, int rows, SConnObj *pConn) cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - strcpy(pWrite, pVgroup->lbState ? "updating" : "ready"); + strcpy(pWrite, taosGetVgroupLbStatusStr(pVgroup->lbStatus)); cols++; for (int i = 0; i < maxReplica; ++i) { @@ -320,8 +334,8 @@ int mgmtRetrieveVgroups(SShowObj *pShow, char *data, int rows, SConnObj *pConn) pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; if (pVgroup->vnodeGid[i].ip != 0) { - bool ready = mgmtCheckVnodeReady(NULL, pVgroup, pVgroup->vnodeGid + i); - strcpy(pWrite, ready ? 
"ready" : "unsynced"); + char *vnodeStatus = mgmtGetVnodeStatus(pVgroup, pVgroup->vnodeGid + i); + strcpy(pWrite, vnodeStatus); } else { strcpy(pWrite, "null"); } diff --git a/src/system/detail/src/vnodeCache.c b/src/system/detail/src/vnodeCache.c index 8b51bc460923fa5dee6bad1c4d75e91789d59cec..36bf87210927a7280a425ee5c4c20af65b0a78c0 100644 --- a/src/system/detail/src/vnodeCache.c +++ b/src/system/detail/src/vnodeCache.c @@ -14,14 +14,13 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include +#include "os.h" #include "taosmsg.h" #include "vnode.h" #include "vnodeCache.h" #include "vnodeUtil.h" +#include "vnodeStatus.h" void vnodeSearchPointInCache(SMeterObj *pObj, SQuery *pQuery); void vnodeProcessCommitTimer(void *param, void *tmrId); @@ -79,7 +78,7 @@ void *vnodeOpenCachePool(int vnode) { } } - dTrace("vid:%d, cache pool is allocated:0x%x", vnode, pCachePool); + dPrint("vid:%d, cache pool is allocated:0x%x", vnode, pCachePool); return pCachePool; @@ -104,7 +103,7 @@ void vnodeCloseCachePool(int vnode) { taosTmrStopA(&pVnode->commitTimer); if (pVnode->commitInProcess) pthread_cancel(pVnode->commitThread); - dTrace("vid:%d, cache pool closed, count:%d", vnode, pCachePool->count); + dPrint("vid:%d, cache pool closed, count:%d", vnode, pCachePool->count); int maxAllocBlock = (1024 * 1024 * 1024) / pVnode->cfg.cacheBlockSize; while (blockId < pVnode->cfg.cacheNumOfBlocks.totalBlocks) { @@ -174,6 +173,7 @@ int vnodeFreeCacheBlock(SCacheBlock *pCacheBlock) { SCachePool *pPool = (SCachePool *)vnodeList[pObj->vnode].pCachePool; if (pCacheBlock->notFree) { pPool->notFreeSlots--; + pInfo->unCommittedBlocks--; dTrace("vid:%d sid:%d id:%s, cache block is not free, slot:%d, index:%d notFreeSlots:%d", pObj->vnode, pObj->sid, pObj->meterId, pCacheBlock->slot, pCacheBlock->index, pPool->notFreeSlots); } @@ -256,7 +256,7 @@ void vnodeUpdateCommitInfo(SMeterObj *pObj, int slot, int pos, uint64_t count) { tslot = (tslot + 1) % pInfo->maxBlocks; } - __sync_fetch_and_add(&pObj->freePoints, pObj->pointsPerBlock * slots); + atomic_fetch_add_32(&pObj->freePoints, pObj->pointsPerBlock * slots); pInfo->commitSlot = slot; pInfo->commitPoint = pos; pObj->commitCount = count; @@ -298,7 +298,7 @@ pthread_t vnodeCreateCommitThread(SVnodeObj *pVnode) { taosTmrStopA(&pVnode->commitTimer); - if (pVnode->status == TSDB_STATUS_UNSYNCED) { + if (pVnode->vnodeStatus == TSDB_VN_STATUS_UNSYNCED) { taosTmrReset(vnodeProcessCommitTimer, pVnode->cfg.commitTime * 1000, pVnode, vnodeTmrCtrl, &pVnode->commitTimer); dTrace("vid:%d, it is in unsyc state, commit later", pVnode->vnode); return pVnode->commitThread; @@ -372,13 +372,60 @@ void vnodeCancelCommit(SVnodeObj *pVnode) { taosTmrReset(vnodeProcessCommitTimer, pVnode->cfg.commitTime * 1000, pVnode, vnodeTmrCtrl, &pVnode->commitTimer); } +/* The vnode cache lock should be hold before calling this interface + */ +SCacheBlock *vnodeGetFreeCacheBlock(SVnodeObj *pVnode) { + SCachePool *pPool = (SCachePool *)(pVnode->pCachePool); + SVnodeCfg *pCfg = &(pVnode->cfg); + SCacheBlock *pCacheBlock = NULL; + int skipped = 0; + + while (1) { + pCacheBlock = (SCacheBlock *)(pPool->pMem[((int64_t)pPool->freeSlot)]); + if (pCacheBlock->blockId == 0) break; + + if (pCacheBlock->notFree) { + pPool->freeSlot++; + pPool->freeSlot = pPool->freeSlot % pCfg->cacheNumOfBlocks.totalBlocks; + skipped++; + if (skipped > pPool->threshold) { + vnodeCreateCommitThread(pVnode); + pthread_mutex_unlock(&pPool->vmutex); + dError("vid:%d committing process is too slow, notFreeSlots:%d....", pVnode->vnode, 
pPool->notFreeSlots); + return NULL; + } + } else { + SMeterObj * pRelObj = pCacheBlock->pMeterObj; + SCacheInfo *pRelInfo = (SCacheInfo *)pRelObj->pCache; + int firstSlot = (pRelInfo->currentSlot - pRelInfo->numOfBlocks + 1 + pRelInfo->maxBlocks) % pRelInfo->maxBlocks; + pCacheBlock = pRelInfo->cacheBlocks[firstSlot]; + if (pCacheBlock) { + pPool->freeSlot = pCacheBlock->index; + vnodeFreeCacheBlock(pCacheBlock); + break; + } else { + pPool->freeSlot = (pPool->freeSlot + 1) % pCfg->cacheNumOfBlocks.totalBlocks; + skipped++; + } + } + } + + pCacheBlock = (SCacheBlock *)(pPool->pMem[pPool->freeSlot]); + pCacheBlock->index = pPool->freeSlot; + pCacheBlock->notFree = 1; + pPool->freeSlot = (pPool->freeSlot + 1) % pCfg->cacheNumOfBlocks.totalBlocks; + pPool->notFreeSlots++; + + return pCacheBlock; +} + int vnodeAllocateCacheBlock(SMeterObj *pObj) { int index; SCachePool * pPool; SCacheBlock *pCacheBlock; SCacheInfo * pInfo; SVnodeObj * pVnode; - int skipped = 0, commit = 0; + int commit = 0; pVnode = vnodeList + pObj->vnode; pPool = (SCachePool *)pVnode->pCachePool; @@ -406,45 +453,10 @@ int vnodeAllocateCacheBlock(SMeterObj *pObj) { return -1; } - while (1) { - pCacheBlock = (SCacheBlock *)(pPool->pMem[((int64_t)pPool->freeSlot)]); - if (pCacheBlock->blockId == 0) break; - - if (pCacheBlock->notFree) { - pPool->freeSlot++; - pPool->freeSlot = pPool->freeSlot % pCfg->cacheNumOfBlocks.totalBlocks; - skipped++; - if (skipped > pPool->threshold) { - vnodeCreateCommitThread(pVnode); - pthread_mutex_unlock(&pPool->vmutex); - dError("vid:%d sid:%d id:%s, committing process is too slow, notFreeSlots:%d....", - pObj->vnode, pObj->sid, pObj->meterId, pPool->notFreeSlots); - return -1; - } - } else { - SMeterObj *pRelObj = pCacheBlock->pMeterObj; - SCacheInfo *pRelInfo = (SCacheInfo *)pRelObj->pCache; - int firstSlot = (pRelInfo->currentSlot - pRelInfo->numOfBlocks + 1 + pRelInfo->maxBlocks) % pRelInfo->maxBlocks; - pCacheBlock = pRelInfo->cacheBlocks[firstSlot]; - if (pCacheBlock) { - pPool->freeSlot = pCacheBlock->index; - vnodeFreeCacheBlock(pCacheBlock); - break; - } else { - pPool->freeSlot = (pPool->freeSlot + 1) % pCfg->cacheNumOfBlocks.totalBlocks; - skipped++; - } - } - } - - index = pPool->freeSlot; - pPool->freeSlot++; - pPool->freeSlot = pPool->freeSlot % pCfg->cacheNumOfBlocks.totalBlocks; - pPool->notFreeSlots++; + if ((pCacheBlock = vnodeGetFreeCacheBlock(pVnode)) == NULL) return -1; + index = pCacheBlock->index; pCacheBlock->pMeterObj = pObj; - pCacheBlock->notFree = 1; - pCacheBlock->index = index; pCacheBlock->offset[0] = ((char *)(pCacheBlock)) + sizeof(SCacheBlock) + pObj->numOfColumns * sizeof(char *); for (int col = 1; col < pObj->numOfColumns; ++col) @@ -505,7 +517,7 @@ int vnodeInsertPointToCache(SMeterObj *pObj, char *pData) { pData += pObj->schema[col].bytes; } - __sync_fetch_and_sub(&pObj->freePoints, 1); + atomic_fetch_sub_32(&pObj->freePoints, 1); pCacheBlock->numOfPoints++; pPool->count++; @@ -1114,7 +1126,7 @@ int vnodeSyncRestoreCache(int vnode, int fd) { for (int col = 0; col < pObj->numOfColumns; ++col) if (taosReadMsg(fd, pBlock->offset[col], pObj->schema[col].bytes * points) <= 0) return -1; - __sync_fetch_and_sub(&pObj->freePoints, points); + atomic_fetch_sub_32(&pObj->freePoints, points); blocksReceived++; pointsReceived += points; pObj->lastKey = *((TSKEY *)(pBlock->offset[0] + pObj->schema[0].bytes * (points - 1))); diff --git a/src/system/detail/src/vnodeCommit.c b/src/system/detail/src/vnodeCommit.c index 
af14f2be4a8fb56c305967a66c2e94b8cc864873..b5c9f8074536d1b18061b124f70f93e064e2b316 100644 --- a/src/system/detail/src/vnodeCommit.c +++ b/src/system/detail/src/vnodeCommit.c @@ -14,19 +14,12 @@ */ #define _GNU_SOURCE /* See feature_test_macros(7) */ -#include - -#include -#include -#include -#include -#include -#include -#include +#include "os.h" #include "tsdb.h" #include "vnode.h" #include "vnodeUtil.h" +#include "vnodeStatus.h" typedef struct { int sversion; @@ -89,7 +82,7 @@ int vnodeRenewCommitLog(int vnode) { pthread_mutex_lock(&(pVnode->logMutex)); - if (VALIDFD(pVnode->logFd)) { + if (FD_VALID(pVnode->logFd)) { munmap(pVnode->pMem, pVnode->mappingSize); close(pVnode->logFd); rename(fileName, oldName); @@ -173,7 +166,7 @@ size_t vnodeRestoreDataFromLog(int vnode, char *fileName, uint64_t *firstV) { continue; } - if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETING)) { + if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) { dWarn("vid:%d sid:%d id:%s, meter is dropped, ignore data in commit log, contLen:%d action:%d", vnode, head.sid, head.contLen, head.action); continue; @@ -243,7 +236,7 @@ int vnodeInitCommit(int vnode) { } pVnode->pWrite += size; - dTrace("vid:%d, commit log is initialized", vnode); + dPrint("vid:%d, commit log is initialized", vnode); return 0; } @@ -251,7 +244,7 @@ int vnodeInitCommit(int vnode) { void vnodeCleanUpCommit(int vnode) { SVnodeObj *pVnode = vnodeList + vnode; - if (VALIDFD(pVnode->logFd)) close(pVnode->logFd); + if (FD_VALID(pVnode->logFd)) close(pVnode->logFd); if (pVnode->cfg.commitLog && (pVnode->logFd > 0 && remove(pVnode->logFn) < 0)) { dError("vid:%d, failed to remove:%s", vnode, pVnode->logFn); diff --git a/src/system/detail/src/vnodeFile.c b/src/system/detail/src/vnodeFile.c index df94c883ace04237577bb905fe0b2b758fd7f2ab..7597659e5308a077dd18054a323ae8ff44a24318 100644 --- a/src/system/detail/src/vnodeFile.c +++ b/src/system/detail/src/vnodeFile.c @@ -14,21 +14,14 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "os.h" #include "tscompression.h" #include "tutil.h" #include "vnode.h" #include "vnodeFile.h" #include "vnodeUtil.h" +#include "vnodeStatus.h" #define FILE_QUERY_NEW_BLOCK -5 // a special negative number @@ -103,8 +96,8 @@ void vnodeGetDnameFromLname(char *lhead, char *ldata, char *llast, char *dhead, } void vnodeGetHeadTname(char *nHeadName, char *nLastName, int vnode, int fileId) { - sprintf(nHeadName, "%s/vnode%d/db/v%df%d.t", tsDirectory, vnode, vnode, fileId); - sprintf(nLastName, "%s/vnode%d/db/v%df%d.l", tsDirectory, vnode, vnode, fileId); + if (nHeadName != NULL) sprintf(nHeadName, "%s/vnode%d/db/v%df%d.t", tsDirectory, vnode, vnode, fileId); + if (nLastName != NULL) sprintf(nLastName, "%s/vnode%d/db/v%df%d.l", tsDirectory, vnode, vnode, fileId); } void vnodeCreateDataDirIfNeeded(int vnode, char *path) { @@ -122,6 +115,7 @@ int vnodeCreateHeadDataFile(int vnode, int fileId, char *headName, char *dataNam char *path = vnodeGetDataDir(vnode, fileId); if (path == NULL) { + dError("vid:%d, fileId:%d, failed to get dataDir", vnode, fileId); return -1; } @@ -133,10 +127,8 @@ int vnodeCreateHeadDataFile(int vnode, int fileId, char *headName, char *dataNam if (symlink(dDataName, dataName) != 0) return -1; if (symlink(dLastName, lastName) != 0) return -1; - dTrace( - "vid:%d, fileId:%d, empty header file:%s dataFile:%s lastFile:%s on " - "disk:%s is created ", - vnode, fileId, headName, dataName, lastName, path); + dPrint("vid:%d, fileId:%d, empty 
header file:%s dataFile:%s lastFile:%s on disk:%s is created ", + vnode, fileId, headName, dataName, lastName, path); return 0; } @@ -189,40 +181,36 @@ int vnodeCreateEmptyCompFile(int vnode, int fileId) { return 0; } -int vnodeOpenCommitFiles(SVnodeObj *pVnode, int noTempLast) { - char name[TSDB_FILENAME_LEN]; - char dHeadName[TSDB_FILENAME_LEN] = "\0"; - char dLastName[TSDB_FILENAME_LEN] = "\0"; - int len = 0; - struct stat filestat; - int vnode = pVnode->vnode; - int fileId, numOfFiles, filesAdded = 0; - SVnodeCfg * pCfg = &pVnode->cfg; +int vnodeCreateNeccessaryFiles(SVnodeObj *pVnode) { + int numOfFiles = 0, fileId, filesAdded = 0; + int vnode = pVnode->vnode; + SVnodeCfg *pCfg = &(pVnode->cfg); if (pVnode->lastKeyOnFile == 0) { if (pCfg->daysPerFile == 0) pCfg->daysPerFile = 10; - pVnode->fileId = pVnode->firstKey / tsMsPerDay[pVnode->cfg.precision] / pCfg->daysPerFile; - pVnode->lastKeyOnFile = (int64_t)(pVnode->fileId + 1) * pCfg->daysPerFile * tsMsPerDay[pVnode->cfg.precision] - 1; + pVnode->fileId = pVnode->firstKey / tsMsPerDay[(uint8_t)pVnode->cfg.precision] / pCfg->daysPerFile; + pVnode->lastKeyOnFile = (int64_t)(pVnode->fileId + 1) * pCfg->daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision] - 1; pVnode->numOfFiles = 1; - vnodeCreateEmptyCompFile(vnode, pVnode->fileId); + if (vnodeCreateEmptyCompFile(vnode, pVnode->fileId) < 0) return -1; } - numOfFiles = (pVnode->lastKeyOnFile - pVnode->commitFirstKey) / tsMsPerDay[pVnode->cfg.precision] / pCfg->daysPerFile; + numOfFiles = (pVnode->lastKeyOnFile - pVnode->commitFirstKey) / tsMsPerDay[(uint8_t)pVnode->cfg.precision] / pCfg->daysPerFile; if (pVnode->commitFirstKey > pVnode->lastKeyOnFile) numOfFiles = -1; - dTrace("vid:%d, commitFirstKey:%ld lastKeyOnFile:%ld numOfFiles:%d fileId:%d vnodeNumOfFiles:%d", - vnode, pVnode->commitFirstKey, pVnode->lastKeyOnFile, numOfFiles, pVnode->fileId, pVnode->numOfFiles); + dTrace("vid:%d, commitFirstKey:%ld lastKeyOnFile:%ld numOfFiles:%d fileId:%d vnodeNumOfFiles:%d", pVnode->vnode, + pVnode->commitFirstKey, pVnode->lastKeyOnFile, numOfFiles, pVnode->fileId, pVnode->numOfFiles); if (numOfFiles >= pVnode->numOfFiles) { // create empty header files backward filesAdded = numOfFiles - pVnode->numOfFiles + 1; + assert(filesAdded <= pVnode->maxFiles + 2); for (int i = 0; i < filesAdded; ++i) { fileId = pVnode->fileId - pVnode->numOfFiles - i; if (vnodeCreateEmptyCompFile(vnode, fileId) < 0) #ifdef CLUSTER return vnodeRecoverFromPeer(pVnode, fileId); #else - return -1; + return -1; #endif } } else if (numOfFiles < 0) { @@ -232,20 +220,37 @@ int vnodeOpenCommitFiles(SVnodeObj *pVnode, int noTempLast) { #ifdef CLUSTER return vnodeRecoverFromPeer(pVnode, pVnode->fileId); #else - return -1; + return -1; #endif - pVnode->lastKeyOnFile += (int64_t)tsMsPerDay[pVnode->cfg.precision] * pCfg->daysPerFile; + pVnode->lastKeyOnFile += (int64_t)tsMsPerDay[(uint8_t)pVnode->cfg.precision] * pCfg->daysPerFile; filesAdded = 1; numOfFiles = 0; // hacker way } fileId = pVnode->fileId - numOfFiles; pVnode->commitLastKey = - pVnode->lastKeyOnFile - (int64_t)numOfFiles * tsMsPerDay[pVnode->cfg.precision] * pCfg->daysPerFile; - pVnode->commitFirstKey = pVnode->commitLastKey - (int64_t)tsMsPerDay[pVnode->cfg.precision] * pCfg->daysPerFile + 1; + pVnode->lastKeyOnFile - (int64_t)numOfFiles * tsMsPerDay[(uint8_t)pVnode->cfg.precision] * pCfg->daysPerFile; + pVnode->commitFirstKey = pVnode->commitLastKey - (int64_t)tsMsPerDay[(uint8_t)pVnode->cfg.precision] * pCfg->daysPerFile + 1; pVnode->commitFileId = fileId; 
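A worked sketch, under assumed constants, of the arithmetic above that maps a vnode's first key to its file id and to the last key that file may hold; msPerDay[] stands in for tsMsPerDay[], and the millisecond/microsecond values per precision index are assumptions.

```c
#include <stdint.h>

typedef int64_t TSKEY;

/* assumed contents: index 0 = millisecond precision, index 1 = microsecond precision */
static const int64_t msPerDay[] = {86400000LL, 86400000000LL};

static void firstFileRange(TSKEY firstKey, int daysPerFile, uint8_t precision,
                           int *fileId, TSKEY *lastKeyOnFile) {
  *fileId = (int)(firstKey / msPerDay[precision] / daysPerFile);
  /* the file covers daysPerFile whole days, so its last key is one tick before the next file */
  *lastKeyOnFile = (TSKEY)(*fileId + 1) * daysPerFile * msPerDay[precision] - 1;
}
```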
pVnode->numOfFiles = pVnode->numOfFiles + filesAdded; + return 0; +} + + +int vnodeOpenCommitFiles(SVnodeObj *pVnode, int noTempLast) { + char name[TSDB_FILENAME_LEN]; + char dHeadName[TSDB_FILENAME_LEN] = "\0"; + char dLastName[TSDB_FILENAME_LEN] = "\0"; + int len = 0; + struct stat filestat; + int vnode = pVnode->vnode; + int fileId; + + if (vnodeCreateNeccessaryFiles(pVnode) < 0) return -1; + + fileId = pVnode->commitFileId; + dTrace("vid:%d, commit fileId:%d, commitLastKey:%ld, vnodeLastKey:%ld, lastKeyOnFile:%ld numOfFiles:%d", vnode, fileId, pVnode->commitLastKey, pVnode->lastKey, pVnode->lastKeyOnFile, pVnode->numOfFiles); @@ -316,7 +321,7 @@ int vnodeOpenCommitFiles(SVnodeObj *pVnode, int noTempLast) { vnodeRecoverFromPeer(pVnode, fileId); goto _error; } else { - dTrace("vid:%d, data file:%s is opened to write", vnode, name); + dPrint("vid:%d, data file:%s is opened to write", vnode, name); } // open last file @@ -410,7 +415,7 @@ void vnodeRemoveFile(int vnode, int fileId) { int fd = open(headName, O_RDWR | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); if (fd > 0) { vnodeGetHeadFileHeaderInfo(fd, &headInfo); - __sync_fetch_and_add(&(pVnode->vnodeStatistic.totalStorage), -headInfo.totalStorage); + atomic_fetch_add_64(&(pVnode->vnodeStatistic.totalStorage), -headInfo.totalStorage); close(fd); } @@ -421,7 +426,7 @@ void vnodeRemoveFile(int vnode, int fileId) { remove(dDataName); remove(dLastName); - dTrace("vid:%d fileId:%d on disk: %s is removed, numOfFiles:%d maxFiles:%d", vnode, fileId, path, + dPrint("vid:%d fileId:%d on disk: %s is removed, numOfFiles:%d maxFiles:%d", vnode, fileId, path, pVnode->numOfFiles, pVnode->maxFiles); } @@ -607,7 +612,7 @@ _again: } // meter is going to be deleted, abort - if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETING)) { + if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) { dWarn("vid:%d sid:%d is dropped, ignore this meter", vnode, sid); continue; } @@ -1241,7 +1246,7 @@ int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[] // assert(data[i]->len == points*pObj->schema[i].bytes); if (pCfg->compression) { - cdata[i]->len = (*pCompFunc[pObj->schema[i].type])(data[i]->data, points * pObj->schema[i].bytes, points, + cdata[i]->len = (*pCompFunc[(uint8_t)pObj->schema[i].type])(data[i]->data, points * pObj->schema[i].bytes, points, cdata[i]->data, pObj->schema[i].bytes*pObj->pointsPerFileBlock+EXTRA_BYTES, pCfg->compression, buffer, bufferSize); fields[i].len = cdata[i]->len; @@ -1249,6 +1254,7 @@ int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[] offset += (cdata[i]->len + sizeof(TSCKSUM)); } else { + data[i]->len = pObj->schema[i].bytes * points; fields[i].len = data[i]->len; taosCalcChecksumAppend(0, (uint8_t *)(data[i]->data), data[i]->len + sizeof(TSCKSUM)); offset += (data[i]->len + sizeof(TSCKSUM)); @@ -1297,7 +1303,7 @@ int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[] pCompBlock->len += wlen; } - dTrace("vid: %d vnode compStorage size is: %ld", pObj->vnode, pVnode->vnodeStatistic.compStorage); + dTrace("vid:%d, vnode compStorage size is: %ld", pObj->vnode, pVnode->vnodeStatistic.compStorage); pCompBlock->algorithm = pCfg->compression; pCompBlock->numOfPoints = points; @@ -1332,7 +1338,7 @@ int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery) { if (pVnode->numOfFiles <= 0) return 0; SVnodeCfg *pCfg = &pVnode->cfg; - delta = (int64_t)pCfg->daysPerFile * tsMsPerDay[pVnode->cfg.precision]; + delta = (int64_t)pCfg->daysPerFile * 
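The (uint8_t) casts added in these hunks guard against char signedness: a byte-sized schema or precision field used directly as a subscript can become negative on targets where plain char is signed. A small sketch of the idiom, with a hypothetical handler table standing in for the real compression-function array:

```c
#include <stdint.h>

typedef int (*colHandler)(const char *in, int len, char *out);

static int copyHandler(const char *in, int len, char *out) { (void)in; (void)out; return len; }

/* hypothetical table indexed by a one-byte column type */
static colHandler handlers[256] = {[0] = copyHandler};

static int dispatch(char colType, const char *in, int len, char *out) {
  /* cast first: without it, a stored byte >= 128 would index with a negative value on
   * signed-char targets */
  return handlers[(uint8_t)colType](in, len, out);
}
```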
tsMsPerDay[(uint8_t)pVnode->cfg.precision]; latest = pObj->lastKeyOnFile; oldest = (pVnode->fileId - pVnode->numOfFiles + 1) * delta; @@ -1825,7 +1831,15 @@ int vnodeInitFile(int vnode) { pVnode->fmagic = (uint64_t *)calloc(pVnode->maxFiles + 1, sizeof(uint64_t)); int fileId = pVnode->fileId; - for (int i = 0; i < pVnode->numOfFiles; ++i) { + /* + * The actual files will far exceed the files that need to exist + */ + if (pVnode->numOfFiles > pVnode->maxFiles) { + dError("vid:%d numOfFiles:%d should not larger than maxFiles:%d", vnode, pVnode->numOfFiles, pVnode->maxFiles); + } + + int numOfFiles = MIN(pVnode->numOfFiles, pVnode->maxFiles); + for (int i = 0; i < numOfFiles; ++i) { if (vnodeUpdateFileMagic(vnode, fileId) < 0) { if (pVnode->cfg.replications > 1) { pVnode->badFileId = fileId; diff --git a/src/system/detail/src/vnodeFileUtil.c b/src/system/detail/src/vnodeFileUtil.c index f8de6c4a42453e35e6d3676890f9f6942c0a6fba..b40e7cfd41eed93e08c4959d55fd379bdc206c4d 100644 --- a/src/system/detail/src/vnodeFileUtil.c +++ b/src/system/detail/src/vnodeFileUtil.c @@ -14,11 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include +#include "os.h" #include "vnode.h" diff --git a/src/system/detail/src/vnodeFilterFunc.c b/src/system/detail/src/vnodeFilterFunc.c index bd829460d9dea0b2defeee046d55656bbbe99fc0..08fe78c188b2d0e1536bd57c40867783f24b4a16 100644 --- a/src/system/detail/src/vnodeFilterFunc.c +++ b/src/system/detail/src/vnodeFilterFunc.c @@ -14,11 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include +#include "os.h" #include "taosmsg.h" #include "tsqlfunction.h" diff --git a/src/system/detail/src/vnodeImport.c b/src/system/detail/src/vnodeImport.c index f50b6f49461def53da870f448e6d4aabd213ed95..6bf543e47068cbcce3aca822885b646689b175a4 100644 --- a/src/system/detail/src/vnodeImport.c +++ b/src/system/detail/src/vnodeImport.c @@ -14,32 +14,23 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include +#include "os.h" -#include "trpc.h" -#include "ttimer.h" #include "vnode.h" -#include "vnodeMgmt.h" -#include "vnodeShell.h" -#include "vnodeShell.h" #include "vnodeUtil.h" -#pragma GCC diagnostic ignored "-Wpointer-sign" -#pragma GCC diagnostic ignored "-Wint-conversion" - -typedef struct { - SCompHeader *headList; - SCompInfo compInfo; - int last; // 0:last block in data file, 1:not the last block - int newBlocks; - int oldNumOfBlocks; - int64_t compInfoOffset; // offset for compInfo in head file - int64_t leftOffset; // copy from this offset to end of head file - int64_t hfdSize; // old head file size -} SHeadInfo; - +#include "vnodeStatus.h" + +extern void vnodeGetHeadTname(char *nHeadName, char *nLastName, int vnode, int fileId); +extern int vnodeReadColumnToMem(int fd, SCompBlock *pBlock, SField **fields, int col, char *data, int dataSize, + char *temp, char *buffer, int bufferSize); +extern int vnodeSendShellSubmitRspMsg(SShellObj *pObj, int code, int numOfPoints); +extern void vnodeGetHeadDataLname(char *headName, char *dataName, char *lastName, int vnode, int fileId); +extern int vnodeCreateEmptyCompFile(int vnode, int fileId); +extern int vnodeUpdateFreeSlot(SVnodeObj *pVnode); +extern SCacheBlock *vnodeGetFreeCacheBlock(SVnodeObj *pVnode); +extern int vnodeCreateNeccessaryFiles(SVnodeObj *pVnode); + +#define KEY_AT_INDEX(payload, step, idx) (*(TSKEY *)((char *)(payload) + (step) * (idx))) typedef struct { void * signature; SShellObj *pShell; @@ -56,935 +47,1512 @@ typedef struct { // only for file int numOfPoints; 
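KEY_AT_INDEX simply treats a submit payload as consecutive records of bytesPerPoint bytes whose first field is the timestamp, so record i's key sits at byte offset step * i. A self-contained toy (the 16-byte record layout is an assumption) showing how the first and last keys are pulled out:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef int64_t TSKEY;
#define KEY_AT_INDEX(payload, step, idx) (*(TSKEY *)((char *)(payload) + (step) * (idx)))

int main(void) {
  enum { ROWS = 3, STEP = 16 };        /* 16 bytes per point in this toy schema */
  char payload[ROWS * STEP];
  for (int i = 0; i < ROWS; ++i) {
    TSKEY key = 1700000000000LL + i;   /* ascending keys, as the import path assumes */
    memcpy(payload + i * STEP, &key, sizeof(key));
  }
  printf("first:%lld last:%lld\n",
         (long long)KEY_AT_INDEX(payload, STEP, 0),
         (long long)KEY_AT_INDEX(payload, STEP, ROWS - 1));
  return 0;
}
```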
- int fileId; int64_t offset; // offset in data file - SData *sdata[TSDB_MAX_COLUMNS]; - char *buffer; - char *payload; - char *opayload; + char * payload; + char * opayload; // allocated space for payload from client int rows; } SImportInfo; -int vnodeImportData(SMeterObj *pObj, SImportInfo *pImport); +typedef struct { + // in .head file + SCompHeader *pHeader; + size_t pHeaderSize; -int vnodeGetImportStartPart(SMeterObj *pObj, char *payload, int rows, TSKEY key1) { - int i; + SCompInfo compInfo; + SCompBlock *pBlocks; + // in .data file + int blockId; + uint8_t blockLoadState; - for (i = 0; i < rows; ++i) { - TSKEY key = *((TSKEY *)(payload + i * pObj->bytesPerPoint)); - if (key >= key1) break; - } + SField *pField; + size_t pFieldSize; - return i; -} + SData *data[TSDB_MAX_COLUMNS]; + char * buffer; -int vnodeGetImportEndPart(SMeterObj *pObj, char *payload, int rows, char **pStart, TSKEY key0) { - int i; + char *temp; - for (i = 0; i < rows; ++i) { - TSKEY key = *((TSKEY *)(payload + i * pObj->bytesPerPoint)); - if (key > key0) break; - } + char * tempBuffer; + size_t tempBufferSize; + // Variables for sendfile + int64_t compInfoOffset; + int64_t nextNo0Offset; // next sid whose compInfoOffset > 0 + int64_t hfSize; + int64_t driftOffset; - *pStart = payload + i * pObj->bytesPerPoint; - return rows - i; -} + int oldNumOfBlocks; + int newNumOfBlocks; + int last; +} SImportHandle; -int vnodeCloseFileForImport(SMeterObj *pObj, SHeadInfo *pHinfo) { - SVnodeObj *pVnode = &vnodeList[pObj->vnode]; - SVnodeCfg *pCfg = &pVnode->cfg; - TSCKSUM chksum = 0; +typedef struct { + int slot; + int pos; + int oslot; // old slot + TSKEY nextKey; +} SBlockIter; - if (pHinfo->newBlocks == 0 || pHinfo->compInfoOffset == 0) return 0; +typedef struct { + int64_t spos; + int64_t epos; + int64_t totalRows; + char * offset[]; +} SMergeBuffer; - if (pHinfo->oldNumOfBlocks == 0) twrite(pVnode->nfd, &chksum, sizeof(TSCKSUM)); +int vnodeImportData(SMeterObj *pObj, SImportInfo *pImport); - int leftSize = pHinfo->hfdSize - pHinfo->leftOffset; - if (leftSize > 0) { - lseek(pVnode->hfd, pHinfo->leftOffset, SEEK_SET); - tsendfile(pVnode->nfd, pVnode->hfd, NULL, leftSize); - } +int vnodeFindKeyInCache(SImportInfo *pImport, int order) { + SMeterObj * pObj = pImport->pObj; + int code = 0; + SQuery query; + SCacheInfo *pInfo = (SCacheInfo *)pObj->pCache; - pHinfo->compInfo.numOfBlocks += pHinfo->newBlocks; - int offset = (pHinfo->compInfo.numOfBlocks - pHinfo->oldNumOfBlocks) * sizeof(SCompBlock); - if (pHinfo->oldNumOfBlocks == 0) offset += sizeof(SCompInfo) + sizeof(TSCKSUM); + TSKEY key = order ? pImport->firstKey : pImport->lastKey; + memset(&query, 0, sizeof(query)); + query.order.order = order; + query.skey = key; + query.ekey = order ? 
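Several of the cache paths here rely on the same ring-buffer slot arithmetic; a minimal sketch of the two recurring expressions, where the +maxBlocks term keeps the modulo non-negative when the subtraction wraps:

```c
/* oldest live slot, given the newest slot and the number of live blocks */
static int oldestSlot(int currentSlot, int numOfBlocks, int maxBlocks) {
  return (currentSlot - numOfBlocks + 1 + maxBlocks) % maxBlocks;
}

/* advance one slot, wrapping at the end of the block array */
static int nextSlot(int slot, int maxBlocks) {
  return (slot + 1) % maxBlocks;
}
```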
pImport->lastKey : pImport->firstKey; + vnodeSearchPointInCache(pObj, &query); + + if (query.slot < 0) { + pImport->slot = pInfo->commitSlot; + if (pInfo->commitPoint >= pObj->pointsPerBlock) pImport->slot = (pImport->slot + 1) % pInfo->maxBlocks; + pImport->pos = 0; + pImport->key = 0; + dTrace("vid:%d sid:%d id:%s, key:%ld, import to head of cache", pObj->vnode, pObj->sid, pObj->meterId, key); + code = 0; + } else { + pImport->slot = query.slot; + pImport->pos = query.pos; + pImport->key = query.key; - pHinfo->headList[pObj->sid].compInfoOffset = pHinfo->compInfoOffset; - for (int sid = pObj->sid + 1; sid < pCfg->maxSessions; ++sid) { - if (pHinfo->headList[sid].compInfoOffset) pHinfo->headList[sid].compInfoOffset += offset; + if (key != query.key) { + if (order == 0) { + // since pos is the position which has smaller key, data shall be imported after it + pImport->pos++; + if (pImport->pos >= pObj->pointsPerBlock) { + pImport->slot = (pImport->slot + 1) % pInfo->maxBlocks; + pImport->pos = 0; + } + } else { + if (pImport->pos < 0) pImport->pos = 0; + } + } + code = 0; } - lseek(pVnode->nfd, TSDB_FILE_HEADER_LEN, SEEK_SET); - int tmsize = sizeof(SCompHeader) * pCfg->maxSessions + sizeof(TSCKSUM); - taosCalcChecksumAppend(0, (uint8_t *)pHinfo->headList, tmsize); - twrite(pVnode->nfd, pHinfo->headList, tmsize); + return code; +} - int size = pHinfo->compInfo.numOfBlocks * sizeof(SCompBlock); - char *buffer = malloc(size); - lseek(pVnode->nfd, pHinfo->compInfoOffset + sizeof(SCompInfo), SEEK_SET); - read(pVnode->nfd, buffer, size); - SCompBlock *pBlock = (SCompBlock *)(buffer + (pHinfo->compInfo.numOfBlocks - 1) * sizeof(SCompBlock)); +void vnodeGetValidDataRange(int vnode, TSKEY now, TSKEY *minKey, TSKEY *maxKey) { + SVnodeObj *pVnode = vnodeList + vnode; - pHinfo->compInfo.uid = pObj->uid; - pHinfo->compInfo.delimiter = TSDB_VNODE_DELIMITER; - pHinfo->compInfo.last = pBlock->last; + int64_t delta = pVnode->cfg.daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision]; + int fid = now / delta; + *minKey = (fid - pVnode->maxFiles + 1) * delta; + *maxKey = (fid + 2) * delta - 1; + return; +} - taosCalcChecksumAppend(0, (uint8_t *)(&pHinfo->compInfo), sizeof(SCompInfo)); - lseek(pVnode->nfd, pHinfo->compInfoOffset, SEEK_SET); - twrite(pVnode->nfd, &pHinfo->compInfo, sizeof(SCompInfo)); +int vnodeImportPoints(SMeterObj *pObj, char *cont, int contLen, char source, void *param, int sversion, + int *pNumOfPoints, TSKEY now) { + SSubmitMsg *pSubmit = (SSubmitMsg *)cont; + SVnodeObj * pVnode = vnodeList + pObj->vnode; + int rows = 0; + char * payload = NULL; + int code = TSDB_CODE_SUCCESS; + SCachePool *pPool = (SCachePool *)(pVnode->pCachePool); + SShellObj * pShell = (SShellObj *)param; + TSKEY firstKey, lastKey; - chksum = taosCalcChecksum(0, (uint8_t *)buffer, size); - lseek(pVnode->nfd, pHinfo->compInfoOffset + sizeof(SCompInfo) + size, SEEK_SET); - twrite(pVnode->nfd, &chksum, sizeof(TSCKSUM)); - free(buffer); + payload = pSubmit->payLoad; - vnodeCloseCommitFiles(pVnode); + rows = htons(pSubmit->numOfRows); + assert(rows > 0); + int expectedLen = rows * pObj->bytesPerPoint + sizeof(pSubmit->numOfRows); + if (expectedLen != contLen) { + dError("vid:%d sid:%d id:%s, invalid import, expected:%d, contLen:%d", pObj->vnode, pObj->sid, pObj->meterId, + expectedLen, contLen); + return TSDB_CODE_WRONG_MSG_SIZE; + } - return 0; -} + // Check timestamp context. 
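A sketch of the message-size validation performed at the top of the import path: numOfRows travels in network byte order, and the body must hold exactly rows * bytesPerPoint bytes after that field. The struct below is a simplified stand-in for SSubmitMsg.

```c
#include <arpa/inet.h>
#include <stdint.h>

typedef struct {
  int16_t numOfRows;   /* network byte order on the wire */
  char    payLoad[];
} SubmitStub;

static int checkSubmitLen(const SubmitStub *msg, int contLen, int bytesPerPoint) {
  int rows = ntohs((uint16_t)msg->numOfRows);
  int expected = rows * bytesPerPoint + (int)sizeof(msg->numOfRows);
  return (rows > 0 && expected == contLen) ? rows : -1;   /* -1 means malformed */
}
```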
+ TSKEY minKey = 0, maxKey = 0; + firstKey = KEY_AT_INDEX(payload, pObj->bytesPerPoint, 0); + lastKey = KEY_AT_INDEX(payload, pObj->bytesPerPoint, rows - 1); + assert(firstKey <= lastKey); + vnodeGetValidDataRange(pObj->vnode, now, &minKey, &maxKey); + if (firstKey < minKey || firstKey > maxKey || lastKey < minKey || lastKey > maxKey) { + dError( + "vid:%d sid:%d id:%s, invalid timestamp to import, rows:%d firstKey: %ld lastKey: %ld minAllowedKey:%ld " + "maxAllowedKey:%ld", + pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey, minKey, maxKey); + return TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; + } + // forward to peers + if (pShell && pVnode->cfg.replications > 1) { + code = vnodeForwardToPeer(pObj, cont, contLen, TSDB_ACTION_IMPORT, sversion); + if (code != 0) return code; + } -int vnodeProcessLastBlock(SImportInfo *pImport, SHeadInfo *pHinfo, SData *data[]) { - SMeterObj *pObj = pImport->pObj; - SVnodeObj *pVnode = &vnodeList[pObj->vnode]; - SCompBlock lastBlock; - int code = 0; - - if (pHinfo->compInfo.last == 0) return 0; - - // read into memory - uint64_t offset = - pHinfo->compInfoOffset + (pHinfo->compInfo.numOfBlocks - 1) * sizeof(SCompBlock) + sizeof(SCompInfo); - lseek(pVnode->hfd, offset, SEEK_SET); - read(pVnode->hfd, &lastBlock, sizeof(SCompBlock)); - assert(lastBlock.last); - - if (lastBlock.sversion != pObj->sversion) { - lseek(pVnode->lfd, lastBlock.offset, SEEK_SET); - lastBlock.offset = lseek(pVnode->dfd, 0, SEEK_END); - tsendfile(pVnode->dfd, pVnode->lfd, NULL, lastBlock.len); - - lastBlock.last = 0; - lseek(pVnode->hfd, offset, SEEK_SET); - twrite(pVnode->hfd, &lastBlock, sizeof(SCompBlock)); - } else { - vnodeReadLastBlockToMem(pObj, &lastBlock, data); - pHinfo->compInfo.numOfBlocks--; - code = lastBlock.numOfPoints; + if (pVnode->cfg.commitLog && source != TSDB_DATA_SOURCE_LOG) { + if (pVnode->logFd < 0) return TSDB_CODE_INVALID_COMMIT_LOG; + code = vnodeWriteToCommitLog(pObj, TSDB_ACTION_IMPORT, cont, contLen, sversion); + if (code != 0) return code; } - return code; -} + /* + * The timestamp of all records in a submit payload are always in ascending order, guaranteed by client, so here only + * the first key. 
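To make the range check above concrete, a worked sketch of vnodeGetValidDataRange's arithmetic: with files of delta milliseconds each and maxFiles files retained, only keys between the start of the oldest retained file and roughly two file intervals past "now" are accepted. Millisecond precision is assumed here.

```c
#include <stdint.h>

typedef int64_t TSKEY;

static void validImportRange(TSKEY now, int daysPerFile, int maxFiles,
                             TSKEY *minKey, TSKEY *maxKey) {
  int64_t delta = (int64_t)daysPerFile * 86400000LL;   /* one file interval, in ms */
  int64_t fid = now / delta;
  *minKey = (fid - maxFiles + 1) * delta;              /* start of the oldest kept file */
  *maxKey = (fid + 2) * delta - 1;                     /* end of the file after the current one */
}
```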
+ */ + if (firstKey > pObj->lastKey) { // Just call insert + code = vnodeInsertPoints(pObj, cont, contLen, TSDB_DATA_SOURCE_LOG, NULL, sversion, pNumOfPoints, now); + } else { // trigger import + if (sversion != pObj->sversion) { + dError("vid:%d sid:%d id:%s, invalid sversion, expected:%d received:%d", pObj->vnode, pObj->sid, pObj->meterId, + pObj->sversion, sversion); + return TSDB_CODE_OTHERS; + } + + // check the table status for perform import historical data + if ((code = vnodeSetMeterInsertImportStateEx(pObj, TSDB_METER_STATE_IMPORTING)) != TSDB_CODE_SUCCESS) { + return code; + } + + SImportInfo import = {0}; + + dTrace("vid:%d sid:%d id:%s, try to import %d rows data, firstKey:%ld, lastKey:%ld, object lastKey:%ld", + pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey, pObj->lastKey); + + import.firstKey = firstKey; + import.lastKey = lastKey; + import.pObj = pObj; + import.pShell = pShell; + import.payload = payload; + import.rows = rows; -int vnodeOpenFileForImport(SImportInfo *pImport, char *payload, SHeadInfo *pHinfo, SData *data[]) { - SMeterObj *pObj = pImport->pObj; - SVnodeObj *pVnode = &vnodeList[pObj->vnode]; - SVnodeCfg *pCfg = &pVnode->cfg; - TSKEY firstKey = *((TSKEY *)payload); - struct stat filestat; - int sid, rowsBefore = 0; + // FIXME: mutex here seems meaningless and num here still can be changed + int32_t num = 0; + pthread_mutex_lock(&pVnode->vmutex); + num = pObj->numOfQueries; + pthread_mutex_unlock(&pVnode->vmutex); - if (pVnode->nfd <= 0 || firstKey > pVnode->commitLastKey) { - if (pVnode->nfd > 0) vnodeCloseFileForImport(pObj, pHinfo); + int32_t commitInProcess = 0; - pVnode->commitFirstKey = firstKey; - if (vnodeOpenCommitFiles(pVnode, pObj->sid) < 0) return -1; + pthread_mutex_lock(&pPool->vmutex); + if (((commitInProcess = pPool->commitInProcess) == 1) || num > 0) { + // mutual exclusion with read (need to change here) + pthread_mutex_unlock(&pPool->vmutex); + vnodeClearMeterState(pObj, TSDB_METER_STATE_IMPORTING); + return TSDB_CODE_ACTION_IN_PROGRESS; - fstat(pVnode->hfd, &filestat); - pHinfo->hfdSize = filestat.st_size; - pHinfo->newBlocks = 0; - pHinfo->last = 1; // by default, new blockes are at the end of block list + } else { + pPool->commitInProcess = 1; + pthread_mutex_unlock(&pPool->vmutex); + code = vnodeImportData(pObj, &import); + *pNumOfPoints = import.importedRows; + } + pVnode->version++; + vnodeClearMeterState(pObj, TSDB_METER_STATE_IMPORTING); + } + + return code; +} - lseek(pVnode->hfd, TSDB_FILE_HEADER_LEN, SEEK_SET); - read(pVnode->hfd, pHinfo->headList, sizeof(SCompHeader) * pCfg->maxSessions); +/* Function to search keys in a range + * + * Assumption: keys in payload are in ascending order + * + * @payload: data records, key in ascending order + * @step: bytes each record takes + * @rows: number of data records + * @skey: range start (included) + * @ekey: range end (included) + * @srows: rtype, start index of records + * @nrows: rtype, number of records in range + * + * @rtype: 0 means find data in the range + * -1 means find no data in the range + */ +static int vnodeSearchKeyInRange(char *payload, int step, int rows, TSKEY skey, TSKEY ekey, int *srow, int *nrows) { + if (rows <= 0 || KEY_AT_INDEX(payload, step, 0) > ekey || KEY_AT_INDEX(payload, step, rows - 1) < skey || skey > ekey) + return -1; - if (pHinfo->headList[pObj->sid].compInfoOffset > 0) { - lseek(pVnode->hfd, pHinfo->headList[pObj->sid].compInfoOffset, SEEK_SET); - if (read(pVnode->hfd, &pHinfo->compInfo, sizeof(SCompInfo)) != sizeof(SCompInfo)) { - 
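A sketch of the mutual-exclusion handshake the import path performs before touching files: it backs off when a commit is already running or queries are open on the meter, otherwise it claims the commitInProcess flag under the pool mutex. Names are simplified stand-ins for the pool fields used above.

```c
#include <pthread.h>

typedef struct {
  pthread_mutex_t mutex;
  int commitInProcess;
} PoolStub;

static int tryBeginImport(PoolStub *pool, int openQueries) {
  pthread_mutex_lock(&pool->mutex);
  if (pool->commitInProcess == 1 || openQueries > 0) {
    pthread_mutex_unlock(&pool->mutex);
    return -1;                    /* caller maps this to TSDB_CODE_ACTION_IN_PROGRESS */
  }
  pool->commitInProcess = 1;      /* cleared again once the import has finished */
  pthread_mutex_unlock(&pool->mutex);
  return 0;
}
```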
dError("vid:%d sid:%d, failed to read compInfo from file:%s", pObj->vnode, pObj->sid, pVnode->cfn); - return -1; - } + int left = 0; + int right = rows - 1; + int mid; - if (pHinfo->compInfo.uid == pObj->uid) { - pHinfo->compInfoOffset = pHinfo->headList[pObj->sid].compInfoOffset; - pHinfo->leftOffset = pHinfo->headList[pObj->sid].compInfoOffset + sizeof(SCompInfo); - } else { - pHinfo->headList[pObj->sid].compInfoOffset = 0; - } - } + // Binary search the first key in payload >= skey + do { + mid = (left + right) / 2; + if (skey < KEY_AT_INDEX(payload, step, mid)) { + right = mid; + } else if (skey > KEY_AT_INDEX(payload, step, mid)) { + left = mid + 1; + } else { + break; + } + } while (left < right); - if ( pHinfo->headList[pObj->sid].compInfoOffset == 0 ) { - memset(&pHinfo->compInfo, 0, sizeof(SCompInfo)); - pHinfo->compInfo.uid = pObj->uid; + if (skey <= KEY_AT_INDEX(payload, step, mid)) { + *srow = mid; + } else { + if (mid + 1 >= rows) { + return -1; + } else { + *srow = mid + 1; + } + } - for (sid = pObj->sid + 1; sid < pCfg->maxSessions; ++sid) - if (pHinfo->headList[sid].compInfoOffset > 0) break; + assert(skey <= KEY_AT_INDEX(payload, step, *srow)); - pHinfo->compInfoOffset = (sid == pCfg->maxSessions) ? pHinfo->hfdSize : pHinfo->headList[sid].compInfoOffset; - pHinfo->leftOffset = pHinfo->compInfoOffset; + *nrows = 0; + for (int i = *srow; i < rows; i++) { + if (KEY_AT_INDEX(payload, step, i) <= ekey) { + (*nrows)++; + } else { + break; } + } - pHinfo->oldNumOfBlocks = pHinfo->compInfo.numOfBlocks; - lseek(pVnode->hfd, 0, SEEK_SET); - lseek(pVnode->nfd, 0, SEEK_SET); - tsendfile(pVnode->nfd, pVnode->hfd, NULL, pHinfo->compInfoOffset); - twrite(pVnode->nfd, &pHinfo->compInfo, sizeof(SCompInfo)); - if (pHinfo->headList[pObj->sid].compInfoOffset > 0) lseek(pVnode->hfd, sizeof(SCompInfo), SEEK_CUR); + if (*nrows == 0) return -1; - if (pVnode->commitFileId < pImport->fileId) { - if (pHinfo->compInfo.numOfBlocks > 0) - pHinfo->leftOffset += pHinfo->compInfo.numOfBlocks * sizeof(SCompBlock); + return 0; +} - rowsBefore = vnodeProcessLastBlock(pImport, pHinfo, data); +int vnodeOpenMinFilesForImport(int vnode, int fid) { + char dname[TSDB_FILENAME_LEN] = "\0"; + SVnodeObj * pVnode = vnodeList + vnode; + struct stat filestat; + int minFileSize; - // copy all existing compBlockInfo - lseek(pVnode->hfd, pHinfo->compInfoOffset + sizeof(SCompInfo), SEEK_SET); - if (pHinfo->compInfo.numOfBlocks > 0) - tsendfile(pVnode->nfd, pVnode->hfd, NULL, pHinfo->compInfo.numOfBlocks * sizeof(SCompBlock)); + minFileSize = TSDB_FILE_HEADER_LEN + sizeof(SCompHeader) * pVnode->cfg.maxSessions + sizeof(TSCKSUM); - } else if (pVnode->commitFileId == pImport->fileId) { - int slots = pImport->pos ? 
pImport->slot + 1 : pImport->slot; - pHinfo->leftOffset += slots * sizeof(SCompBlock); + vnodeGetHeadDataLname(pVnode->cfn, dname, pVnode->lfn, vnode, fid); - // check if last block is at last file, if it is, read into memory - if (pImport->pos == 0 && pHinfo->compInfo.numOfBlocks > 0 && pImport->slot == pHinfo->compInfo.numOfBlocks && - pHinfo->compInfo.last) { - rowsBefore = vnodeProcessLastBlock(pImport, pHinfo, data); - if ( rowsBefore > 0 ) pImport->slot--; - } + // Open .head file + pVnode->hfd = open(pVnode->cfn, O_RDONLY); + if (pVnode->hfd < 0) { + dError("vid:%d, failed to open head file:%s, reason:%s", vnode, pVnode->cfn, strerror(errno)); + taosLogError("vid:%d, failed to open head file:%s, reason:%s", vnode, pVnode->cfn, strerror(errno)); + goto _error_open; + } - // this block will be replaced by new blocks - if (pImport->pos > 0) pHinfo->compInfo.numOfBlocks--; + fstat(pVnode->hfd, &filestat); + if (filestat.st_size < minFileSize) { + dError("vid:%d, head file:%s is corrupted", vnode, pVnode->cfn); + taosLogError("vid:%d, head file:%s corrupted", vnode, pVnode->cfn); + goto _error_open; + } - if (pImport->slot > 0) { - lseek(pVnode->hfd, pHinfo->compInfoOffset + sizeof(SCompInfo), SEEK_SET); - tsendfile(pVnode->nfd, pVnode->hfd, NULL, pImport->slot * sizeof(SCompBlock)); - } + // Open .data file + pVnode->dfd = open(dname, O_RDWR); + if (pVnode->dfd < 0) { + dError("vid:%d, failed to open data file:%s, reason:%s", vnode, dname, strerror(errno)); + taosLogError("vid:%d, failed to open data file:%s, reason:%s", vnode, dname, strerror(errno)); + goto _error_open; + } - if (pImport->slot < pHinfo->compInfo.numOfBlocks) - pHinfo->last = 0; // new blocks are not at the end of block list + fstat(pVnode->dfd, &filestat); + if (filestat.st_size < TSDB_FILE_HEADER_LEN) { + dError("vid:%d, data file:%s corrupted", vnode, dname); + taosLogError("vid:%d, data file:%s corrupted", vnode, dname); + goto _error_open; + } - } else { - // nothing + // Open .last file + pVnode->lfd = open(pVnode->lfn, O_RDWR); + if (pVnode->lfd < 0) { + dError("vid:%d, failed to open last file:%s, reason:%s", vnode, pVnode->lfn, strerror(errno)); + taosLogError("vid:%d, failed to open last file:%s, reason:%s", vnode, pVnode->lfn, strerror(errno)); + goto _error_open; + } - pHinfo->last = 0; // new blocks are not at the end of block list - } + fstat(pVnode->lfd, &filestat); + if (filestat.st_size < TSDB_FILE_HEADER_LEN) { + dError("vid:%d, last file:%s corrupted", vnode, pVnode->lfn); + taosLogError("vid:%d, last file:%s corrupted", vnode, pVnode->lfn); + goto _error_open; } - return rowsBefore; -} + return 0; -extern int vnodeSendShellSubmitRspMsg(SShellObj *pObj, int code, int numOfPoints); -int vnodeImportToFile(SImportInfo *pImport); +_error_open: + if (pVnode->hfd > 0) close(pVnode->hfd); + pVnode->hfd = 0; -void vnodeProcessImportTimer(void *param, void *tmrId) { - SImportInfo *pImport = (SImportInfo *)param; - if (pImport == NULL || pImport->signature != param) { - dError("import timer is messed up, signature:%p", pImport); - return; - } + if (pVnode->dfd > 0) close(pVnode->dfd); + pVnode->dfd = 0; - SMeterObj *pObj = pImport->pObj; - SVnodeObj *pVnode = &vnodeList[pObj->vnode]; - SCachePool *pPool = (SCachePool *)pVnode->pCachePool; - SShellObj *pShell = pImport->pShell; + if (pVnode->lfd > 0) close(pVnode->lfd); + pVnode->lfd = 0; - pImport->retry++; + return -1; +} - //slow query will block the import operation - int32_t state = vnodeSetMeterState(pObj, TSDB_METER_STATE_IMPORTING); - if (state >= 
TSDB_METER_STATE_DELETING) { - dError("vid:%d sid:%d id:%s, meter is deleted, failed to import, state:%d", - pObj->vnode, pObj->sid, pObj->meterId, state); - return; +/* Function to open .t file and sendfile the first part + */ +int vnodeOpenTempFilesForImport(SImportHandle *pHandle, SMeterObj *pObj, int fid) { + char dHeadName[TSDB_FILENAME_LEN] = "\0"; + SVnodeObj * pVnode = vnodeList + pObj->vnode; + struct stat filestat; + int sid; + + // cfn: .head + if (readlink(pVnode->cfn, dHeadName, TSDB_FILENAME_LEN) < 0) return -1; + + size_t len = strlen(dHeadName); + // switch head name + switch (dHeadName[len - 1]) { + case '0': + dHeadName[len - 1] = '1'; + break; + case '1': + dHeadName[len - 1] = '0'; + break; + default: + dError("vid: %d, fid: %d, head target filename not end with 0 or 1", pVnode->vnode, fid); + return -1; } - int32_t num = 0; - pthread_mutex_lock(&pVnode->vmutex); - num = pObj->numOfQueries; - pthread_mutex_unlock(&pVnode->vmutex); + vnodeGetHeadTname(pVnode->nfn, NULL, pVnode->vnode, fid); + if (symlink(dHeadName, pVnode->nfn) < 0) return -1; - //if the num == 0, it will never be increased before state is set to TSDB_METER_STATE_READY - int32_t commitInProcess = 0; - pthread_mutex_lock(&pPool->vmutex); - if (((commitInProcess = pPool->commitInProcess) == 1) || num > 0 || state != TSDB_METER_STATE_READY) { - pthread_mutex_unlock(&pPool->vmutex); - vnodeClearMeterState(pObj, TSDB_METER_STATE_IMPORTING); + pVnode->nfd = open(pVnode->nfn, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); + if (pVnode->nfd < 0) { + dError("vid:%d, failed to open new head file:%s, reason:%s", pVnode->vnode, pVnode->nfn, strerror(errno)); + taosLogError("vid:%d, failed to open new head file:%s, reason:%s", pVnode->vnode, pVnode->nfn, strerror(errno)); + return -1; + } - if (pImport->retry < 1000) { - dTrace("vid:%d sid:%d id:%s, import failed, retry later. commit in process or queries on it, or not ready." - "commitInProcess:%d, numOfQueries:%d, state:%d", pObj->vnode, pObj->sid, pObj->meterId, - commitInProcess, num, state); + fstat(pVnode->hfd, &filestat); + pHandle->hfSize = filestat.st_size; - taosTmrStart(vnodeProcessImportTimer, 10, pImport, vnodeTmrCtrl); - return; - } else { - pShell->code = TSDB_CODE_TOO_SLOW; - } - } else { - pPool->commitInProcess = 1; - pthread_mutex_unlock(&pPool->vmutex); - int code = vnodeImportData(pObj, pImport); - if (pShell) { - pShell->code = code; - pShell->numOfTotalPoints += pImport->importedRows; - } + // Find the next sid whose compInfoOffset > 0 + for (sid = pObj->sid + 1; sid < pVnode->cfg.maxSessions; sid++) { + if (pHandle->pHeader[sid].compInfoOffset > 0) break; } - vnodeClearMeterState(pObj, TSDB_METER_STATE_IMPORTING); + pHandle->nextNo0Offset = (sid == pVnode->cfg.maxSessions) ? 
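The readlink/switch above implements a two-generation naming scheme: the .head symlink always points at a file whose name ends in 0 or 1, and the temporary .t head links to the other generation so the final publish step is a rename. A minimal sketch of the toggle, assuming the same trailing-digit convention:

```c
#include <string.h>

static int toggleGeneration(char *target) {
  size_t len = strlen(target);
  if (len == 0) return -1;
  if (target[len - 1] == '0')      target[len - 1] = '1';
  else if (target[len - 1] == '1') target[len - 1] = '0';
  else return -1;   /* target does not follow the 0/1 generation convention */
  return 0;
}
```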
pHandle->hfSize : pHandle->pHeader[sid].compInfoOffset; - pVnode->version++; + // FIXME: sendfile the original part + // TODO: Here, we need to take the deleted table case in consideration, this function + // just assume the case is handled before calling this function + if (pHandle->pHeader[pObj->sid].compInfoOffset > 0) { + pHandle->compInfoOffset = pHandle->pHeader[pObj->sid].compInfoOffset; + } else { + pHandle->compInfoOffset = pHandle->nextNo0Offset; + } - // send response back to shell - if (pShell) { - pShell->count--; - if (pShell->count <= 0) vnodeSendShellSubmitRspMsg(pImport->pShell, pShell->code, pShell->numOfTotalPoints); + assert(pHandle->compInfoOffset <= pHandle->hfSize); + + lseek(pVnode->hfd, 0, SEEK_SET); + lseek(pVnode->nfd, 0, SEEK_SET); + if (tsendfile(pVnode->nfd, pVnode->hfd, NULL, pHandle->compInfoOffset) < 0) { + return -1; } - pImport->signature = NULL; - free(pImport->opayload); - free(pImport); -} + // Leave a SCompInfo space here + lseek(pVnode->nfd, sizeof(SCompInfo), SEEK_CUR); -int vnodeImportToFile(SImportInfo *pImport) { - SMeterObj *pObj = pImport->pObj; - SVnodeObj *pVnode = &vnodeList[pObj->vnode]; - SVnodeCfg *pCfg = &pVnode->cfg; - SHeadInfo headInfo; - int code = 0, col; - SCompBlock compBlock; - char * payload = pImport->payload; - int rows = pImport->rows; - SCachePool *pPool = (SCachePool *)pVnode->pCachePool; - - TSKEY lastKey = *((TSKEY *)(payload + pObj->bytesPerPoint * (rows - 1))); - TSKEY firstKey = *((TSKEY *)payload); - memset(&headInfo, 0, sizeof(headInfo)); - headInfo.headList = malloc(sizeof(SCompHeader) * pCfg->maxSessions + sizeof(TSCKSUM)); - - SData *cdata[TSDB_MAX_COLUMNS]; - char *buffer1 = - malloc(pObj->bytesPerPoint * pCfg->rowsInFileBlock + (sizeof(SData) + EXTRA_BYTES) * pObj->numOfColumns); - cdata[0] = (SData *)buffer1; + return 0; +} - SData *data[TSDB_MAX_COLUMNS]; - char *buffer2 = - malloc(pObj->bytesPerPoint * pCfg->rowsInFileBlock + (sizeof(SData) + EXTRA_BYTES) * pObj->numOfColumns); - data[0] = (SData *)buffer2; +typedef enum { DATA_LOAD_TIMESTAMP = 0x1, DATA_LOAD_OTHER_DATA = 0x2 } DataLoadMod; - for (col = 1; col < pObj->numOfColumns; ++col) { - cdata[col] = (SData *)(((char *)cdata[col - 1]) + sizeof(SData) + EXTRA_BYTES + - pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes); - data[col] = (SData *)(((char *)data[col - 1]) + sizeof(SData) + EXTRA_BYTES + - pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes); - } +/* Function to load a block data at the requirement of mod + */ +static int vnodeLoadNeededBlockData(SMeterObj *pObj, SImportHandle *pHandle, int blockId, uint8_t loadMod, int *code) { + size_t size; + SCompBlock *pBlock = pHandle->pBlocks + blockId; + *code = TSDB_CODE_SUCCESS; - int rowsBefore = 0; - int rowsRead = 0; - int rowsUnread = 0; - int leftRows = rows; // left number of rows of imported data - int row, rowsToWrite; - int64_t offset[TSDB_MAX_COLUMNS]; + SVnodeObj *pVnode = vnodeList + pObj->vnode; - if (pImport->pos > 0) { - for (col = 0; col < pObj->numOfColumns; ++col) - memcpy(data[col]->data, pImport->sdata[col]->data, pImport->pos * pObj->schema[col].bytes); + int dfd = pBlock->last ? 
pVnode->lfd : pVnode->dfd; - rowsBefore = pImport->pos; - rowsRead = pImport->pos; - rowsUnread = pImport->numOfPoints - pImport->pos; + if (pHandle->blockId != blockId) { + pHandle->blockId = blockId; + pHandle->blockLoadState = 0; } - dTrace("vid:%d sid:%d id:%s, %d rows data will be imported to file, firstKey:%ld lastKey:%ld", - pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey); - do { - if (leftRows > 0) { - code = vnodeOpenFileForImport(pImport, payload, &headInfo, data); - if (code < 0) goto _exit; - if (code > 0) { - rowsBefore = code; - code = 0; - }; - } else { - // if payload is already imported, rows unread shall still be processed - rowsBefore = 0; + if (pHandle->blockLoadState == 0){ // Reload pField + size = sizeof(SField) * pBlock->numOfCols + sizeof(TSCKSUM); + if (pHandle->pFieldSize < size) { + pHandle->pField = (SField *)realloc((void *)(pHandle->pField), size); + if (pHandle->pField == NULL) { + dError("vid: %d, sid: %d, meterId: %s, failed to allocate memory, size: %ul", pObj->vnode, pObj->sid, + pObj->meterId, size); + *code = TSDB_CODE_SERV_OUT_OF_MEMORY; + return -1; + } + pHandle->pFieldSize = size; } - int rowsToProcess = pObj->pointsPerFileBlock - rowsBefore; - if (rowsToProcess > leftRows) rowsToProcess = leftRows; + lseek(dfd, pBlock->offset, SEEK_SET); + if (read(dfd, (void *)(pHandle->pField), pHandle->pFieldSize) < 0) { + dError("vid:%d sid:%d meterId:%s, failed to read data file, size:%ld reason:%s", pVnode->vnode, pObj->sid, + pObj->meterId, pHandle->pFieldSize, strerror(errno)); + *code = TSDB_CODE_FILE_CORRUPTED; + return -1; + } - for (col = 0; col < pObj->numOfColumns; ++col) { - offset[col] = data[col]->data + rowsBefore * pObj->schema[col].bytes; + if (!taosCheckChecksumWhole((uint8_t *)(pHandle->pField), pHandle->pFieldSize)) { + dError("vid:%d sid:%d meterId:%s, data file %s is broken since checksum mismatch", pVnode->vnode, pObj->sid, + pObj->meterId, pVnode->lfn); + *code = TSDB_CODE_FILE_CORRUPTED; + return -1; } + } - row = 0; - if (leftRows > 0) { - for (row = 0; row < rowsToProcess; ++row) { - if (*((TSKEY *)payload) > pVnode->commitLastKey) break; + { // Allocate necessary buffer + size = pObj->bytesPerPoint * pObj->pointsPerFileBlock + + (sizeof(SData) + EXTRA_BYTES + sizeof(TSCKSUM)) * pObj->numOfColumns; + if (pHandle->buffer == NULL) { + pHandle->buffer = malloc(size); + if (pHandle->buffer == NULL) { + dError("vid: %d, sid: %d, meterId: %s, failed to allocate memory, size: %ul", pObj->vnode, pObj->sid, + pObj->meterId, size); + *code = TSDB_CODE_SERV_OUT_OF_MEMORY; + return -1; + } - for (col = 0; col < pObj->numOfColumns; ++col) { - memcpy((void *)offset[col], payload, pObj->schema[col].bytes); - payload += pObj->schema[col].bytes; - offset[col] += pObj->schema[col].bytes; - } + // TODO: Init data + pHandle->data[0] = (SData *)(pHandle->buffer); + for (int col = 1; col < pObj->numOfColumns; col++) { + pHandle->data[col] = (SData *)((char *)(pHandle->data[col - 1]) + sizeof(SData) + EXTRA_BYTES + + sizeof(TSCKSUM) + pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes); } } - leftRows -= row; - rowsToWrite = rowsBefore + row; - rowsBefore = 0; - - if (leftRows == 0 && rowsUnread > 0) { - // copy the unread - int rowsToCopy = pObj->pointsPerFileBlock - rowsToWrite; - if (rowsToCopy > rowsUnread) rowsToCopy = rowsUnread; - - for (col = 0; col < pObj->numOfColumns; ++col) { - int bytes = pObj->schema[col].bytes; - memcpy(data[col]->data + rowsToWrite * bytes, pImport->sdata[col]->data + rowsRead * bytes, rowsToCopy * 
bytes); + if (pHandle->temp == NULL) { + pHandle->temp = malloc(size); + if (pHandle->temp == NULL) { + dError("vid: %d, sid: %d, meterId: %s, failed to allocate memory, size: %ul", pObj->vnode, pObj->sid, + pObj->meterId, size); + *code = TSDB_CODE_SERV_OUT_OF_MEMORY; + return -1; } - - rowsRead += rowsToCopy; - rowsUnread -= rowsToCopy; - rowsToWrite += rowsToCopy; } - for (col = 0; col < pObj->numOfColumns; ++col) { - data[col]->len = rowsToWrite * pObj->schema[col].bytes; + if (pHandle->tempBuffer == NULL) { + pHandle->tempBufferSize = pObj->maxBytes * pObj->pointsPerFileBlock + EXTRA_BYTES + sizeof(TSCKSUM); + pHandle->tempBuffer = malloc(pHandle->tempBufferSize); + if (pHandle->tempBuffer == NULL) { + dError("vid: %d, sid: %d, meterId: %s, failed to allocate memory, size: %ul", pObj->vnode, pObj->sid, + pObj->meterId, pHandle->tempBufferSize); + *code = TSDB_CODE_SERV_OUT_OF_MEMORY; + return -1; + } } + } - compBlock.last = headInfo.last; - vnodeWriteBlockToFile(pObj, &compBlock, data, cdata, rowsToWrite); - twrite(pVnode->nfd, &compBlock, sizeof(SCompBlock)); - - rowsToWrite = 0; - headInfo.newBlocks++; - - } while (leftRows > 0 || rowsUnread > 0); + if ((loadMod & DATA_LOAD_TIMESTAMP) && + (~(pHandle->blockLoadState & DATA_LOAD_TIMESTAMP))) { // load only timestamp part + if (vnodeReadColumnToMem(dfd, pBlock, &(pHandle->pField), PRIMARYKEY_TIMESTAMP_COL_INDEX, + pHandle->data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, sizeof(TSKEY) * pBlock->numOfPoints, + pHandle->temp, pHandle->tempBuffer, pHandle->tempBufferSize) < 0) { + *code = TSDB_CODE_FILE_CORRUPTED; + return -1; + } - if (compBlock.keyLast > pObj->lastKeyOnFile) - pObj->lastKeyOnFile = compBlock.keyLast; + pHandle->blockLoadState |= DATA_LOAD_TIMESTAMP; + } - vnodeCloseFileForImport(pObj, &headInfo); - dTrace("vid:%d sid:%d id:%s, %d rows data are imported to file", pObj->vnode, pObj->sid, pObj->meterId, rows); + if ((loadMod & DATA_LOAD_OTHER_DATA) && (~(pHandle->blockLoadState & DATA_LOAD_OTHER_DATA))) { // load other columns + for (int col = 1; col < pBlock->numOfCols; col++) { + if (vnodeReadColumnToMem(dfd, pBlock, &(pHandle->pField), col, pHandle->data[col]->data, + pBlock->numOfPoints * pObj->schema[col].bytes, pHandle->temp, pHandle->tempBuffer, + pHandle->tempBufferSize) < 0) { + *code = TSDB_CODE_FILE_CORRUPTED; + return -1; + } + } - SCacheInfo *pInfo = (SCacheInfo *)pObj->pCache; - pthread_mutex_lock(&pPool->vmutex); + pHandle->blockLoadState |= DATA_LOAD_OTHER_DATA; + } - if (pInfo->numOfBlocks > 0) { - int slot = (pInfo->currentSlot - pInfo->numOfBlocks + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; - TSKEY firstKeyInCache = *((TSKEY *)(pInfo->cacheBlocks[slot]->offset[0])); + return 0; +} - // data may be in commited cache, cache shall be released - if (lastKey > firstKeyInCache) { - while (slot != pInfo->commitSlot) { - SCacheBlock *pCacheBlock = pInfo->cacheBlocks[slot]; - vnodeFreeCacheBlock(pCacheBlock); - slot = (slot + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; +static int vnodeCloseImportFiles(SMeterObj *pObj, SImportHandle *pHandle) { + SVnodeObj *pVnode = vnodeList + pObj->vnode; + char dpath[TSDB_FILENAME_LEN] = "\0"; + SCompInfo compInfo; + __off_t offset = 0; + + if (pVnode->nfd > 0) { + offset = lseek(pVnode->nfd, 0, SEEK_CUR); + assert(offset == pHandle->nextNo0Offset + pHandle->driftOffset); + + { // Write the SCompInfo part + compInfo.uid = pObj->uid; + compInfo.last = pHandle->last; + compInfo.numOfBlocks = pHandle->newNumOfBlocks + pHandle->oldNumOfBlocks; + compInfo.delimiter = 
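vnodeLoadNeededBlockData keeps a small record of which parts of the current block are already in memory, so a caller asking only for timestamps does not pay for the other columns. A minimal sketch of that bookkeeping, with flag values mirroring the DataLoadMod enum:

```c
#include <stdint.h>

enum { LOAD_TIMESTAMP = 0x1, LOAD_OTHER_COLS = 0x2 };

typedef struct {
  int     blockId;
  uint8_t loadState;   /* which parts of blockId are already in memory */
} BlockLoadCache;

/* returns the parts of `wanted` that still have to be read from disk */
static uint8_t partsToLoad(BlockLoadCache *c, int blockId, uint8_t wanted) {
  if (c->blockId != blockId) {   /* switched blocks: everything cached is stale */
    c->blockId = blockId;
    c->loadState = 0;
  }
  uint8_t missing = wanted & (uint8_t)~c->loadState;
  c->loadState |= wanted;        /* caller is expected to load whatever was missing */
  return missing;
}
```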
TSDB_VNODE_DELIMITER; + taosCalcChecksumAppend(0, (uint8_t *)(&compInfo), sizeof(SCompInfo)); + + lseek(pVnode->nfd, pHandle->compInfoOffset, SEEK_SET); + if (twrite(pVnode->nfd, (void *)(&compInfo), sizeof(SCompInfo)) < 0) { + dError("vid:%d sid:%d meterId:%s, failed to wirte SCompInfo, reason:%s", pObj->vnode, pObj->sid, pObj->meterId, + strerror(errno)); + return -1; } + } - // last slot, the uncommitted slots shall be shifted - SCacheBlock *pCacheBlock = pInfo->cacheBlocks[slot]; - int points = pCacheBlock->numOfPoints - pInfo->commitPoint; - if (points > 0) { - for (int col = 0; col < pObj->numOfColumns; ++col) { - int size = points * pObj->schema[col].bytes; - memmove(pCacheBlock->offset[col], pCacheBlock->offset[col] + pObj->schema[col].bytes * pInfo->commitPoint, size); - } + // Write the rest of the SCompBlock part + if (pHandle->hfSize > pHandle->nextNo0Offset) { + lseek(pVnode->nfd, 0, SEEK_END); + lseek(pVnode->hfd, pHandle->nextNo0Offset, SEEK_SET); + if (tsendfile(pVnode->nfd, pVnode->hfd, NULL, pHandle->hfSize - pHandle->nextNo0Offset) < 0) { + dError("vid:%d sid:%d meterId:%s, failed to sendfile, size:%ld, reason:%s", pObj->vnode, pObj->sid, + pObj->meterId, pHandle->hfSize - pHandle->nextNo0Offset, strerror(errno)); + return -1; } + } - if (pInfo->commitPoint != pObj->pointsPerBlock) { - // commit point shall be set to 0 if last block is not full - pInfo->commitPoint = 0; - pCacheBlock->numOfPoints = points; - if (slot == pInfo->currentSlot) { - __sync_fetch_and_add(&pObj->freePoints, pInfo->commitPoint); - } - } else { - // if last block is full and committed - SCacheBlock *pCacheBlock = pInfo->cacheBlocks[slot]; - if (pCacheBlock->pMeterObj == pObj) { - vnodeFreeCacheBlock(pCacheBlock); - } + // Write SCompHeader part + pHandle->pHeader[pObj->sid].compInfoOffset = pHandle->compInfoOffset; + for (int sid = pObj->sid + 1; sid < pVnode->cfg.maxSessions; ++sid) { + if (pHandle->pHeader[sid].compInfoOffset > 0) { + pHandle->pHeader[sid].compInfoOffset += pHandle->driftOffset; } } + + taosCalcChecksumAppend(0, (uint8_t *)(pHandle->pHeader), pHandle->pHeaderSize); + lseek(pVnode->nfd, TSDB_FILE_HEADER_LEN, SEEK_SET); + if (twrite(pVnode->nfd, (void *)(pHandle->pHeader), pHandle->pHeaderSize) < 0) { + dError("vid:%d sid:%d meterId:%s, failed to wirte SCompHeader part, size:%ld, reason:%s", pObj->vnode, pObj->sid, + pObj->meterId, pHandle->pHeaderSize, strerror(errno)); + return -1; + } } - if (lastKey > pObj->lastKeyOnFile) pObj->lastKeyOnFile = lastKey; + // Close opened files + close(pVnode->dfd); + pVnode->dfd = 0; - pthread_mutex_unlock(&pPool->vmutex); + close(pVnode->hfd); + pVnode->hfd = 0; -_exit: - tfree(headInfo.headList); - tfree(buffer1); - tfree(buffer2); - tfree(pImport->buffer); + close(pVnode->lfd); + pVnode->lfd = 0; - return code; + if (pVnode->nfd > 0) { + close(pVnode->nfd); + pVnode->nfd = 0; + + readlink(pVnode->cfn, dpath, TSDB_FILENAME_LEN); + rename(pVnode->nfn, pVnode->cfn); + remove(dpath); + } + + return 0; } -int vnodeImportToCache(SImportInfo *pImport, char *payload, int rows) { - SMeterObj *pObj = pImport->pObj; - SVnodeObj *pVnode = &vnodeList[pObj->vnode]; - SVnodeCfg *pCfg = &pVnode->cfg; - int code = -1; - SCacheInfo *pInfo = (SCacheInfo *)pObj->pCache; - int slot, pos, row, col, points, tpoints; +static void vnodeConvertRowsToCols(SMeterObj *pObj, const char *payload, int rows, SData *data[], int rowOffset) { + int sdataRow; + int offset; - char *data[TSDB_MAX_COLUMNS], *current[TSDB_MAX_COLUMNS]; - int slots = pInfo->unCommittedBlocks + 1; - 
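Once the new head has been written, vnodeCloseImportFiles publishes it with the readlink/rename/remove sequence above. A trimmed, self-contained sketch of that swap (error handling reduced to the bare minimum):

```c
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

static int swapHeadFile(const char *headLink, const char *tempHead) {
  char oldTarget[PATH_MAX] = "";   /* zero-filled, so the readlink result stays terminated */
  ssize_t n = readlink(headLink, oldTarget, sizeof(oldTarget) - 1);
  if (rename(tempHead, headLink) != 0) return -1;   /* publish the new head file */
  if (n > 0) remove(oldTarget);                     /* drop the previous generation */
  return 0;
}
```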
int trows = slots * pObj->pointsPerBlock + rows; // max rows in buffer - int tsize = (trows / pObj->pointsPerBlock + 1) * pCfg->cacheBlockSize; - TSKEY firstKey = *((TSKEY *)payload); - TSKEY lastKey = *((TSKEY *)(payload + pObj->bytesPerPoint * (rows - 1))); + for (int row = 0; row < rows; ++row) { + sdataRow = row + rowOffset; + offset = 0; + for (int col = 0; col < pObj->numOfColumns; ++col) { + memcpy(data[col]->data + sdataRow * pObj->schema[col].bytes, payload + pObj->bytesPerPoint * row + offset, + pObj->schema[col].bytes); - if (pObj->freePoints < rows || pObj->freePoints < (pObj->pointsPerBlock << 1)) { - dError("vid:%d sid:%d id:%s, import failed, cache is full, freePoints:%d", pObj->vnode, pObj->sid, pObj->meterId, - pObj->freePoints); - pImport->importedRows = 0; - pImport->commit = 1; - code = TSDB_CODE_ACTION_IN_PROGRESS; - return code; + offset += pObj->schema[col].bytes; + } } +} - dTrace("vid:%d sid:%d id:%s, %d rows data will be imported to cache, firstKey:%ld lastKey:%ld", - pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey); +static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int rows, int fid) { + SMeterObj * pObj = (SMeterObj *)(pImport->pObj); + SVnodeObj * pVnode = vnodeList + pObj->vnode; + SImportHandle importHandle; + size_t size = 0; + SData * data[TSDB_MAX_COLUMNS]; + char * buffer = NULL; + SData * cdata[TSDB_MAX_COLUMNS]; + char * cbuffer = NULL; + SCompBlock compBlock; + TSCKSUM checksum = 0; + int pointsImported = 0; + int code = TSDB_CODE_SUCCESS; + SCachePool * pPool = (SCachePool *)pVnode->pCachePool; + SCacheInfo * pInfo = (SCacheInfo *)(pObj->pCache); + TSKEY lastKeyImported = 0; + + TSKEY delta = pVnode->cfg.daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision]; + TSKEY minFileKey = fid * delta; + TSKEY maxFileKey = minFileKey + delta - 1; + TSKEY firstKey = KEY_AT_INDEX(payload, pObj->bytesPerPoint, 0); + TSKEY lastKey = KEY_AT_INDEX(payload, pObj->bytesPerPoint, rows - 1); + + assert(firstKey >= minFileKey && firstKey <= maxFileKey && lastKey >= minFileKey && lastKey <= maxFileKey); + + // create neccessary files + pVnode->commitFirstKey = firstKey; + if (vnodeCreateNeccessaryFiles(pVnode) < 0) return TSDB_CODE_OTHERS; + + assert(pVnode->commitFileId == fid); + + // Open least files to import .head(hfd) .data(dfd) .last(lfd) + if (vnodeOpenMinFilesForImport(pObj->vnode, fid) < 0) return TSDB_CODE_FILE_CORRUPTED; + + memset(&importHandle, 0, sizeof(SImportHandle)); + + { // Load SCompHeader part from .head file + importHandle.pHeaderSize = sizeof(SCompHeader) * pVnode->cfg.maxSessions + sizeof(TSCKSUM); + importHandle.pHeader = (SCompHeader *)malloc(importHandle.pHeaderSize); + if (importHandle.pHeader == NULL) { + dError("vid: %d, sid: %d, meterId: %s, failed to allocate memory, size: %ul", pObj->vnode, pObj->sid, + pObj->meterId, importHandle.pHeaderSize); + code = TSDB_CODE_SERV_OUT_OF_MEMORY; + goto _error_merge; + } - pthread_mutex_lock(&(pVnode->vmutex)); - if (firstKey < pVnode->firstKey) pVnode->firstKey = firstKey; - pthread_mutex_unlock(&(pVnode->vmutex)); + lseek(pVnode->hfd, TSDB_FILE_HEADER_LEN, SEEK_SET); + if (read(pVnode->hfd, (void *)(importHandle.pHeader), importHandle.pHeaderSize) < importHandle.pHeaderSize) { + dError("vid: %d, sid: %d, meterId: %s, fid: %d failed to read SCompHeader part, reason:%s", pObj->vnode, + pObj->sid, pObj->meterId, fid, strerror(errno)); + code = TSDB_CODE_FILE_CORRUPTED; + goto _error_merge; + } - char *buffer = malloc(tsize); // buffer to hold unCommitted data 
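vnodeConvertRowsToCols above is the bridge between the row-major submit payload and the column-major file blocks. A generic toy version of the same copy, with the schema description passed explicitly so the snippet stands alone:

```c
#include <string.h>

static void rowsToCols(const char *payload, int rows, int bytesPerPoint,
                       const int colBytes[], int numOfCols,
                       char *colData[], int rowOffset) {
  for (int row = 0; row < rows; ++row) {
    int offset = 0;
    for (int col = 0; col < numOfCols; ++col) {
      memcpy(colData[col] + (size_t)(row + rowOffset) * colBytes[col],
             payload + (size_t)row * bytesPerPoint + offset,
             (size_t)colBytes[col]);
      offset += colBytes[col];
    }
  }
}
```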
plus import data - data[0] = buffer; - current[0] = data[0]; - for (col = 1; col < pObj->numOfColumns; ++col) { - data[col] = data[col - 1] + trows * pObj->schema[col - 1].bytes; - current[col] = data[col]; + if (!taosCheckChecksumWhole((uint8_t *)(importHandle.pHeader), importHandle.pHeaderSize)) { + dError("vid: %d, sid: %d, meterId: %s, fid: %d SCompHeader part is broken", pObj->vnode, pObj->sid, pObj->meterId, + fid); + code = TSDB_CODE_FILE_CORRUPTED; + goto _error_merge; + } } - // write import data into buffer first - for (row = 0; row < rows; ++row) { - for (col = 0; col < pObj->numOfColumns; ++col) { - memcpy(current[col], payload, pObj->schema[col].bytes); - payload += pObj->schema[col].bytes; - current[col] += pObj->schema[col].bytes; + { // Initialize data[] and cdata[], which is used to hold data to write to data file + size = pObj->bytesPerPoint * pVnode->cfg.rowsInFileBlock + (sizeof(SData) + EXTRA_BYTES + sizeof(TSCKSUM)) * pObj->numOfColumns; + + buffer = (char *)malloc(size); + if (buffer == NULL) { + dError("vid: %d, sid: %d, meterId: %s, failed to allocate memory, size: %ul", pObj->vnode, pObj->sid, + pObj->meterId, size); + code = TSDB_CODE_SERV_OUT_OF_MEMORY; + goto _error_merge; } - } - // copy the overwritten data into buffer - tpoints = rows; - pos = pImport->pos; - slot = pImport->slot; - while (1) { - points = pInfo->cacheBlocks[slot]->numOfPoints - pos; - for (col = 0; col < pObj->numOfColumns; ++col) { - int size = points * pObj->schema[col].bytes; - memcpy(current[col], pInfo->cacheBlocks[slot]->offset[col] + pos * pObj->schema[col].bytes, size); - current[col] += size; + cbuffer = (char *)malloc(size); + if (cbuffer == NULL) { + dError("vid: %d, sid: %d, meterId: %s, failed to allocate memory, size: %ul", pObj->vnode, pObj->sid, + pObj->meterId, size); + code = TSDB_CODE_SERV_OUT_OF_MEMORY; + goto _error_merge; } - pos = 0; - tpoints += points; - if (slot == pInfo->currentSlot) break; - slot = (slot + 1) % pInfo->maxBlocks; - } + data[0] = (SData *)buffer; + cdata[0] = (SData *)cbuffer; - for (col = 0; col < pObj->numOfColumns; ++col) current[col] = data[col]; - pos = pImport->pos; + for (int col = 1; col < pObj->numOfColumns; col++) { + data[col] = (SData *)((char *)data[col - 1] + sizeof(SData) + EXTRA_BYTES + sizeof(TSCKSUM) + + pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes); + cdata[col] = (SData *)((char *)cdata[col - 1] + sizeof(SData) + EXTRA_BYTES + sizeof(TSCKSUM) + + pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes); + } + } - // write back to existing slots first - slot = pImport->slot; - while (1) { - points = (tpoints > pObj->pointsPerBlock - pos) ? 
pObj->pointsPerBlock - pos : tpoints; - SCacheBlock *pCacheBlock = pInfo->cacheBlocks[slot]; - for (col = 0; col < pObj->numOfColumns; ++col) { - int size = points * pObj->schema[col].bytes; - memcpy(pCacheBlock->offset[col] + pos * pObj->schema[col].bytes, current[col], size); - current[col] += size; + if (importHandle.pHeader[pObj->sid].compInfoOffset == 0) { // No data in this file, just write it + _write_empty_point: + if (vnodeOpenTempFilesForImport(&importHandle, pObj, fid) < 0) { + code = TSDB_CODE_OTHERS; + goto _error_merge; } - pCacheBlock->numOfPoints = points + pos; - pos = 0; - tpoints -= points; + importHandle.oldNumOfBlocks = 0; + importHandle.driftOffset += sizeof(SCompInfo); + lastKeyImported = lastKey; + + for (int rowsWritten = 0; rowsWritten < rows;) { + int rowsToWrite = MIN(pVnode->cfg.rowsInFileBlock, (rows - rowsWritten) /* the rows left */); + vnodeConvertRowsToCols(pObj, payload + rowsWritten * pObj->bytesPerPoint, rowsToWrite, data, 0); + pointsImported += rowsToWrite; + + compBlock.last = 1; + if (vnodeWriteBlockToFile(pObj, &compBlock, data, cdata, rowsToWrite) < 0) { + // TODO: deal with ERROR here + } - if (slot == pInfo->currentSlot) break; - slot = (slot + 1) % pInfo->maxBlocks; - } + importHandle.last = compBlock.last; + + checksum = taosCalcChecksum(checksum, (uint8_t *)(&compBlock), sizeof(SCompBlock)); + twrite(pVnode->nfd, &compBlock, sizeof(SCompBlock)); + importHandle.newNumOfBlocks++; + importHandle.driftOffset += sizeof(SCompBlock); - // allocate new cache block if there are still data left - while (tpoints > 0) { - pImport->commit = vnodeAllocateCacheBlock(pObj); - if (pImport->commit < 0) goto _exit; - points = (tpoints > pObj->pointsPerBlock) ? pObj->pointsPerBlock : tpoints; - SCacheBlock *pCacheBlock = pInfo->cacheBlocks[pInfo->currentSlot]; - for (col = 0; col < pObj->numOfColumns; ++col) { - int size = points * pObj->schema[col].bytes; - memcpy(pCacheBlock->offset[col] + pos * pObj->schema[col].bytes, current[col], size); - current[col] += size; + rowsWritten += rowsToWrite; } - tpoints -= points; - pCacheBlock->numOfPoints = points; - } + twrite(pVnode->nfd, &checksum, sizeof(TSCKSUM)); + importHandle.driftOffset += sizeof(TSCKSUM); + } else { // Else if there are old data in this file. 
+ { // load SCompInfo and SCompBlock part + lseek(pVnode->hfd, importHandle.pHeader[pObj->sid].compInfoOffset, SEEK_SET); + if (read(pVnode->hfd, (void *)(&(importHandle.compInfo)), sizeof(SCompInfo)) < sizeof(SCompInfo)) { + dError("vid:%d sid:%d meterId:%s, failed to read .head file, reason:%s", pVnode->vnode, pObj->sid, + pObj->meterId, strerror(errno)); + code = TSDB_CODE_FILE_CORRUPTED; + goto _error_merge; + } - code = 0; - __sync_fetch_and_sub(&pObj->freePoints, rows); - dTrace("vid:%d sid:%d id:%s, %d rows data are imported to cache", pObj->vnode, pObj->sid, pObj->meterId, rows); + if ((importHandle.compInfo.delimiter != TSDB_VNODE_DELIMITER) || + (!taosCheckChecksumWhole((uint8_t *)(&(importHandle.compInfo)), sizeof(SCompInfo)))) { + dError("vid:%d sid:%d meterId:%s, .head file %s is broken, delemeter:%x", pVnode->vnode, pObj->sid, + pObj->meterId, pVnode->cfn, importHandle.compInfo.delimiter); + code = TSDB_CODE_FILE_CORRUPTED; + goto _error_merge; + } -_exit: - free(buffer); - return code; -} + // Check the context of SCompInfo part + if (importHandle.compInfo.uid != pObj->uid) { // The data belongs to the other meter + goto _write_empty_point; + } -int vnodeFindKeyInFile(SImportInfo *pImport, int order) { - SMeterObj *pObj = pImport->pObj; - SVnodeObj *pVnode = &vnodeList[pObj->vnode]; - int code = -1; - SQuery query; - SColumnInfoEx colList[TSDB_MAX_COLUMNS] = {0}; + importHandle.oldNumOfBlocks = importHandle.compInfo.numOfBlocks; + importHandle.last = importHandle.compInfo.last; - TSKEY key = order ? pImport->firstKey : pImport->lastKey; - memset(&query, 0, sizeof(query)); - query.order.order = order; - query.skey = key; - query.ekey = order ? INT64_MAX : 0; - query.colList = colList; - query.numOfCols = pObj->numOfColumns; - - for (int16_t i = 0; i < pObj->numOfColumns; ++i) { - colList[i].data.colId = pObj->schema[i].colId; - colList[i].data.bytes = pObj->schema[i].bytes; - colList[i].data.type = pObj->schema[i].type; - - colList[i].colIdx = i; - colList[i].colIdxInBuf = i; - } - - int ret = vnodeSearchPointInFile(pObj, &query); - - if (ret >= 0) { - if (query.slot < 0) { - pImport->slot = 0; - pImport->pos = 0; - pImport->key = 0; - pImport->fileId = pVnode->fileId - pVnode->numOfFiles + 1; - dTrace("vid:%d sid:%d id:%s, import to head of file", pObj->vnode, pObj->sid, pObj->meterId); - code = 0; - } else if (query.slot >= 0) { - code = 0; - pImport->slot = query.slot; - pImport->pos = query.pos; - pImport->key = query.key; - pImport->fileId = query.fileId; - SCompBlock *pBlock = &query.pBlock[query.slot]; - pImport->numOfPoints = pBlock->numOfPoints; - - if (pImport->key != key) { - if (order == 0) { - pImport->pos++; - - if (pImport->pos >= pBlock->numOfPoints) { - pImport->slot++; - pImport->pos = 0; + size = sizeof(SCompBlock) * importHandle.compInfo.numOfBlocks + sizeof(TSCKSUM); + importHandle.pBlocks = (SCompBlock *)malloc(size); + if (importHandle.pBlocks == NULL) { + dError("vid:%d sid:%d meterId:%s, failed to allocate importHandle.pBlock, size:%ul", pVnode->vnode, pObj->sid, + pObj->meterId, size); + code = TSDB_CODE_SERV_OUT_OF_MEMORY; + goto _error_merge; + } + + if (read(pVnode->hfd, (void *)(importHandle.pBlocks), size) < size) { + dError("vid:%d sid:%d meterId:%s, failed to read importHandle.pBlock, reason:%s", pVnode->vnode, pObj->sid, + pObj->meterId, strerror(errno)); + code = TSDB_CODE_FILE_CORRUPTED; + goto _error_merge; + } + + if (!taosCheckChecksumWhole((uint8_t *)(importHandle.pBlocks), size)) { + dError("vid:%d sid:%d meterId:%s, pBlock part is 
broken in %s", pVnode->vnode, pObj->sid, pObj->meterId, + pVnode->cfn); + code = TSDB_CODE_FILE_CORRUPTED; + goto _error_merge; + } + } + + /* Now we have _payload_, we have _importHandle.pBlocks_, just merge payload into the importHandle.pBlocks + * + * Input: payload, pObj->bytesPerBlock, rows, importHandle.pBlocks + */ + { + int payloadIter = 0; + SBlockIter blockIter = {0, 0, 0, 0}; + + while (1) { + if (payloadIter >= rows) { // payload end, break + // write the remaining blocks to the file + if (pVnode->nfd > 0) { + int blocksLeft = importHandle.compInfo.numOfBlocks - blockIter.oslot; + if (blocksLeft > 0) { + checksum = taosCalcChecksum(checksum, (uint8_t *)(importHandle.pBlocks + blockIter.oslot), + sizeof(SCompBlock) * blocksLeft); + if (twrite(pVnode->nfd, (void *)(importHandle.pBlocks + blockIter.oslot), + sizeof(SCompBlock) * blocksLeft) < 0) { + dError("vid:%d sid:%d meterId:%s, failed to write %s file, size:%ul, reason:%s", pVnode->vnode, + pObj->sid, pObj->meterId, pVnode->nfn, sizeof(SCompBlock) * blocksLeft, strerror(errno)); + code = TSDB_CODE_OTHERS; + goto _error_merge; + } + } + + if (twrite(pVnode->nfd, (void *)(&checksum), sizeof(TSCKSUM)) < 0) { + dError("vid:%d sid:%d meterId:%s, failed to write %s file, size:%ul, reason:%s", pVnode->vnode, pObj->sid, + pObj->meterId, pVnode->nfn, sizeof(TSCKSUM), strerror(errno)); + code = TSDB_CODE_OTHERS; + goto _error_merge; + } + } + break; + } + + if (blockIter.slot >= importHandle.compInfo.numOfBlocks) { // blocks end, break + // Should never come here + assert(false); + } + + TSKEY key = KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter); + + { // Binary search the (slot, pos) which is >= key as well as nextKey + int left = blockIter.slot; + int right = importHandle.compInfo.numOfBlocks - 1; + TSKEY minKey = importHandle.pBlocks[left].keyFirst; + TSKEY maxKey = importHandle.pBlocks[right].keyLast; + + assert(minKey <= maxKey); + + if (key < minKey) { // Case 1. write just ahead the blockIter.slot + blockIter.slot = left; + blockIter.pos = 0; + blockIter.nextKey = minKey; + } else if (key > maxKey) { // Case 2. write to the end + if (importHandle.pBlocks[right].last) { // Case 2.1 last block in .last file, need to merge + assert(importHandle.last != 0); + importHandle.last = 0; + blockIter.slot = right; + blockIter.pos = importHandle.pBlocks[right].numOfPoints; + } else { // Case 2.2 just write after the last block + blockIter.slot = right + 1; + blockIter.pos = 0; + } + blockIter.nextKey = maxFileKey + 1; + } else { // Case 3. need to search the block for slot and pos + if (key == minKey || key == maxKey) { + payloadIter++; + continue; + } + + // Here: minKey < key < maxKey + + int mid; + TSKEY blockMinKey; + TSKEY blockMaxKey; + + // Binary search the slot + do { + mid = (left + right) / 2; + blockMinKey = importHandle.pBlocks[mid].keyFirst; + blockMaxKey = importHandle.pBlocks[mid].keyLast; + + assert(blockMinKey <= blockMaxKey); + + if (key < blockMinKey) { + right = mid; + } else if (key > blockMaxKey) { + left = mid + 1; + } else { /* blockMinKey <= key <= blockMaxKey */ + break; + } + } while (left < right); + + if (key == blockMinKey || key == blockMaxKey) { // duplicate key + payloadIter++; + continue; + } + + // Get the slot + if (key > blockMaxKey) { /* pos = 0 or pos = ? */ + blockIter.slot = mid + 1; + } else { /* key < blockMinKey (pos = 0) || (key > blockMinKey && key < blockMaxKey) (pos=?) 
*/ + blockIter.slot = mid; + } + + // Get the pos + assert(blockIter.slot < importHandle.compInfo.numOfBlocks); + + if (key == importHandle.pBlocks[blockIter.slot].keyFirst || + key == importHandle.pBlocks[blockIter.slot].keyLast) { + payloadIter++; + continue; + } + + assert(key < importHandle.pBlocks[blockIter.slot].keyLast); + + /* */ + if (key < importHandle.pBlocks[blockIter.slot].keyFirst) { + blockIter.pos = 0; + blockIter.nextKey = importHandle.pBlocks[blockIter.slot].keyFirst; + } else { + SCompBlock *pBlock = importHandle.pBlocks + blockIter.slot; + if (pBlock->sversion != pObj->sversion) { /*TODO*/ + } + if (vnodeLoadNeededBlockData(pObj, &importHandle, blockIter.slot, DATA_LOAD_TIMESTAMP, &code) < 0) { + goto _error_merge; + } + int pos = (*vnodeSearchKeyFunc[pObj->searchAlgorithm])( + importHandle.data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, pBlock->numOfPoints, key, TSQL_SO_ASC); + assert(pos != 0); + if (KEY_AT_INDEX(importHandle.data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, sizeof(TSKEY), pos) == key) { + payloadIter++; + continue; + } + + blockIter.pos = pos; + blockIter.nextKey = (blockIter.slot + 1 < importHandle.compInfo.numOfBlocks) + ? importHandle.pBlocks[blockIter.slot + 1].keyFirst + : maxFileKey + 1; + // Need to merge with this block + if (importHandle.pBlocks[blockIter.slot].last) { // this is to merge with the last block + assert((blockIter.slot == (importHandle.compInfo.numOfBlocks - 1))); + importHandle.last = 0; + } + } } - } else { - if (pImport->pos < 0) pImport->pos = 0; } - } - if (pImport->key != key && pImport->pos > 0) { - if ( pObj->sversion != pBlock->sversion ) { - dError("vid:%d sid:%d id:%s, import sversion not matached, expected:%d received:%d", pObj->vnode, pObj->sid, - pBlock->sversion, pObj->sversion); + int aslot = MIN(blockIter.slot, importHandle.compInfo.numOfBlocks - 1); + int64_t sversion = importHandle.pBlocks[aslot].sversion; + if (sversion != pObj->sversion) { code = TSDB_CODE_OTHERS; - } else { - pImport->offset = pBlock->offset; - - pImport->buffer = - malloc(pObj->bytesPerPoint * pVnode->cfg.rowsInFileBlock + sizeof(SData) * pObj->numOfColumns); - pImport->sdata[0] = (SData *)pImport->buffer; - for (int col = 1; col < pObj->numOfColumns; ++col) - pImport->sdata[col] = (SData *)(((char *)pImport->sdata[col - 1]) + sizeof(SData) + - pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes); - - code = vnodeReadCompBlockToMem(pObj, &query, pImport->sdata); - if (code < 0) { - code = -code; - tfree(pImport->buffer); + goto _error_merge; + } + + // Open the new .t file if not opened yet. 
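The slot selection above is a range binary search over the meter's sorted, non-overlapping `SCompBlock` list: a payload key either falls before the first block, after the last block (possibly merging with a trailing `last` block), inside some block (merge required), or in a gap between two blocks (written as a new block), and keys equal to an existing block boundary are treated as duplicates and skipped. A minimal, self-contained sketch of the same search, using a simplified stand-in for `SCompBlock`:

```c
#include <stdint.h>

typedef int64_t TSKEY;

/* Simplified stand-in for SCompBlock: only the key range matters here. */
typedef struct { TSKEY keyFirst, keyLast; } BlockRange;

/* Return the index of the block whose [keyFirst, keyLast] contains `key`,
 * or, if no block contains it, the index of the first block that starts
 * after `key` (i.e. where a new block would be inserted). Assumes the
 * blocks are sorted and non-overlapping, like the per-meter block list. */
static int findSlot(const BlockRange *blocks, int n, TSKEY key) {
  int left = 0, right = n - 1;
  while (left <= right) {
    int mid = left + (right - left) / 2;
    if (key < blocks[mid].keyFirst) {
      right = mid - 1;
    } else if (key > blocks[mid].keyLast) {
      left = mid + 1;
    } else {
      return mid;   /* key falls inside this block: merge path */
    }
  }
  return left;      /* key falls in a gap: insert before blocks[left] */
}
```

The patch additionally tracks `nextKey`, the first key of the following block, so the merge loop knows how many consecutive payload rows can be consumed before the next block boundary is reached.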
+ if (pVnode->nfd <= 0) { + if (vnodeOpenTempFilesForImport(&importHandle, pObj, fid) < 0) { + code = TSDB_CODE_OTHERS; + goto _error_merge; } } - } - } - } else { - dError("vid:%d sid:%d id:%s, file is corrupted, import failed", pObj->vnode, pObj->sid, pObj->meterId); - code = -ret; - } - tclose(query.hfd); - tclose(query.dfd); - tclose(query.lfd); - vnodeFreeFields(&query); - tfree(query.pBlock); + if (blockIter.slot > blockIter.oslot) { // write blocks in range [blockIter.oslot, blockIter.slot) to .t file + checksum = taosCalcChecksum(checksum, (uint8_t *)(importHandle.pBlocks + blockIter.oslot), + sizeof(SCompBlock) * (blockIter.slot - blockIter.oslot)); + if (twrite(pVnode->nfd, (void *)(importHandle.pBlocks + blockIter.oslot), + sizeof(SCompBlock) * (blockIter.slot - blockIter.oslot)) < 0) { + dError("vid:%d sid:%d meterId:%s, failed to write %s file, size:%ul, reason:%s", pVnode->vnode, pObj->sid, + pObj->meterId, pVnode->nfn, sizeof(SCompBlock) * (blockIter.slot - blockIter.oslot), + strerror(errno)); + code = TSDB_CODE_OTHERS; + goto _error_merge; + } - return code; -} + blockIter.oslot = blockIter.slot; + } -int vnodeFindKeyInCache(SImportInfo *pImport, int order) { - SMeterObj *pObj = pImport->pObj; - int code = 0; - SQuery query; - SCacheInfo *pInfo = (SCacheInfo *)pObj->pCache; + if (blockIter.pos == 0) { // No need to merge + // copy payload part to data + int rowOffset = 0; + for (; payloadIter < rows; rowOffset++) { + if (KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) >= blockIter.nextKey) break; - TSKEY key = order ? pImport->firstKey : pImport->lastKey; - memset(&query, 0, sizeof(query)); - query.order.order = order; - query.skey = key; - query.ekey = order ? pImport->lastKey : pImport->firstKey; - vnodeSearchPointInCache(pObj, &query); + vnodeConvertRowsToCols(pObj, payload + pObj->bytesPerPoint * payloadIter, 1, data, rowOffset); + pointsImported++; + lastKeyImported = KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter); + payloadIter++; + } - if (query.slot < 0) { - pImport->slot = pInfo->commitSlot; - if (pInfo->commitPoint >= pObj->pointsPerBlock) pImport->slot = (pImport->slot + 1) % pInfo->maxBlocks; - pImport->pos = 0; - pImport->key = 0; - dTrace("vid:%d sid:%d id:%s, key:%ld, import to head of cache", pObj->vnode, pObj->sid, pObj->meterId, key); - code = 0; - } else { - pImport->slot = query.slot; - pImport->pos = query.pos; - pImport->key = query.key; + // write directly to .data file + compBlock.last = 0; + if (vnodeWriteBlockToFile(pObj, &compBlock, data, cdata, rowOffset) < 0) { + // TODO: Deal with the ERROR here + } - if (key != query.key) { - if (order == 0) { - // since pos is the position which has smaller key, data shall be imported after it - pImport->pos++; - if (pImport->pos >= pObj->pointsPerBlock) { - pImport->slot = (pImport->slot + 1) % pInfo->maxBlocks; - pImport->pos = 0; + checksum = taosCalcChecksum(checksum, (uint8_t *)(&compBlock), sizeof(SCompBlock)); + if (twrite(pVnode->nfd, &compBlock, sizeof(SCompBlock)) < 0) { + // TODO : deal with the ERROR here + } + importHandle.newNumOfBlocks++; + importHandle.driftOffset += sizeof(SCompBlock); + } else { // Merge block and payload from payloadIter + + if (vnodeLoadNeededBlockData(pObj, &importHandle, blockIter.slot, + DATA_LOAD_TIMESTAMP | DATA_LOAD_OTHER_DATA, &code) < 0) { // Load neccessary blocks + goto _error_merge; + } + + importHandle.oldNumOfBlocks--; + importHandle.driftOffset -= sizeof(SCompBlock); + + int rowOffset = blockIter.pos; // counter for data + + // Copy the 
front part + for (int col = 0; col < pObj->numOfColumns; col++) { + memcpy((void *)(data[col]->data), (void *)(importHandle.data[col]->data), + pObj->schema[col].bytes * blockIter.pos); + } + + // Merge part + while (1) { + if (rowOffset >= pVnode->cfg.rowsInFileBlock) { // data full in a block to commit + compBlock.last = 0; + if (vnodeWriteBlockToFile(pObj, &compBlock, data, cdata, rowOffset) < 0) { + // TODO : deal with the ERROR here + } + + checksum = taosCalcChecksum(checksum, (uint8_t *)(&compBlock), sizeof(SCompBlock)); + if (twrite(pVnode->nfd, (void *)(&compBlock), sizeof(SCompBlock)) < 0) { + dError("vid:%d sid:%d meterId:%s, failed to write %s file, size:%ul, reason:%s", pVnode->vnode, + pObj->sid, pObj->meterId, pVnode->nfn, sizeof(SCompBlock), strerror(errno)); + goto _error_merge; + } + importHandle.newNumOfBlocks++; + importHandle.driftOffset += sizeof(SCompBlock); + rowOffset = 0; + } + + if ((payloadIter >= rows || KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) >= blockIter.nextKey) && + blockIter.pos >= importHandle.pBlocks[blockIter.slot].numOfPoints) + break; + + if (payloadIter >= rows || + KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) >= blockIter.nextKey) { // payload end + for (int col = 0; col < pObj->numOfColumns; col++) { + memcpy(data[col]->data + rowOffset * pObj->schema[col].bytes, + importHandle.data[col]->data + pObj->schema[col].bytes * blockIter.pos, pObj->schema[col].bytes); + } + blockIter.pos++; + rowOffset++; + } else if (blockIter.pos >= importHandle.pBlocks[blockIter.slot].numOfPoints) { // block end + vnodeConvertRowsToCols(pObj, payload + pObj->bytesPerPoint * payloadIter, 1, data, rowOffset); + pointsImported++; + lastKeyImported = KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter); + payloadIter++; + rowOffset++; + } else { + if (KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) == + KEY_AT_INDEX(importHandle.data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, sizeof(TSKEY), + blockIter.pos)) { // duplicate key + payloadIter++; + continue; + } else if (KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) < + KEY_AT_INDEX(importHandle.data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, sizeof(TSKEY), + blockIter.pos)) { + vnodeConvertRowsToCols(pObj, payload + pObj->bytesPerPoint * payloadIter, 1, data, rowOffset); + pointsImported++; + lastKeyImported = KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter); + payloadIter++; + rowOffset++; + } else { + for (int col = 0; col < pObj->numOfColumns; col++) { + memcpy(data[col]->data + rowOffset * pObj->schema[col].bytes, + importHandle.data[col]->data + pObj->schema[col].bytes * blockIter.pos, + pObj->schema[col].bytes); + } + blockIter.pos++; + rowOffset++; + } + } + } + if (rowOffset > 0) { // data full in a block to commit + compBlock.last = 0; + if (vnodeWriteBlockToFile(pObj, &compBlock, data, cdata, rowOffset) < 0) { + // TODO : deal with the ERROR here + } + + checksum = taosCalcChecksum(checksum, (uint8_t *)(&compBlock), sizeof(SCompBlock)); + if (twrite(pVnode->nfd, (void *)(&compBlock), sizeof(SCompBlock)) < 0) { + dError("vid:%d sid:%d meterId:%s, failed to write %s file, size:%ul, reason:%s", pVnode->vnode, pObj->sid, + pObj->meterId, pVnode->nfn, sizeof(SCompBlock), strerror(errno)); + goto _error_merge; + } + importHandle.newNumOfBlocks++; + importHandle.driftOffset += sizeof(SCompBlock); + rowOffset = 0; + } + + blockIter.slot++; + blockIter.oslot = blockIter.slot; } - } else { - if (pImport->pos < 0) pImport->pos = 0; } } - code = 0; } - return code; -} + // Write 
the SCompInfo part + if (vnodeCloseImportFiles(pObj, &importHandle) < 0) { + code = TSDB_CODE_OTHERS; + goto _error_merge; + } -int vnodeImportStartToCache(SImportInfo *pImport, char *payload, int rows) { - int code = 0; - SMeterObj *pObj = pImport->pObj; + pImport->importedRows += pointsImported; - code = vnodeFindKeyInCache(pImport, 1); - if (code != 0) return code; + pthread_mutex_lock(&(pPool->vmutex)); + if (pInfo->numOfBlocks > 0) { + int slot = (pInfo->currentSlot - pInfo->numOfBlocks + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + TSKEY firstKeyInCache = *((TSKEY *)(pInfo->cacheBlocks[slot]->offset[0])); - if (pImport->key != pImport->firstKey) { - rows = vnodeGetImportStartPart(pObj, payload, rows, pImport->key); - pImport->importedRows = rows; - code = vnodeImportToCache(pImport, payload, rows); - } else { - dTrace("vid:%d sid:%d id:%s, data is already imported to cache", pObj->vnode, pObj->sid, pObj->meterId); + // data may be in commited cache, cache shall be released + if (lastKeyImported > firstKeyInCache) { + while (slot != pInfo->commitSlot) { + SCacheBlock *pCacheBlock = pInfo->cacheBlocks[slot]; + vnodeFreeCacheBlock(pCacheBlock); + slot = (slot + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + } + + if (pInfo->commitPoint == pObj->pointsPerBlock) { + if (pInfo->cacheBlocks[pInfo->commitSlot]->pMeterObj == pObj) { + vnodeFreeCacheBlock(pInfo->cacheBlocks[pInfo->commitSlot]); + } + } + } } + pthread_mutex_unlock(&(pPool->vmutex)); + + // TODO: free the allocated memory + tfree(buffer); + tfree(cbuffer); + tfree(importHandle.pHeader); + tfree(importHandle.pBlocks); + tfree(importHandle.pField); + tfree(importHandle.buffer); + tfree(importHandle.temp); + tfree(importHandle.tempBuffer); return code; -} - -int vnodeImportStartToFile(SImportInfo *pImport, char *payload, int rows) { - int code = 0; - SMeterObj *pObj = pImport->pObj; - code = vnodeFindKeyInFile(pImport, 1); - if (code != 0) return code; - - if (pImport->key != pImport->firstKey) { - pImport->payload = payload; - pImport->rows = vnodeGetImportStartPart(pObj, payload, rows, pImport->key); - pImport->importedRows = pImport->rows; - code = vnodeImportToFile(pImport); - } else { - dTrace("vid:%d sid:%d id:%s, data is already imported to file", pObj->vnode, pObj->sid, pObj->meterId); +_error_merge: + tfree(buffer); + tfree(cbuffer); + tfree(importHandle.pHeader); + tfree(importHandle.pBlocks); + tfree(importHandle.pField); + tfree(importHandle.buffer); + tfree(importHandle.temp); + tfree(importHandle.tempBuffer); + + close(pVnode->dfd); + pVnode->dfd = 0; + + close(pVnode->hfd); + pVnode->hfd = 0; + + close(pVnode->lfd); + pVnode->lfd = 0; + + if (pVnode->nfd > 0) { + close(pVnode->nfd); + pVnode->nfd = 0; + remove(pVnode->nfn); } return code; } -int vnodeImportWholeToFile(SImportInfo *pImport, char *payload, int rows) { - int code = 0; - SMeterObj *pObj = pImport->pObj; +#define FORWARD_ITER(iter, step, slotLimit, posLimit) \ + { \ + if ((iter.pos) + (step) < (posLimit)) { \ + (iter.pos) = (iter.pos) + (step); \ + } else { \ + (iter.pos) = 0; \ + (iter.slot) = ((iter.slot) + 1) % (slotLimit); \ + } \ + } - code = vnodeFindKeyInFile(pImport, 0); - if (code != 0) return code; +int isCacheEnd(SBlockIter iter, SMeterObj *pMeter) { + SCacheInfo *pInfo = (SCacheInfo *)(pMeter->pCache); + int slot = 0; + int pos = 0; - if (pImport->key != pImport->lastKey) { - pImport->payload = payload; - pImport->rows = vnodeGetImportEndPart(pObj, payload, rows, &pImport->payload, pImport->key); - pImport->importedRows = pImport->rows; - code = 
vnodeImportToFile(pImport); + if (pInfo->cacheBlocks[pInfo->currentSlot]->numOfPoints == pMeter->pointsPerBlock) { + slot = (pInfo->currentSlot + 1) % (pInfo->maxBlocks); + pos = 0; } else { - code = vnodeImportStartToFile(pImport, payload, rows); + slot = pInfo->currentSlot; + pos = pInfo->cacheBlocks[pInfo->currentSlot]->numOfPoints; } - - return code; + return ((iter.slot == slot) && (iter.pos == pos)); } -int vnodeImportWholeToCache(SImportInfo *pImport, char *payload, int rows) { - int code = 0; - SMeterObj *pObj = pImport->pObj; +static void vnodeFlushMergeBuffer(SMergeBuffer *pBuffer, SBlockIter *pWriteIter, SBlockIter *pCacheIter, + SMeterObj *pObj, SCacheInfo *pInfo, int checkBound) { + // Function to flush the merge buffer data to cache + if (pWriteIter->pos == pObj->pointsPerBlock) { + pWriteIter->pos = 0; + pWriteIter->slot = (pWriteIter->slot + 1) % pInfo->maxBlocks; + } - code = vnodeFindKeyInCache(pImport, 0); - if (code != 0) return code; + while (pBuffer->spos != pBuffer->epos) { + if (checkBound && pWriteIter->slot == pCacheIter->slot && pWriteIter->pos == pCacheIter->pos) break; + for (int col = 0; col < pObj->numOfColumns; col++) { + memcpy(pInfo->cacheBlocks[pWriteIter->slot]->offset[col] + pObj->schema[col].bytes * pWriteIter->pos, + pBuffer->offset[col] + pObj->schema[col].bytes * pBuffer->spos, pObj->schema[col].bytes); + } - if (pImport->key != pImport->lastKey) { - char *pStart; - if ( pImport->key < pObj->lastKeyOnFile ) pImport->key = pObj->lastKeyOnFile; - rows = vnodeGetImportEndPart(pObj, payload, rows, &pStart, pImport->key); - pImport->importedRows = rows; - code = vnodeImportToCache(pImport, pStart, rows); - } else { - if (pImport->firstKey > pObj->lastKeyOnFile) { - code = vnodeImportStartToCache(pImport, payload, rows); - } else if (pImport->firstKey < pObj->lastKeyOnFile) { - code = vnodeImportStartToFile(pImport, payload, rows); - } else { // firstKey == pObj->lastKeyOnFile - dTrace("vid:%d sid:%d id:%s, data is already there", pObj->vnode, pObj->sid, pObj->meterId); + if (pWriteIter->pos + 1 < pObj->pointsPerBlock) { + (pWriteIter->pos)++; + } else { + pInfo->cacheBlocks[pWriteIter->slot]->numOfPoints = pWriteIter->pos + 1; + pWriteIter->slot = (pWriteIter->slot + 1) % pInfo->maxBlocks; + pWriteIter->pos = 0; } + + pBuffer->spos = (pBuffer->spos + 1) % pBuffer->totalRows; } - return code; + if ((!checkBound) && pWriteIter->pos != 0) { + pInfo->cacheBlocks[pWriteIter->slot]->numOfPoints = pWriteIter->pos; + } } -int vnodeImportPoints(SMeterObj *pObj, char *cont, int contLen, char source, void *param, int sversion, - int *pNumOfPoints, TSKEY now) { - SSubmitMsg *pSubmit = (SSubmitMsg *)cont; - SVnodeObj *pVnode = &vnodeList[pObj->vnode]; - int rows; - char *payload; - int code = TSDB_CODE_ACTION_IN_PROGRESS; - SCachePool *pPool = (SCachePool *)pVnode->pCachePool; - SShellObj *pShell = (SShellObj *)param; - int pointsImported = 0; - - rows = htons(pSubmit->numOfRows); - int expectedLen = rows * pObj->bytesPerPoint + sizeof(pSubmit->numOfRows); - if (expectedLen != contLen) { - dError("vid:%d sid:%d id:%s, invalid import, expected:%d, contLen:%d", pObj->vnode, pObj->sid, pObj->meterId, - expectedLen, contLen); - return TSDB_CODE_WRONG_MSG_SIZE; +int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int rows) { + SMeterObj * pObj = pImport->pObj; + SVnodeObj * pVnode = vnodeList + pObj->vnode; + int code = -1; + SCacheInfo * pInfo = (SCacheInfo *)(pObj->pCache); + int payloadIter; + SCachePool * pPool = (SCachePool 
*)(pVnode->pCachePool); + int isCacheIterEnd = 0; + int spayloadIter = 0; + int isAppendData = 0; + int rowsImported = 0; + int totalRows = 0; + size_t size = 0; + SMergeBuffer *pBuffer = NULL; + + TSKEY firstKey = KEY_AT_INDEX(payload, pObj->bytesPerPoint, 0); + TSKEY lastKey = KEY_AT_INDEX(payload, pObj->bytesPerPoint, rows - 1); + + assert(firstKey <= lastKey && firstKey > pObj->lastKeyOnFile); + + // TODO: make this condition less strict + if (pObj->freePoints < rows || pObj->freePoints < (pObj->pointsPerBlock << 1)) { // No free room to hold the data + dError("vid:%d sid:%d id:%s, import failed, cache is full, freePoints:%d", pObj->vnode, pObj->sid, pObj->meterId, + pObj->freePoints); + pImport->importedRows = 0; + pImport->commit = 1; + code = TSDB_CODE_ACTION_IN_PROGRESS; + return code; } - if (sversion != pObj->sversion) { - dError("vid:%d sid:%d id:%s, invalid sversion, expected:%d received:%d", pObj->vnode, pObj->sid, pObj->meterId, - pObj->sversion, sversion); - return TSDB_CODE_OTHERS; + if (pInfo->numOfBlocks == 0) { + if (vnodeAllocateCacheBlock(pObj) < 0) { + pImport->importedRows = 0; + pImport->commit = 1; + code = TSDB_CODE_ACTION_IN_PROGRESS; + return code; + } } - payload = pSubmit->payLoad; - TSKEY firstKey = *(TSKEY *)payload; - TSKEY lastKey = *(TSKEY *)(payload + pObj->bytesPerPoint*(rows-1)); - int cfid = now/pVnode->cfg.daysPerFile/tsMsPerDay[pVnode->cfg.precision]; - TSKEY minAllowedKey = (cfid - pVnode->maxFiles + 1)*pVnode->cfg.daysPerFile*tsMsPerDay[pVnode->cfg.precision]; - TSKEY maxAllowedKey = (cfid + 2)*pVnode->cfg.daysPerFile*tsMsPerDay[pVnode->cfg.precision] - 1; - if (firstKey < minAllowedKey || firstKey > maxAllowedKey || lastKey < minAllowedKey || lastKey > maxAllowedKey) { - dError("vid:%d sid:%d id:%s, vnode lastKeyOnFile:%lld, data is out of range, rows:%d firstKey:%lld lastKey:%lld minAllowedKey:%lld maxAllowedKey:%lld", - pObj->vnode, pObj->sid, pObj->meterId, pVnode->lastKeyOnFile, rows, firstKey, lastKey, minAllowedKey, maxAllowedKey); - return TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; + // Find the first importable record from payload + pImport->lastKey = lastKey; + for (payloadIter = 0; payloadIter < rows; payloadIter++) { + TSKEY key = KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter); + if (key == pObj->lastKey) continue; + if (key > pObj->lastKey) { // Just as insert + pImport->slot = pInfo->currentSlot; + pImport->pos = pInfo->cacheBlocks[pImport->slot]->numOfPoints; + isCacheIterEnd = 1; + break; + } else { + pImport->firstKey = key; + if (vnodeFindKeyInCache(pImport, 1) < 0) { + goto _exit; + } + + if (pImport->firstKey != pImport->key) break; + } } - // forward to peers - if (pShell && pVnode->cfg.replications > 1) { - code = vnodeForwardToPeer(pObj, cont, contLen, TSDB_ACTION_IMPORT, sversion); - if (code != 0) return code; + if (payloadIter == rows) { + pImport->importedRows = 0; + code = 0; + goto _exit; } - if (pVnode->cfg.commitLog && source != TSDB_DATA_SOURCE_LOG) { - if (pVnode->logFd < 0) return TSDB_CODE_INVALID_COMMIT_LOG; - code = vnodeWriteToCommitLog(pObj, TSDB_ACTION_IMPORT, cont, contLen, sversion); - if (code != 0) return code; + spayloadIter = payloadIter; + if (pImport->pos == pObj->pointsPerBlock) assert(isCacheIterEnd); + + // Allocate a new merge buffer work as buffer + totalRows = pObj->pointsPerBlock + rows - payloadIter + 1; + size = sizeof(SMergeBuffer) + sizeof(char *) * pObj->numOfColumns + pObj->bytesPerPoint * totalRows; + pBuffer = (SMergeBuffer *)malloc(size); + if (pBuffer == NULL) { + dError("vid:%d 
sid:%d meterId:%s, failed to allocate memory, size:%d", pObj->vnode, pObj->sid, pObj->meterId, size); + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + pBuffer->spos = 0; + pBuffer->epos = 0; + pBuffer->totalRows = totalRows; + pBuffer->offset[0] = (char *)pBuffer + sizeof(SMergeBuffer) + sizeof(char *) * pObj->numOfColumns; + for (int col = 1; col < pObj->numOfColumns; col++) { + pBuffer->offset[col] = pBuffer->offset[col - 1] + pObj->schema[col - 1].bytes * totalRows; } - if (*((TSKEY *)(pSubmit->payLoad + (rows - 1) * pObj->bytesPerPoint)) > pObj->lastKey) { - vnodeClearMeterState(pObj, TSDB_METER_STATE_IMPORTING); - vnodeSetMeterState(pObj, TSDB_METER_STATE_INSERT); - code = vnodeInsertPoints(pObj, cont, contLen, TSDB_DATA_SOURCE_LOG, NULL, pObj->sversion, &pointsImported, now); + // TODO: take pImport->pos = pObj->pointsPerBlock into consideration + { // Do the merge staff + SBlockIter cacheIter = {pImport->slot, pImport->pos, 0, 0}; // Iter to traverse old cache data + SBlockIter writeIter = {pImport->slot, pImport->pos, 0, 0}; // Iter to write data to cache + int availPoints = pObj->pointsPerBlock - pInfo->cacheBlocks[pInfo->currentSlot]->numOfPoints; - if (pShell) { - pShell->code = code; - pShell->numOfTotalPoints += pointsImported; - } + assert(availPoints >= 0); - vnodeClearMeterState(pObj, TSDB_METER_STATE_INSERT); - } else { - SImportInfo *pNew, import; + while (1) { + if ((payloadIter >= rows) && isCacheIterEnd) break; - dTrace("vid:%d sid:%d id:%s, import %d rows data", pObj->vnode, pObj->sid, pObj->meterId, rows); - memset(&import, 0, sizeof(import)); - import.firstKey = *((TSKEY *)(payload)); - import.lastKey = *((TSKEY *)(pSubmit->payLoad + (rows - 1) * pObj->bytesPerPoint)); - import.pObj = pObj; - import.pShell = pShell; - import.payload = payload; - import.rows = rows; + if ((pBuffer->epos + 1) % pBuffer->totalRows == pBuffer->spos) { // merge buffer is full, flush + vnodeFlushMergeBuffer(pBuffer, &writeIter, &cacheIter, pObj, pInfo, 1); + } - int32_t num = 0; - pthread_mutex_lock(&pVnode->vmutex); - num = pObj->numOfQueries; - pthread_mutex_unlock(&pVnode->vmutex); + TSKEY payloadKey = (payloadIter < rows) ? KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) : INT64_MAX; + TSKEY cacheKey = (isCacheIterEnd) ? 
INT64_MAX : KEY_AT_INDEX(pInfo->cacheBlocks[cacheIter.slot]->offset[0], sizeof(TSKEY), cacheIter.pos); - int32_t commitInProcess = 0; + if (cacheKey < payloadKey) { // if (payload end || (cacheIter not end && payloadKey > blockKey)), consume cache + for (int col = 0; col < pObj->numOfColumns; col++) { + memcpy(pBuffer->offset[col] + pObj->schema[col].bytes * pBuffer->epos, + pInfo->cacheBlocks[cacheIter.slot]->offset[col] + pObj->schema[col].bytes * cacheIter.pos, + pObj->schema[col].bytes); + } + FORWARD_ITER(cacheIter, 1, pInfo->maxBlocks, pObj->pointsPerBlock); + isCacheIterEnd = isCacheEnd(cacheIter, pObj); + } else if (cacheKey > payloadKey) { // cacheIter end || (payloadIter not end && payloadKey < blockKey), consume payload + if (availPoints == 0) { // Need to allocate a new cache block + pthread_mutex_lock(&(pPool->vmutex)); + // TODO: Need to check if there are enough slots to hold a new one + SCacheBlock *pNewBlock = vnodeGetFreeCacheBlock(pVnode); + if (pNewBlock == NULL) { // Failed to allocate a new cache block, need to commit and loop over the remaining cache records + pthread_mutex_unlock(&(pPool->vmutex)); + payloadIter = rows; + code = TSDB_CODE_ACTION_IN_PROGRESS; + pImport->commit = 1; + continue; + } + + assert(pInfo->numOfBlocks <= pInfo->maxBlocks); + if (pInfo->numOfBlocks == pInfo->maxBlocks) { + vnodeFreeCacheBlock(pInfo->cacheBlocks[(pInfo->currentSlot + 1) % pInfo->maxBlocks]); + } - pthread_mutex_lock(&pPool->vmutex); - if (((commitInProcess = pPool->commitInProcess) == 1) || num > 0) { - pthread_mutex_unlock(&pPool->vmutex); + pNewBlock->pMeterObj = pObj; + pNewBlock->offset[0] = (char *)pNewBlock + sizeof(SCacheBlock) + sizeof(char *) * pObj->numOfColumns; + for (int col = 1; col < pObj->numOfColumns; col++) + pNewBlock->offset[col] = pNewBlock->offset[col - 1] + pObj->schema[col - 1].bytes * pObj->pointsPerBlock; + + int newSlot = (writeIter.slot + 1) % pInfo->maxBlocks; + pInfo->blocks++; + int tblockId = pInfo->blocks; + + if (writeIter.slot != pInfo->currentSlot) { + for (int tslot = pInfo->currentSlot; tslot != writeIter.slot;) { + int nextSlot = (tslot + 1) % pInfo->maxBlocks; + pInfo->cacheBlocks[nextSlot] = pInfo->cacheBlocks[tslot]; + pInfo->cacheBlocks[nextSlot]->slot = nextSlot; + pInfo->cacheBlocks[nextSlot]->blockId = tblockId--; + tslot = (tslot - 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + } + } - pNew = (SImportInfo *)malloc(sizeof(SImportInfo)); - memcpy(pNew, &import, sizeof(SImportInfo)); - pNew->signature = pNew; - int payloadLen = contLen - sizeof(SSubmitMsg); - pNew->payload = malloc(payloadLen); - pNew->opayload = pNew->payload; - memcpy(pNew->payload, payload, payloadLen); + int index = pNewBlock->index; + if (cacheIter.slot == writeIter.slot) { + pNewBlock->numOfPoints = pInfo->cacheBlocks[cacheIter.slot]->numOfPoints; + int pointsLeft = pInfo->cacheBlocks[cacheIter.slot]->numOfPoints - cacheIter.pos; + if (pointsLeft > 0) { + for (int col = 0; col < pObj->numOfColumns; col++) { + memcpy((void *)(pNewBlock->offset[col] + pObj->schema[col].bytes*cacheIter.pos), + pInfo->cacheBlocks[cacheIter.slot]->offset[col] + pObj->schema[col].bytes * cacheIter.pos, + pObj->schema[col].bytes * pointsLeft); + } + } + } + pNewBlock->blockId = tblockId; + pNewBlock->slot = newSlot; + pNewBlock->index = index; + pInfo->cacheBlocks[newSlot] = pNewBlock; + pInfo->numOfBlocks++; + pInfo->unCommittedBlocks++; + pInfo->currentSlot = (pInfo->currentSlot + 1) % pInfo->maxBlocks; + pthread_mutex_unlock(&(pPool->vmutex)); + cacheIter.slot = (cacheIter.slot + 1) % 
pInfo->maxBlocks; + // move a cache of data forward + availPoints = pObj->pointsPerBlock; + } - dTrace("vid:%d sid:%d id:%s, import later, commit in process:%d, numOfQueries:%d", pObj->vnode, pObj->sid, - pObj->meterId, commitInProcess, pObj->numOfQueries); + int offset = 0; + for (int col = 0; col < pObj->numOfColumns; col++) { + memcpy(pBuffer->offset[col] + pObj->schema[col].bytes * pBuffer->epos, + payload + pObj->bytesPerPoint * payloadIter + offset, pObj->schema[col].bytes); + offset += pObj->schema[col].bytes; + } + if (spayloadIter == payloadIter) {// update pVnode->firstKey + pthread_mutex_lock(&(pVnode->vmutex)); + if (KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) < pVnode->firstKey) pVnode->firstKey = firstKey; + pthread_mutex_unlock(&(pVnode->vmutex)); + } + if (isCacheIterEnd) { + pObj->lastKey = KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter); + if (!isAppendData) isAppendData = 1; + } - taosTmrStart(vnodeProcessImportTimer, 10, pNew, vnodeTmrCtrl); - return 0; - } else { - pPool->commitInProcess = 1; - pthread_mutex_unlock(&pPool->vmutex); - int code = vnodeImportData(pObj, &import); - if (pShell) { - pShell->code = code; - pShell->numOfTotalPoints += import.importedRows; + rowsImported++; + availPoints--; + payloadIter++; + + } else { + payloadIter++; + continue; } + pBuffer->epos = (pBuffer->epos + 1) % pBuffer->totalRows; + } + + if (pBuffer->spos != pBuffer->epos) { // Flush the remaining data in the merge buffer + vnodeFlushMergeBuffer(pBuffer, &writeIter, &cacheIter, pObj, pInfo, 0); + } else { + // Should never come here + assert(false); + } + + if (isAppendData) { + pthread_mutex_lock(&(pVnode->vmutex)); + if (pObj->lastKey > pVnode->lastKey) pVnode->lastKey = pObj->lastKey; + pthread_mutex_unlock(&(pVnode->vmutex)); } } + pImport->importedRows += rowsImported; + atomic_fetch_sub_32(&(pObj->freePoints), rowsImported); + + code = TSDB_CODE_SUCCESS; + +_exit: + tfree(pBuffer); + return code; +} + +int vnodeImportDataToFiles(SImportInfo *pImport, char *payload, const int rows) { + int code = 0; + // TODO : Check the correctness of pObj and pVnode + SMeterObj *pObj = (SMeterObj *)(pImport->pObj); + SVnodeObj *pVnode = vnodeList + pObj->vnode; + + int64_t delta = pVnode->cfg.daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision]; + int sfid = KEY_AT_INDEX(payload, pObj->bytesPerPoint, 0) / delta; + int efid = KEY_AT_INDEX(payload, pObj->bytesPerPoint, rows - 1) / delta; + + for (int fid = sfid; fid <= efid; fid++) { + TSKEY skey = fid * delta; + TSKEY ekey = skey + delta - 1; + int srow = 0, nrows = 0; + + if (vnodeSearchKeyInRange(payload, pObj->bytesPerPoint, rows, skey, ekey, &srow, &nrows) < 0) continue; - pVnode->version++; + assert(nrows > 0); - if (pShell) { - pShell->count--; - if (pShell->count <= 0) vnodeSendShellSubmitRspMsg(pShell, pShell->code, pShell->numOfTotalPoints); + dTrace("vid:%d sid:%d meterId:%s, %d rows of data will be imported to file %d, srow:%d firstKey:%ld lastKey:%ld", + pObj->vnode, pObj->sid, pObj->meterId, nrows, fid, srow, KEY_AT_INDEX(payload, pObj->bytesPerPoint, srow), + KEY_AT_INDEX(payload, pObj->bytesPerPoint, (srow + nrows - 1))); + + code = vnodeMergeDataIntoFile(pImport, payload + (srow * pObj->bytesPerPoint), nrows, fid); + if (code != TSDB_CODE_SUCCESS) break; } - return 0; + return code; } -//todo abort from the procedure if the meter is going to be dropped +// TODO : add offset in pShell to make it avoid repeatedly deal with messages int vnodeImportData(SMeterObj *pObj, SImportInfo *pImport) { - int code = 
0; + int code = 0; + int srow = 0, nrows = 0; + SVnodeObj * pVnode = vnodeList + pObj->vnode; + SCachePool *pPool = (SCachePool *)(pVnode->pCachePool); + + // 1. import data in range (pObj->lastKeyOnFile, INT64_MAX) into cache + if (vnodeSearchKeyInRange(pImport->payload, pObj->bytesPerPoint, pImport->rows, pObj->lastKeyOnFile + 1, INT64_MAX, + &srow, &nrows) >= 0) { + assert(nrows > 0); + code = vnodeImportDataToCache(pImport, pImport->payload + pObj->bytesPerPoint * srow, nrows); + if (pImport->commit) { // Need to commit now + pPool->commitInProcess = 0; + vnodeProcessCommitTimer(pVnode, NULL); + return code; + } - if (pImport->lastKey > pObj->lastKeyOnFile) { - code = vnodeImportWholeToCache(pImport, pImport->payload, pImport->rows); - } else if (pImport->lastKey < pObj->lastKeyOnFile) { - code = vnodeImportWholeToFile(pImport, pImport->payload, pImport->rows); - } else { // lastKey == pObj->lastkeyOnFile - code = vnodeImportStartToFile(pImport, pImport->payload, pImport->rows); + if (code != TSDB_CODE_SUCCESS) return code; } - SVnodeObj *pVnode = &vnodeList[pObj->vnode]; - SCachePool *pPool = (SCachePool *)pVnode->pCachePool; - pPool->commitInProcess = 0; + // 2. import data (0, pObj->lastKeyOnFile) into files + if (vnodeSearchKeyInRange(pImport->payload, pObj->bytesPerPoint, pImport->rows, 0, pObj->lastKeyOnFile - 1, &srow, + &nrows) >= 0) { + assert(nrows > 0); + code = vnodeImportDataToFiles(pImport, pImport->payload + pObj->bytesPerPoint * srow, nrows); + } - if (pImport->commit) vnodeProcessCommitTimer(pVnode, NULL); + pPool->commitInProcess = 0; return code; } diff --git a/src/system/detail/src/vnodeMeter.c b/src/system/detail/src/vnodeMeter.c index a595e8f689786857d255680cb19e6f3e2dc5c4fd..7cb4870eb27289e16c6ea3b040751a5af2a986a9 100644 --- a/src/system/detail/src/vnodeMeter.c +++ b/src/system/detail/src/vnodeMeter.c @@ -14,10 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include +#include "os.h" #include "trpc.h" #include "tschemautil.h" @@ -27,8 +24,7 @@ #include "vnodeMgmt.h" #include "vnodeShell.h" #include "vnodeUtil.h" - -#pragma GCC diagnostic ignored "-Wpointer-sign" +#include "vnodeStatus.h" #define VALID_TIMESTAMP(key, curKey, prec) (((key) >= 0) && ((key) <= ((curKey) + 36500 * tsMsPerDay[prec]))) @@ -115,7 +111,7 @@ FILE *vnodeOpenMeterObjFile(int vnode) { fp = fopen(fileName, "r+"); if (fp != NULL) { if (vnodeCheckFileIntegrity(fp) < 0) { - dError("file:%s is corrupted, need to restore it first", fileName); + dError("file:%s is corrupted, need to restore it first, exit program", fileName); fclose(fp); // todo: how to recover @@ -379,7 +375,11 @@ int vnodeOpenMetersVnode(int vnode) { fseek(fp, TSDB_FILE_HEADER_LEN * 2 / 4, SEEK_SET); fread(&pVnode->cfg, sizeof(SVnodeCfg), 1, fp); + if (vnodeIsValidVnodeCfg(&pVnode->cfg) == false) { + dError("vid:%d, maxSessions:%d cacheBlockSize:%d replications:%d daysPerFile:%d daysToKeep:%d invalid, clear it", + vnode, pVnode->cfg.maxSessions, pVnode->cfg.cacheBlockSize, pVnode->cfg.replications, + pVnode->cfg.daysPerFile, pVnode->cfg.daysToKeep); pVnode->cfg.maxSessions = 0; // error in vnode file return 0; } @@ -487,7 +487,7 @@ int vnodeCreateMeterObj(SMeterObj *pNew, SConnSec *pSec) { vnodeSaveMeterObjToFile(pNew); // vnodeCreateMeterMgmt(pNew, pSec); vnodeCreateStream(pNew); - dTrace("vid:%d sid:%d id:%s, meterObj is created, uid:%ld", pNew->vnode, pNew->sid, pNew->meterId, pNew->uid); + dTrace("vid:%d, sid:%d id:%s, meterObj is created, uid:%ld", pNew->vnode, pNew->sid, pNew->meterId, pNew->uid); } 
return code; @@ -520,7 +520,7 @@ int vnodeRemoveMeterObj(int vnode, int sid) { } // after remove this meter, change its state to DELETED - pObj->state = TSDB_METER_STATE_DELETED; + pObj->state = TSDB_METER_STATE_DROPPED; pObj->timeStamp = taosGetTimestampMs(); vnodeList[vnode].lastRemove = pObj->timeStamp; @@ -575,19 +575,19 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi dTrace("vid:%d sid:%d id:%s, cache is full, freePoints:%d, notFreeSlots:%d", pObj->vnode, pObj->sid, pObj->meterId, pObj->freePoints, pPool->notFreeSlots); vnodeProcessCommitTimer(pVnode, NULL); - return TSDB_CODE_ACTION_IN_PROGRESS; + return code; } // FIXME: Here should be after the comparison of sversions. if (pVnode->cfg.commitLog && source != TSDB_DATA_SOURCE_LOG) { if (pVnode->logFd < 0) return TSDB_CODE_INVALID_COMMIT_LOG; code = vnodeWriteToCommitLog(pObj, TSDB_ACTION_INSERT, cont, contLen, sversion); - if (code != 0) return code; + if (code != TSDB_CODE_SUCCESS) return code; } if (source == TSDB_DATA_SOURCE_SHELL && pVnode->cfg.replications > 1) { code = vnodeForwardToPeer(pObj, cont, contLen, TSDB_ACTION_INSERT, sversion); - if (code != 0) return code; + if (code != TSDB_CODE_SUCCESS) return code; } if (pObj->sversion < sversion) { @@ -599,26 +599,29 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi } pData = pSubmit->payLoad; - code = 0; TSKEY firstKey = *((TSKEY *)pData); TSKEY lastKey = *((TSKEY *)(pData + pObj->bytesPerPoint * (numOfPoints - 1))); - int cfid = now/pVnode->cfg.daysPerFile/tsMsPerDay[pVnode->cfg.precision]; - TSKEY minAllowedKey = (cfid - pVnode->maxFiles + 1)*pVnode->cfg.daysPerFile*tsMsPerDay[pVnode->cfg.precision]; - TSKEY maxAllowedKey = (cfid + 2)*pVnode->cfg.daysPerFile*tsMsPerDay[pVnode->cfg.precision] - 2; + int cfid = now/pVnode->cfg.daysPerFile/tsMsPerDay[(uint8_t)pVnode->cfg.precision]; + + TSKEY minAllowedKey = (cfid - pVnode->maxFiles + 1)*pVnode->cfg.daysPerFile*tsMsPerDay[(uint8_t)pVnode->cfg.precision]; + TSKEY maxAllowedKey = (cfid + 2)*pVnode->cfg.daysPerFile*tsMsPerDay[(uint8_t)pVnode->cfg.precision] - 2; if (firstKey < minAllowedKey || firstKey > maxAllowedKey || lastKey < minAllowedKey || lastKey > maxAllowedKey) { dError("vid:%d sid:%d id:%s, vnode lastKeyOnFile:%lld, data is out of range, numOfPoints:%d firstKey:%lld lastKey:%lld minAllowedKey:%lld maxAllowedKey:%lld", pObj->vnode, pObj->sid, pObj->meterId, pVnode->lastKeyOnFile, numOfPoints,firstKey, lastKey, minAllowedKey, maxAllowedKey); return TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; } - - for (i = 0; i < numOfPoints; ++i) { - // meter will be dropped, abort current insertion - if (pObj->state >= TSDB_METER_STATE_DELETING) { + + if ((code = vnodeSetMeterInsertImportStateEx(pObj, TSDB_METER_STATE_INSERTING)) != TSDB_CODE_SUCCESS) { + goto _over; + } + + for (i = 0; i < numOfPoints; ++i) { // meter will be dropped, abort current insertion + if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) { dWarn("vid:%d sid:%d id:%s, meter is dropped, abort insert, state:%d", pObj->vnode, pObj->sid, pObj->meterId, pObj->state); - code = TSDB_CODE_NOT_ACTIVE_SESSION; + code = TSDB_CODE_NOT_ACTIVE_TABLE; break; } @@ -629,7 +632,7 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi continue; } - if (!VALID_TIMESTAMP(*((TSKEY *)pData), tsKey, pVnode->cfg.precision)) { + if (!VALID_TIMESTAMP(*((TSKEY *)pData), tsKey, (uint8_t)pVnode->cfg.precision)) { code = TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; break; } @@ -643,8 +646,9 @@ int 
vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi pData += pObj->bytesPerPoint; points++; } - __sync_fetch_and_add(&(pVnode->vnodeStatistic.pointsWritten), points * (pObj->numOfColumns - 1)); - __sync_fetch_and_add(&(pVnode->vnodeStatistic.totalStorage), points * pObj->bytesPerPoint); + + atomic_fetch_add_64(&(pVnode->vnodeStatistic.pointsWritten), points * (pObj->numOfColumns - 1)); + atomic_fetch_add_64(&(pVnode->vnodeStatistic.totalStorage), points * pObj->bytesPerPoint); pthread_mutex_lock(&(pVnode->vmutex)); @@ -655,6 +659,8 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi pVnode->version++; pthread_mutex_unlock(&(pVnode->vmutex)); + + vnodeClearMeterState(pObj, TSDB_METER_STATE_INSERTING); _over: dTrace("vid:%d sid:%d id:%s, %d out of %d points are inserted, lastKey:%ld source:%d, vnode total storage: %ld", @@ -712,14 +718,15 @@ void vnodeUpdateMeter(void *param, void *tmrId) { SVnodeObj* pVnode = &vnodeList[pNew->vnode]; if (pVnode->meterList == NULL) { - dTrace("vid:%d sid:%d id:%s, vnode is deleted, abort update schema", pNew->vnode, pNew->sid, pNew->meterId); + dTrace("vid:%d sid:%d id:%s, vnode is deleted, status:%s, abort update schema", + pNew->vnode, pNew->sid, pNew->meterId, taosGetVnodeStatusStr(vnodeList[pNew->vnode].vnodeStatus)); free(pNew->schema); free(pNew); return; } SMeterObj *pObj = pVnode->meterList[pNew->sid]; - if (pObj == NULL || vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETING)) { + if (pObj == NULL || vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) { dTrace("vid:%d sid:%d id:%s, meter is deleted, abort update schema", pNew->vnode, pNew->sid, pNew->meterId); free(pNew->schema); free(pNew); @@ -727,7 +734,7 @@ void vnodeUpdateMeter(void *param, void *tmrId) { } int32_t state = vnodeSetMeterState(pObj, TSDB_METER_STATE_UPDATING); - if (state >= TSDB_METER_STATE_DELETING) { + if (state >= TSDB_METER_STATE_DROPPING) { dError("vid:%d sid:%d id:%s, meter is deleted, failed to update, state:%d", pObj->vnode, pObj->sid, pObj->meterId, state); return; diff --git a/src/system/detail/src/vnodeQueryImpl.c b/src/system/detail/src/vnodeQueryImpl.c index 187269d30b1b374778dc5646677a2d275a3bce27..a34a7bd06a67c1e216d509f7908f47e82ca4f12f 100644 --- a/src/system/detail/src/vnodeQueryImpl.c +++ b/src/system/detail/src/vnodeQueryImpl.c @@ -31,6 +31,7 @@ #include "vnodeDataFilterFunc.h" #include "vnodeFile.h" #include "vnodeQueryImpl.h" +#include "vnodeStatus.h" enum { TS_JOIN_TS_EQUAL = 0, @@ -38,16 +39,22 @@ enum { TS_JOIN_TAG_NOT_EQUALS = 2, }; +enum { + DISK_BLOCK_NO_NEED_TO_LOAD = 0, + DISK_BLOCK_LOAD_TS = 1, + DISK_BLOCK_LOAD_BLOCK = 2, +}; + #define IS_DISK_DATA_BLOCK(q) ((q)->fileId >= 0) -static int32_t copyDataFromMMapBuffer(int fd, SQInfo *pQInfo, SQueryFileInfo *pQueryFile, char *buf, uint64_t offset, - int32_t size); -static int32_t readDataFromDiskFile(int fd, SQInfo *pQInfo, SQueryFileInfo *pQueryFile, char *buf, uint64_t offset, +// static int32_t copyDataFromMMapBuffer(int fd, SQInfo *pQInfo, SQueryFilesInfo *pQueryFile, char *buf, uint64_t +// offset, int32_t size); +static int32_t readDataFromDiskFile(int fd, SQInfo *pQInfo, SQueryFilesInfo *pQueryFile, char *buf, uint64_t offset, int32_t size); -__read_data_fn_t readDataFunctor[2] = {copyDataFromMMapBuffer, readDataFromDiskFile}; +//__read_data_fn_t readDataFunctor[2] = {copyDataFromMMapBuffer, readDataFromDiskFile}; -static void vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadInfo); +static void 
vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadInfo); static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __block_search_fn_t searchFn, bool loadData); static int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, @@ -57,7 +64,7 @@ static int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, static TSKEY getTimestampInCacheBlock(SCacheBlock *pBlock, int32_t index); static TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index); -static void savePointPosition(SPositionInfo *position, int32_t fileId, int32_t slot, int32_t pos); +static void savePointPosition(SPositionInfo *position, int32_t fileId, int32_t slot, int32_t pos); static int32_t getNextDataFileCompInfo(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t step); static void setGroupOutputBuffer(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes *pResult); @@ -67,18 +74,18 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, char *sdata, SField *pFields, __block_search_fn_t searchFn); -static void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult); -static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, char *data, - int64_t *pPrimaryData, SBlockInfo *pBlockInfo, int32_t blockStatus, - SField *pFields, __block_search_fn_t searchFn); +static int32_t saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult); +static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, char *data, + int64_t *pPrimaryData, SBlockInfo *pBlockInfo, int32_t blockStatus, + SField *pFields, __block_search_fn_t searchFn); -static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx); -static void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, - const SQueryRuntimeEnv *pRuntimeEnv); -static void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t numOfIncrementRes); -static void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t vid); -static void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn); -static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); +static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx); +static int32_t flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, + const SQueryRuntimeEnv *pRuntimeEnv); +static void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t numOfIncrementRes); +static void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t vid); +static void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn); +static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); // check the offset value integrity static FORCE_INLINE int32_t validateHeaderOffsetSegment(SQInfo *pQInfo, char *filePath, int32_t vid, char *data, @@ -99,7 +106,7 @@ static FORCE_INLINE int32_t getCompHeaderStartPosition(SVnodeCfg *pCfg) { } static FORCE_INLINE int32_t validateCompBlockOffset(SQInfo *pQInfo, SMeterObj *pMeterObj, SCompHeader *pCompHeader, - SQueryFileInfo *pQueryFileInfo, int32_t headerSize) { + SQueryFilesInfo *pQueryFileInfo, int32_t 
headerSize) { if (pCompHeader->compInfoOffset < headerSize || pCompHeader->compInfoOffset > pQueryFileInfo->headFileSize) { dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%d is not valid, size:%ld", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pCompHeader->compInfoOffset, pQueryFileInfo->headFileSize); @@ -111,7 +118,7 @@ static FORCE_INLINE int32_t validateCompBlockOffset(SQInfo *pQInfo, SMeterObj *p } // check compinfo integrity -static FORCE_INLINE int32_t validateCompBlockInfoSegment(SQInfo *pQInfo, char *filePath, int32_t vid, +static FORCE_INLINE int32_t validateCompBlockInfoSegment(SQInfo *pQInfo, const char *filePath, int32_t vid, SCompInfo *compInfo, int64_t offset) { if (!taosCheckChecksumWhole((uint8_t *)compInfo, sizeof(SCompInfo))) { dLError("QInfo:%p vid:%d, failed to read header file:%s, file compInfo broken, offset:%lld", pQInfo, vid, filePath, @@ -121,8 +128,8 @@ static FORCE_INLINE int32_t validateCompBlockInfoSegment(SQInfo *pQInfo, char *f return 0; } -static FORCE_INLINE int32_t validateCompBlockSegment(SQInfo *pQInfo, char *filePath, SCompInfo *compInfo, char *pBlock, - int32_t vid, TSCKSUM checksum) { +static FORCE_INLINE int32_t validateCompBlockSegment(SQInfo *pQInfo, const char *filePath, SCompInfo *compInfo, + char *pBlock, int32_t vid, TSCKSUM checksum) { uint32_t size = compInfo->numOfBlocks * sizeof(SCompBlock); if (checksum != taosCalcChecksum(0, (uint8_t *)pBlock, size)) { @@ -195,7 +202,8 @@ static bool vnodeIsCompBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj // if vnodeFreeFields is called, the pQuery->pFields is NULL if (pLoadCompBlockInfo->fileListIndex == fileIndex && pLoadCompBlockInfo->sid == pMeterObj->sid && pQuery->pFields != NULL && pQuery->fileId > 0) { - assert(pRuntimeEnv->pHeaderFiles[fileIndex].fileID == pLoadCompBlockInfo->fileId && pQuery->numOfBlocks > 0); + assert(pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex].fileID == pLoadCompBlockInfo->fileId && + pQuery->numOfBlocks > 0); return true; } @@ -207,7 +215,7 @@ static void vnodeSetCompBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, int32_t f pLoadCompBlockInfo->sid = sid; pLoadCompBlockInfo->fileListIndex = fileIndex; - pLoadCompBlockInfo->fileId = pRuntimeEnv->pHeaderFiles[fileIndex].fileID; + pLoadCompBlockInfo->fileId = pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex].fileID; } static void vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadInfo) { @@ -216,7 +224,8 @@ static void vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadIn pCompBlockLoadInfo->fileListIndex = -1; } -static bool vnodeIsDatablockLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex) { +static int32_t vnodeIsDatablockLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex, + bool loadPrimaryTS) { SQuery * pQuery = pRuntimeEnv->pQuery; SQueryLoadBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; @@ -224,13 +233,20 @@ static bool vnodeIsDatablockLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMe if (pLoadInfo->fileId == pQuery->fileId && pLoadInfo->slotIdx == pQuery->slot && pQuery->slot != -1 && pLoadInfo->sid == pMeterObj->sid) { assert(fileIndex == pLoadInfo->fileListIndex); - return true; + + // previous load operation does not load the primary timestamp column, we only need to load the timestamp column + if (pLoadInfo->tsLoaded == false && pLoadInfo->tsLoaded != loadPrimaryTS) { + return DISK_BLOCK_LOAD_TS; + } else { + return DISK_BLOCK_NO_NEED_TO_LOAD; + } } - return false; + return 
DISK_BLOCK_LOAD_BLOCK; } -static void vnodeSetDataBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex) { +static void vnodeSetDataBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex, + bool tsLoaded) { SQuery * pQuery = pRuntimeEnv->pQuery; SQueryLoadBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; @@ -238,6 +254,7 @@ static void vnodeSetDataBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj pLoadInfo->slotIdx = pQuery->slot; pLoadInfo->fileListIndex = fileIndex; pLoadInfo->sid = pMeterObj->sid; + pLoadInfo->tsLoaded = tsLoaded; } static void vnodeInitDataBlockInfo(SQueryLoadBlockInfo *pBlockLoadInfo) { @@ -247,6 +264,195 @@ static void vnodeInitDataBlockInfo(SQueryLoadBlockInfo *pBlockLoadInfo) { pBlockLoadInfo->fileListIndex = -1; } +static void vnodeSetOpenedFileNames(SQueryFilesInfo *pVnodeFilesInfo) { + assert(pVnodeFilesInfo->current >= 0 && pVnodeFilesInfo->current < pVnodeFilesInfo->numOfFiles); + + SHeaderFileInfo *pCurrentFileInfo = &pVnodeFilesInfo->pFileInfo[pVnodeFilesInfo->current]; + + /* + * set the full file path for current opened files + * the maximum allowed path string length is PATH_MAX in Linux, 100 bytes is used to + * suppress the compiler warnings + */ + char str[PATH_MAX + 100] = {0}; + int32_t PATH_WITH_EXTRA = PATH_MAX + 100; + + int32_t vnodeId = pVnodeFilesInfo->vnodeId; + int32_t fileId = pCurrentFileInfo->fileID; + + int32_t len = snprintf(str, PATH_WITH_EXTRA, "%sv%df%d.head", pVnodeFilesInfo->dbFilePathPrefix, vnodeId, fileId); + assert(len <= PATH_MAX); + + strncpy(pVnodeFilesInfo->headerFilePath, str, PATH_MAX); + + len = snprintf(str, PATH_WITH_EXTRA, "%sv%df%d.data", pVnodeFilesInfo->dbFilePathPrefix, vnodeId, fileId); + assert(len <= PATH_MAX); + + strncpy(pVnodeFilesInfo->dataFilePath, str, PATH_MAX); + + len = snprintf(str, PATH_WITH_EXTRA, "%sv%df%d.last", pVnodeFilesInfo->dbFilePathPrefix, vnodeId, fileId); + assert(len <= PATH_MAX); + + strncpy(pVnodeFilesInfo->lastFilePath, str, PATH_MAX); +} + +/** + * if the header is smaller than a threshold value(header size + initial offset value) + * + * @param vnodeId + * @param headerFileSize + * @return + */ +static FORCE_INLINE bool isHeaderFileEmpty(int32_t vnodeId, size_t headerFileSize) { + SVnodeCfg *pVnodeCfg = &vnodeList[vnodeId].cfg; + return headerFileSize <= getCompHeaderStartPosition(pVnodeCfg); +} + +static bool checkIsHeaderFileEmpty(SQueryFilesInfo *pVnodeFilesInfo, int32_t vnodeId) { + struct stat fstat = {0}; + if (stat(pVnodeFilesInfo->headerFilePath, &fstat) < 0) { + return true; + } + + pVnodeFilesInfo->headFileSize = fstat.st_size; + + return isHeaderFileEmpty(vnodeId, pVnodeFilesInfo->headFileSize); +} + +static void doCloseQueryFileInfoFD(SQueryFilesInfo *pVnodeFilesInfo) { + tclose(pVnodeFilesInfo->headerFd); + tclose(pVnodeFilesInfo->dataFd); + tclose(pVnodeFilesInfo->lastFd); +} + +static void doInitQueryFileInfoFD(SQueryFilesInfo *pVnodeFilesInfo) { + pVnodeFilesInfo->current = -1; + pVnodeFilesInfo->headFileSize = -1; + + pVnodeFilesInfo->headerFd = FD_INITIALIZER; // set the initial value + pVnodeFilesInfo->dataFd = FD_INITIALIZER; + pVnodeFilesInfo->lastFd = FD_INITIALIZER; +} + +/* + * clean memory and other corresponding resources are delegated to invoker + */ +static int32_t doOpenQueryFileData(SQInfo *pQInfo, SQueryFilesInfo *pVnodeFileInfo, int32_t vnodeId) { + SHeaderFileInfo *pHeaderFileInfo = &pVnodeFileInfo->pFileInfo[pVnodeFileInfo->current]; + + pVnodeFileInfo->headerFd = 
open(pVnodeFileInfo->headerFilePath, O_RDONLY); + if (!FD_VALID(pVnodeFileInfo->headerFd)) { + dError("QInfo:%p failed open head file:%s reason:%s", pQInfo, pVnodeFileInfo->headerFilePath, strerror(errno)); + return -1; + } + + /* + * current header file is empty or broken, return directly. + * + * if the header is smaller than a threshold value, this file is empty, no need to open these files + * the header file only to be opened, then the check of file size is available. Otherwise, the file may be + * replaced by new header file when opening the header file and then cause the miss check of file size + */ + if (checkIsHeaderFileEmpty(pVnodeFileInfo, vnodeId)) { + qTrace("QInfo:%p vid:%d, fileId:%d, index:%d, size:%d, ignore file, empty or broken", pQInfo, + pVnodeFileInfo->vnodeId, pHeaderFileInfo->fileID, pVnodeFileInfo->current, pVnodeFileInfo->headFileSize); + + return -1; + } + + pVnodeFileInfo->dataFd = open(pVnodeFileInfo->dataFilePath, O_RDONLY); + if (!FD_VALID(pVnodeFileInfo->dataFd)) { + dError("QInfo:%p failed open data file:%s reason:%s", pQInfo, pVnodeFileInfo->dataFilePath, strerror(errno)); + return -1; + } + + pVnodeFileInfo->lastFd = open(pVnodeFileInfo->lastFilePath, O_RDONLY); + if (!FD_VALID(pVnodeFileInfo->lastFd)) { + dError("QInfo:%p failed open last file:%s reason:%s", pQInfo, pVnodeFileInfo->lastFilePath, strerror(errno)); + return -1; + } + +// pVnodeFileInfo->pHeaderFileData = +// mmap(NULL, pVnodeFileInfo->headFileSize, PROT_READ, MAP_SHARED, pVnodeFileInfo->headerFd, 0); +// +// if (pVnodeFileInfo->pHeaderFileData == MAP_FAILED) { +// pVnodeFileInfo->pHeaderFileData = NULL; +// +// doCloseQueryFileInfoFD(pVnodeFileInfo); +// doInitQueryFileInfoFD(pVnodeFileInfo); +// +// dError("QInfo:%p failed to mmap header file:%s, size:%lld, %s", pQInfo, pVnodeFileInfo->headerFilePath, +// pVnodeFileInfo->headFileSize, strerror(errno)); +// +// return -1; +// } else { +// if (madvise(pVnodeFileInfo->pHeaderFileData, pVnodeFileInfo->headFileSize, MADV_SEQUENTIAL) == -1) { +// dError("QInfo:%p failed to advise kernel the usage of header file, reason:%s", pQInfo, strerror(errno)); +// } +// } + + return TSDB_CODE_SUCCESS; +} + +static void doUnmapHeaderFile(SQueryFilesInfo *pVnodeFileInfo) { + munmap(pVnodeFileInfo->pHeaderFileData, pVnodeFileInfo->headFileSize); + pVnodeFileInfo->pHeaderFileData = NULL; + pVnodeFileInfo->headFileSize = -1; +} + +static void doCloseOpenedFileData(SQueryFilesInfo *pVnodeFileInfo) { + if (pVnodeFileInfo->current >= 0) { + assert(pVnodeFileInfo->current < pVnodeFileInfo->numOfFiles && pVnodeFileInfo->current >= 0); + + doUnmapHeaderFile(pVnodeFileInfo); + doCloseQueryFileInfoFD(pVnodeFileInfo); + doInitQueryFileInfoFD(pVnodeFileInfo); + } + + assert(pVnodeFileInfo->current == -1); +} + +/** + * mmap the data file into memory. For each query, only one header file is allowed to mmap into memory, in order to + * avoid too many memory mapped files at the save time to cause OS return the message of "Cannot allocate memory", + * during query processing. 
+ * + * @param pRuntimeEnv + * @param fileIndex + * @return the return value may be null, so any invoker needs to check the returned value + */ +char *vnodeGetHeaderFileData(SQueryRuntimeEnv *pRuntimeEnv, int32_t vnodeId, int32_t fileIndex) { + assert(fileIndex >= 0 && fileIndex < pRuntimeEnv->vnodeFileInfo.numOfFiles); + + SQuery *pQuery = pRuntimeEnv->pQuery; + SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); // only for log output + + SQueryFilesInfo *pVnodeFileInfo = &pRuntimeEnv->vnodeFileInfo; + + if (pVnodeFileInfo->current != fileIndex || pVnodeFileInfo->pHeaderFileData == NULL) { + if (pVnodeFileInfo->current >= 0) { +// assert(pVnodeFileInfo->pHeaderFileData != NULL); + } + + // do close the current memory mapped header file and corresponding fd + doCloseOpenedFileData(pVnodeFileInfo); + assert(pVnodeFileInfo->pHeaderFileData == NULL); + + // set current opened file Index + pVnodeFileInfo->current = fileIndex; + + // set the current opened files(header, data, last) path + vnodeSetOpenedFileNames(pVnodeFileInfo); + + if (doOpenQueryFileData(pQInfo, pVnodeFileInfo, vnodeId) != TSDB_CODE_SUCCESS) { + doCloseOpenedFileData(pVnodeFileInfo); // all the fds may be partially opened, close them anyway. + return pVnodeFileInfo->pHeaderFileData; + } + } + + return 1;//pVnodeFileInfo->pHeaderFileData; +} + /* * read comp block info from header file * @@ -255,15 +461,14 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim SQuery *pQuery = pRuntimeEnv->pQuery; SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); - SVnodeCfg * pCfg = &vnodeList[pMeterObj->vnode].cfg; - SQueryFileInfo *pQueryFileInfo = &pRuntimeEnv->pHeaderFiles[fileIndex]; - int32_t fd = pQueryFileInfo->headerFd; + SVnodeCfg * pCfg = &vnodeList[pMeterObj->vnode].cfg; + SHeaderFileInfo *pHeadeFileInfo = &pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex]; int64_t st = taosGetTimestampUs(); if (vnodeIsCompBlockInfoLoaded(pRuntimeEnv, pMeterObj, fileIndex)) { dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d compBlock info is loaded, not reload", GET_QINFO_ADDR(pQuery), - pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQueryFileInfo->fileID); + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pHeadeFileInfo->fileID); return pQuery->numOfBlocks; } @@ -273,43 +478,63 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim pSummary->numOfSeek++; #if 1 - char *data = pRuntimeEnv->pHeaderFiles[fileIndex].pHeaderFileData; - UNUSED(fd); + char *data = vnodeGetHeaderFileData(pRuntimeEnv, pMeterObj->vnode, fileIndex); + if (data == NULL) { + return -1; // failed to load the header file data into memory + } + #else char *data = calloc(1, tmsize + TSDB_FILE_HEADER_LEN); read(fd, data, tmsize + TSDB_FILE_HEADER_LEN); #endif + + int64_t offset = TSDB_FILE_HEADER_LEN + sizeof(SCompHeader) * pMeterObj->sid; +#if 0 // check the offset value integrity - if (validateHeaderOffsetSegment(pQInfo, pQueryFileInfo->headerFilePath, pMeterObj->vnode, data, + if (validateHeaderOffsetSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, pMeterObj->vnode, data, getCompHeaderSegSize(pCfg)) < 0) { return -1; } - int64_t offset = TSDB_FILE_HEADER_LEN + sizeof(SCompHeader) * pMeterObj->sid; SCompHeader *compHeader = (SCompHeader *)(data + offset); - // no data in this file for specified meter, abort if (compHeader->compInfoOffset == 0) { return 0; } // corrupted file may cause the invalid compInfoOffset, check needs - if (validateCompBlockOffset(pQInfo, pMeterObj, compHeader, pQueryFileInfo, 
getCompHeaderStartPosition(pCfg)) < 0) { + if (validateCompBlockOffset(pQInfo, pMeterObj, compHeader, &pRuntimeEnv->vnodeFileInfo, + getCompHeaderStartPosition(pCfg)) < 0) { return -1; } +#else + char* buf = calloc(1, getCompHeaderSegSize(pCfg)); + SQueryFilesInfo *pVnodeFileInfo = &pRuntimeEnv->vnodeFileInfo; + + lseek(pVnodeFileInfo->headerFd, TSDB_FILE_HEADER_LEN, SEEK_SET); + read(pVnodeFileInfo->headerFd, buf, getCompHeaderSegSize(pCfg)); + + // check the offset value integrity + if (validateHeaderOffsetSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, pMeterObj->vnode, buf - TSDB_FILE_HEADER_LEN, + getCompHeaderSegSize(pCfg)) < 0) { + return -1; + } +#endif -#if 1 +#if 0 SCompInfo *compInfo = (SCompInfo *)(data + compHeader->compInfoOffset); #else - lseek(fd, compHeader->compInfoOffset, SEEK_SET); + SCompHeader *compHeader = (SCompHeader *)(buf + sizeof(SCompHeader) * pMeterObj->sid); + lseek(pVnodeFileInfo->headerFd, compHeader->compInfoOffset, SEEK_SET); + SCompInfo CompInfo = {0}; SCompInfo *compInfo = &CompInfo; - read(fd, compInfo, sizeof(SCompInfo)); + read(pVnodeFileInfo->headerFd, compInfo, sizeof(SCompInfo)); #endif // check compblock info integrity - if (validateCompBlockInfoSegment(pQInfo, pQueryFileInfo->headerFilePath, pMeterObj->vnode, compInfo, + if (validateCompBlockInfoSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, pMeterObj->vnode, compInfo, compHeader->compInfoOffset) < 0) { return -1; } @@ -333,19 +558,19 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim memset(pQuery->pBlock, 0, (size_t)pQuery->blockBufferSize); -#if 1 +#if 0 memcpy(pQuery->pBlock, (char *)compInfo + sizeof(SCompInfo), (size_t)compBlockSize); TSCKSUM checksum = *(TSCKSUM *)((char *)compInfo + sizeof(SCompInfo) + compBlockSize); #else TSCKSUM checksum; - read(fd, pQuery->pBlock, compBlockSize); - read(fd, &checksum, sizeof(TSCKSUM)); + read(pVnodeFileInfo->headerFd, pQuery->pBlock, compBlockSize); + read(pVnodeFileInfo->headerFd, &checksum, sizeof(TSCKSUM)); #endif // check comp block integrity - if (validateCompBlockSegment(pQInfo, pQueryFileInfo->headerFilePath, compInfo, (char *)pQuery->pBlock, + if (validateCompBlockSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, compInfo, (char *)pQuery->pBlock, pMeterObj->vnode, checksum) < 0) { - return -1; + return -1; //TODO free resource in error process } pQuery->pFields = (SField **)((char *)pQuery->pBlock + compBlockSize); @@ -353,12 +578,13 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim int64_t et = taosGetTimestampUs(); qTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, load compblock info, size:%d, elapsed:%f ms", pQInfo, - pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pRuntimeEnv->pHeaderFiles[fileIndex].fileID, + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex].fileID, compBlockSize, (et - st) / 1000.0); pSummary->totalCompInfoSize += compBlockSize; pSummary->loadCompInfoUs += (et - st); - + + free(buf); return pQuery->numOfBlocks; } @@ -379,7 +605,7 @@ static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t StartQue char *primaryColumnData, int32_t size, int32_t functionId, SField *pField, bool hasNull, int32_t blockStatus, void *param, int32_t scanFlag); -void createGroupResultBuf(SQuery *pQuery, SOutputRes *pOneResult, bool isMetricQuery); +void createGroupResultBuf(SQuery *pQuery, SOutputRes *pOneResult, bool isMetricQuery); static void 
destroyGroupResultBuf(SOutputRes *pOneOutputRes, int32_t nOutputCols); static int32_t binarySearchForBlockImpl(SCompBlock *pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) { @@ -413,8 +639,9 @@ static int32_t binarySearchForBlock(SQuery *pQuery, int64_t key) { return binarySearchForBlockImpl(pQuery->pBlock, pQuery->numOfBlocks, key, pQuery->order.order); } +#if 0 /* unmap previous buffer */ -static UNUSED_FUNC int32_t resetMMapWindow(SQueryFileInfo *pQueryFileInfo) { +static UNUSED_FUNC int32_t resetMMapWindow(SHeaderFileInfo *pQueryFileInfo) { munmap(pQueryFileInfo->pDataFileData, pQueryFileInfo->defaultMappingSize); pQueryFileInfo->dtFileMappingOffset = 0; @@ -428,7 +655,7 @@ static UNUSED_FUNC int32_t resetMMapWindow(SQueryFileInfo *pQueryFileInfo) { return 0; } -static int32_t moveMMapWindow(SQueryFileInfo *pQueryFileInfo, uint64_t offset) { +static int32_t moveMMapWindow(SHeaderFileInfo *pQueryFileInfo, uint64_t offset) { uint64_t upperBnd = (pQueryFileInfo->dtFileMappingOffset + pQueryFileInfo->defaultMappingSize - 1); /* data that are located in current mmapping window */ @@ -439,10 +666,10 @@ static int32_t moveMMapWindow(SQueryFileInfo *pQueryFileInfo, uint64_t offset) { } /* - * 1. there is import data that locate farther from the beginning, but with less timestamp, so we need to move the - * window backwards - * 2. otherwise, move the mmaping window forward - */ + * 1. there is import data that locate farther from the beginning, but with less timestamp, so we need to move the + * window backwards + * 2. otherwise, move the mmaping window forward + */ upperBnd = (offset / pQueryFileInfo->defaultMappingSize + 1) * pQueryFileInfo->defaultMappingSize - 1; /* unmap previous buffer */ @@ -475,7 +702,7 @@ static int32_t moveMMapWindow(SQueryFileInfo *pQueryFileInfo, uint64_t offset) { return 0; } -static int32_t copyDataFromMMapBuffer(int fd, SQInfo *pQInfo, SQueryFileInfo *pQueryFile, char *buf, uint64_t offset, +static int32_t copyDataFromMMapBuffer(int fd, SQInfo *pQInfo, SHeaderFileInfo *pQueryFile, char *buf, uint64_t offset, int32_t size) { assert(size >= 0); @@ -526,7 +753,9 @@ static int32_t copyDataFromMMapBuffer(int fd, SQInfo *pQInfo, SQueryFileInfo *pQ return 0; } -static int32_t readDataFromDiskFile(int fd, SQInfo *pQInfo, SQueryFileInfo *pQueryFile, char *buf, uint64_t offset, +#endif + +static int32_t readDataFromDiskFile(int fd, SQInfo *pQInfo, SQueryFilesInfo *pQueryFile, char *buf, uint64_t offset, int32_t size) { assert(size >= 0); @@ -541,7 +770,7 @@ static int32_t readDataFromDiskFile(int fd, SQInfo *pQInfo, SQueryFileInfo *pQue return 0; } -static int32_t loadColumnIntoMem(SQuery *pQuery, SQueryFileInfo *pQueryFileInfo, SCompBlock *pBlock, SField *pFields, +static int32_t loadColumnIntoMem(SQuery *pQuery, SQueryFilesInfo *pQueryFileInfo, SCompBlock *pBlock, SField *pFields, int32_t col, SData *sdata, void *tmpBuf, char *buffer, int32_t buffersize) { char *dst = (pBlock->algorithm) ? tmpBuf : sdata->data; @@ -549,15 +778,14 @@ static int32_t loadColumnIntoMem(SQuery *pQuery, SQueryFileInfo *pQueryFileInfo, SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); int fd = pBlock->last ? 
pQueryFileInfo->lastFd : pQueryFileInfo->dataFd; - int32_t ret = (*readDataFunctor[DEFAULT_IO_ENGINE])(fd, pQInfo, pQueryFileInfo, dst, offset, pFields[col].len); + int32_t ret = readDataFromDiskFile(fd, pQInfo, pQueryFileInfo, dst, offset, pFields[col].len); if (ret != 0) { return ret; } // load checksum TSCKSUM checksum = 0; - ret = (*readDataFunctor[DEFAULT_IO_ENGINE])(fd, pQInfo, pQueryFileInfo, (char *)&checksum, offset + pFields[col].len, - sizeof(TSCKSUM)); + ret = readDataFromDiskFile(fd, pQInfo, pQueryFileInfo, (char *)&checksum, offset + pFields[col].len, sizeof(TSCKSUM)); if (ret != 0) { return ret; } @@ -578,11 +806,11 @@ static int32_t loadColumnIntoMem(SQuery *pQuery, SQueryFileInfo *pQueryFileInfo, return 0; } -static int32_t loadDataBlockFieldsInfo(SQueryRuntimeEnv *pRuntimeEnv, SQueryFileInfo *pQueryFileInfo, - SCompBlock *pBlock, SField **pField) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); - SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; +static int32_t loadDataBlockFieldsInfo(SQueryRuntimeEnv *pRuntimeEnv, SCompBlock *pBlock, SField **pField) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; + SQueryFilesInfo *pVnodeFilesInfo = &pRuntimeEnv->vnodeFileInfo; size_t size = sizeof(SField) * (pBlock->numOfCols) + sizeof(TSCKSUM); @@ -598,9 +826,8 @@ static int32_t loadDataBlockFieldsInfo(SQueryRuntimeEnv *pRuntimeEnv, SQueryFile int64_t st = taosGetTimestampUs(); - int fd = pBlock->last ? pQueryFileInfo->lastFd : pQueryFileInfo->dataFd; - int32_t ret = - (*readDataFunctor[DEFAULT_IO_ENGINE])(fd, pQInfo, pQueryFileInfo, (char *)(*pField), pBlock->offset, size); + int fd = pBlock->last ? pVnodeFilesInfo->lastFd : pVnodeFilesInfo->dataFd; + int32_t ret = readDataFromDiskFile(fd, pQInfo, pVnodeFilesInfo, (char *)(*pField), pBlock->offset, size); if (ret != 0) { return ret; } @@ -608,7 +835,7 @@ static int32_t loadDataBlockFieldsInfo(SQueryRuntimeEnv *pRuntimeEnv, SQueryFile // check fields integrity if (!taosCheckChecksumWhole((uint8_t *)(*pField), size)) { dLError("QInfo:%p vid:%d sid:%d id:%s, slot:%d, failed to read sfields, file:%s, sfields area broken:%lld", pQInfo, - pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pQueryFileInfo->dataFilePath, + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pVnodeFilesInfo->dataFilePath, pBlock->offset); return -1; } @@ -628,6 +855,21 @@ static void fillWithNull(SQuery *pQuery, char *dst, int32_t col, int32_t numOfPo setNullN(dst, type, bytes, numOfPoints); } +static int32_t loadPrimaryTSColumn(SQueryRuntimeEnv *pRuntimeEnv, SCompBlock *pBlock, SField **pField, + int32_t *columnBytes) { + SQuery *pQuery = pRuntimeEnv->pQuery; + assert(PRIMARY_TSCOL_LOADED(pQuery) == false); + + if (columnBytes != NULL) { + (*columnBytes) += (*pField)[PRIMARYKEY_TIMESTAMP_COL_INDEX].len + sizeof(TSCKSUM); + } + + int32_t ret = loadColumnIntoMem(pQuery, &pRuntimeEnv->vnodeFileInfo, pBlock, *pField, PRIMARYKEY_TIMESTAMP_COL_INDEX, + pRuntimeEnv->primaryColBuffer, pRuntimeEnv->unzipBuffer, + pRuntimeEnv->secondaryUnzipBuffer, pRuntimeEnv->unzipBufSize); + return ret; +} + static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIdx, bool loadPrimaryCol, bool loadSField) { int32_t i = 0, j = 0; @@ -636,38 +878,57 @@ static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryR SMeterObj *pMeterObj 
= pRuntimeEnv->pMeterObj; SData ** sdata = pRuntimeEnv->colDataBuffer; - SQueryFileInfo *pQueryFileInfo = &pRuntimeEnv->pHeaderFiles[fileIdx]; - SData ** primaryTSBuf = &pRuntimeEnv->primaryColBuffer; - void * tmpBuf = pRuntimeEnv->unzipBuffer; + assert(fileIdx == pRuntimeEnv->vnodeFileInfo.current); - if (vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, fileIdx)) { - dTrace("QInfo:%p vid:%d sid:%d id:%s, data block has been loaded, ts:%d, slot:%d, brange:%lld-%lld, rows:%d", - GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, loadPrimaryCol, pQuery->slot, - pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints); + SData **primaryTSBuf = &pRuntimeEnv->primaryColBuffer; + void * tmpBuf = pRuntimeEnv->unzipBuffer; + int32_t columnBytes = 0; - return 0; + SQueryCostSummary *pSummary = &pRuntimeEnv->summary; + + int32_t status = vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, fileIdx, loadPrimaryCol); + if (status == DISK_BLOCK_NO_NEED_TO_LOAD) { + dTrace( + "QInfo:%p vid:%d sid:%d id:%s, fileId:%d, data block has been loaded, no need to load again, ts:%d, slot:%d, " + "brange:%lld-%lld, rows:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, loadPrimaryCol, + pQuery->slot, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints); + + if (loadSField && (pQuery->pFields == NULL || pQuery->pFields[pQuery->slot] == NULL)) { + loadDataBlockFieldsInfo(pRuntimeEnv, pBlock, &pQuery->pFields[pQuery->slot]); + } + + return TSDB_CODE_SUCCESS; + } else if (status == DISK_BLOCK_LOAD_TS) { + dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, data block has been loaded, incrementally load ts", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId); + + assert(PRIMARY_TSCOL_LOADED(pQuery) == false && loadSField == true); + if (pQuery->pFields == NULL || pQuery->pFields[pQuery->slot] == NULL) { + loadDataBlockFieldsInfo(pRuntimeEnv, pBlock, &pQuery->pFields[pQuery->slot]); + } + + // load primary timestamp + int32_t ret = loadPrimaryTSColumn(pRuntimeEnv, pBlock, pField, &columnBytes); + + vnodeSetDataBlockInfoLoaded(pRuntimeEnv, pMeterObj, fileIdx, loadPrimaryCol); + return ret; } /* failed to load fields info, return with error info */ - if (loadSField && (loadDataBlockFieldsInfo(pRuntimeEnv, pQueryFileInfo, pBlock, pField) != 0)) { + if (loadSField && (loadDataBlockFieldsInfo(pRuntimeEnv, pBlock, pField) != 0)) { return -1; } - SQueryCostSummary *pSummary = &pRuntimeEnv->summary; - int32_t columnBytes = 0; - int64_t st = taosGetTimestampUs(); if (loadPrimaryCol) { if (PRIMARY_TSCOL_LOADED(pQuery)) { *primaryTSBuf = sdata[0]; } else { - columnBytes += (*pField)[PRIMARYKEY_TIMESTAMP_COL_INDEX].len + sizeof(TSCKSUM); - int32_t ret = - loadColumnIntoMem(pQuery, pQueryFileInfo, pBlock, *pField, PRIMARYKEY_TIMESTAMP_COL_INDEX, *primaryTSBuf, - tmpBuf, pRuntimeEnv->secondaryUnzipBuffer, pRuntimeEnv->unzipBufSize); - if (ret != 0) { - return -1; + int32_t ret = loadPrimaryTSColumn(pRuntimeEnv, pBlock, pField, &columnBytes); + if (ret != TSDB_CODE_SUCCESS) { + return ret; } pSummary->numOfSeek++; @@ -702,7 +963,7 @@ static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryR fillWithNull(pQuery, sdata[i]->data, i, pBlock->numOfPoints); } else { columnBytes += (*pField)[j].len + sizeof(TSCKSUM); - ret = loadColumnIntoMem(pQuery, pQueryFileInfo, pBlock, *pField, j, sdata[i], tmpBuf, + ret = loadColumnIntoMem(pQuery, &pRuntimeEnv->vnodeFileInfo, pBlock, *pField, j, sdata[i], tmpBuf, 
pRuntimeEnv->secondaryUnzipBuffer, pRuntimeEnv->unzipBufSize); pSummary->numOfSeek++; @@ -741,7 +1002,7 @@ static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryR pSummary->loadBlocksUs += (et - st); pSummary->readDiskBlocks++; - vnodeSetDataBlockInfoLoaded(pRuntimeEnv, pMeterObj, fileIdx); + vnodeSetDataBlockInfoLoaded(pRuntimeEnv, pMeterObj, fileIdx, loadPrimaryCol); return ret; } @@ -850,9 +1111,11 @@ SCacheBlock *getCacheDataBlock(SMeterObj *pMeterObj, SQuery *pQuery, int32_t slo } if (pMeterObj != pBlock->pMeterObj || pBlock->blockId > pQuery->blockId) { - dWarn("QInfo:%p vid:%d sid:%d id:%s, cache block is overwritten, slot:%d blockId:%d qBlockId:%d, meterObj:%p, blockMeterObj:%p", - GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pBlock->blockId, - pQuery->blockId, pMeterObj, pBlock->pMeterObj); + dWarn( + "QInfo:%p vid:%d sid:%d id:%s, cache block is overwritten, slot:%d blockId:%d qBlockId:%d, meterObj:%p, " + "blockMeterObj:%p", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pBlock->blockId, + pQuery->blockId, pMeterObj, pBlock->pMeterObj); return NULL; } @@ -874,7 +1137,7 @@ static void *getGenericDataBlock(SMeterObj *pMeterObj, SQuery *pQuery, int32_t s static int32_t getFileIdFromKey(int32_t vid, TSKEY key) { SVnodeObj *pVnode = &vnodeList[vid]; - int64_t delta = (int64_t)pVnode->cfg.daysPerFile * tsMsPerDay[pVnode->cfg.precision]; + int64_t delta = (int64_t)pVnode->cfg.daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision]; return (int32_t)(key / delta); // set the starting fileId } @@ -938,7 +1201,7 @@ static bool getQualifiedDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRunti break; } - dError("QInfo:%p fileId:%d total numOfBlks:%d blockId:%d into memory failed due to error in disk files", + dError("QInfo:%p fileId:%d total numOfBlks:%d blockId:%d load into memory failed due to error in disk files", GET_QINFO_ADDR(pQuery), pQuery->fileId, pQuery->numOfBlocks, blkIdx); blkIdx += step; } @@ -1110,9 +1373,6 @@ static int32_t blockwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; -// if (!functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { -// continue; -// } SField dummyField = {0}; @@ -1135,8 +1395,8 @@ static int32_t blockwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->skey : pQuery->ekey; - int64_t alignedTimestamp = taosGetIntervalStartTimestamp(ts, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, - pQuery->precision); + int64_t alignedTimestamp = + taosGetIntervalStartTimestamp(ts, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); setExecParams(pQuery, &pCtx[k], alignedTimestamp, dataBlock, (char *)primaryKeyCol, forwardStep, functionId, tpField, hasNull, pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); } @@ -1168,11 +1428,12 @@ static int32_t blockwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t * * first filter the data block according to the value filter condition, then, if the top/bottom query applied, * invoke the filter function to decide if the data block need to be accessed or not. 
+ * TODO handle the whole data block is NULL situation * @param pQuery * @param pField * @return */ -static bool needToLoadDataBlock(SQuery *pQuery, SField *pField, SQLFunctionCtx *pCtx) { +static bool needToLoadDataBlock(SQuery *pQuery, SField *pField, SQLFunctionCtx *pCtx, int32_t numOfTotalPoints) { if (pField == NULL) { return false; // no need to load data } @@ -1190,6 +1451,11 @@ static bool needToLoadDataBlock(SQuery *pQuery, SField *pField, SQLFunctionCtx * if (!vnodeSupportPrefilter(pFilterInfo->info.data.type)) { continue; } + + // all points in current column are NULL, no need to check its boundary value + if (pField[colIndex].numOfNullPoints == numOfTotalPoints) { + continue; + } if (pFilterInfo->info.data.type == TSDB_DATA_TYPE_FLOAT) { float minval = *(double *)(&pField[colIndex].min); @@ -1337,7 +1603,7 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { TSKEY key = *(TSKEY *)(pCtx[0].aInputElemBuf + TSDB_KEYSIZE * offset); -#if 1 +#if defined(_DEBUG_VIEW) printf("elem in comp ts file:%lld, key:%lld, tag:%d, id:%s, query order:%d, ts order:%d, traverse:%d, index:%d\n", elem.ts, key, elem.tag, pRuntimeEnv->pMeterObj->meterId, pQuery->order.order, pRuntimeEnv->pTSBuf->tsOrder, pRuntimeEnv->pTSBuf->cur.order, pRuntimeEnv->pTSBuf->cur.tsIndex); @@ -1405,8 +1671,8 @@ static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t * char *dataBlock = getDataBlocks(pRuntimeEnv, data, &sasArray[k], k, *forwardStep, isDiskFileBlock); TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->skey : pQuery->ekey; - int64_t alignedTimestamp = taosGetIntervalStartTimestamp(ts, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, - pQuery->precision); + int64_t alignedTimestamp = + taosGetIntervalStartTimestamp(ts, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); setExecParams(pQuery, &pCtx[k], alignedTimestamp, dataBlock, (char *)primaryKeyCol, (*forwardStep), functionId, pFields, hasNull, pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); @@ -1650,23 +1916,25 @@ static int32_t applyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo * } int32_t vnodeGetVnodeHeaderFileIdx(int32_t *fid, SQueryRuntimeEnv *pRuntimeEnv, int32_t order) { - if (pRuntimeEnv->numOfFiles == 0) { + if (pRuntimeEnv->vnodeFileInfo.numOfFiles == 0) { return -1; } + SQueryFilesInfo *pVnodeFiles = &pRuntimeEnv->vnodeFileInfo; + /* set the initial file for current query */ - if (order == TSQL_SO_ASC && *fid < pRuntimeEnv->pHeaderFiles[0].fileID) { - *fid = pRuntimeEnv->pHeaderFiles[0].fileID; + if (order == TSQL_SO_ASC && *fid < pVnodeFiles->pFileInfo[0].fileID) { + *fid = pVnodeFiles->pFileInfo[0].fileID; return 0; - } else if (order == TSQL_SO_DESC && *fid > pRuntimeEnv->pHeaderFiles[pRuntimeEnv->numOfFiles - 1].fileID) { - *fid = pRuntimeEnv->pHeaderFiles[pRuntimeEnv->numOfFiles - 1].fileID; - return pRuntimeEnv->numOfFiles - 1; + } else if (order == TSQL_SO_DESC && *fid > pVnodeFiles->pFileInfo[pVnodeFiles->numOfFiles - 1].fileID) { + *fid = pVnodeFiles->pFileInfo[pVnodeFiles->numOfFiles - 1].fileID; + return pVnodeFiles->numOfFiles - 1; } - int32_t numOfFiles = pRuntimeEnv->numOfFiles; + int32_t numOfFiles = pVnodeFiles->numOfFiles; - if (order == TSQL_SO_DESC && *fid > pRuntimeEnv->pHeaderFiles[numOfFiles - 1].fileID) { - *fid = pRuntimeEnv->pHeaderFiles[numOfFiles - 1].fileID; + if (order == TSQL_SO_DESC && *fid > pVnodeFiles->pFileInfo[numOfFiles - 1].fileID) { + *fid = pVnodeFiles->pFileInfo[numOfFiles - 1].fileID; return numOfFiles - 
1; } @@ -1674,12 +1942,12 @@ int32_t vnodeGetVnodeHeaderFileIdx(int32_t *fid, SQueryRuntimeEnv *pRuntimeEnv, int32_t i = 0; int32_t step = 1; - while (i pRuntimeEnv->pHeaderFiles[i].fileID) { + while (i pVnodeFiles->pFileInfo[i].fileID) { i += step; } - if (i < numOfFiles && *fid <= pRuntimeEnv->pHeaderFiles[i].fileID) { - *fid = pRuntimeEnv->pHeaderFiles[i].fileID; + if (i < numOfFiles && *fid <= pVnodeFiles->pFileInfo[i].fileID) { + *fid = pVnodeFiles->pFileInfo[i].fileID; return i; } else { return -1; @@ -1688,12 +1956,12 @@ int32_t vnodeGetVnodeHeaderFileIdx(int32_t *fid, SQueryRuntimeEnv *pRuntimeEnv, int32_t i = numOfFiles - 1; int32_t step = -1; - while (i >= 0 && *fid < pRuntimeEnv->pHeaderFiles[i].fileID) { + while (i >= 0 && *fid < pVnodeFiles->pFileInfo[i].fileID) { i += step; } - if (i >= 0 && *fid >= pRuntimeEnv->pHeaderFiles[i].fileID) { - *fid = pRuntimeEnv->pHeaderFiles[i].fileID; + if (i >= 0 && *fid >= pVnodeFiles->pFileInfo[i].fileID) { + *fid = pVnodeFiles->pFileInfo[i].fileID; return i; } else { return -1; @@ -1723,6 +1991,8 @@ int32_t getNextDataFileCompInfo(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeter break; } + + // failed to mmap header file into memory will cause the retrieval of compblock info failed if (vnodeGetCompBlockInfo(pMeterObj, pRuntimeEnv, fid) > 0) { break; } @@ -1821,13 +2091,13 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t startQueryTimes } // set the output buffer for the selectivity + tag query -static void setCtxTagColumnInfo(SQuery* pQuery, SQueryRuntimeEnv* pRuntimeEnv) { +static void setCtxTagColumnInfo(SQuery *pQuery, SQueryRuntimeEnv *pRuntimeEnv) { if (isSelectivityWithTagsQuery(pQuery)) { int32_t num = 0; SQLFunctionCtx *pCtx = NULL; int16_t tagLen = 0; - SQLFunctionCtx ** pTagCtx = calloc(pQuery->numOfOutputCols, POINTER_BYTES); + SQLFunctionCtx **pTagCtx = calloc(pQuery->numOfOutputCols, POINTER_BYTES); for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { SSqlFuncExprMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].pBase; if (pSqlFuncMsg->functionId == TSDB_FUNC_TAG_DUMMY || pSqlFuncMsg->functionId == TSDB_FUNC_TS_DUMMY) { @@ -1840,7 +2110,7 @@ static void setCtxTagColumnInfo(SQuery* pQuery, SQueryRuntimeEnv* pRuntimeEnv) { // ts may be the required primary timestamp column continue; } else { - assert(0); + // the column may be the normal column, group by normal_column, the functionId is TSDB_FUNC_PRJ } } @@ -1973,7 +2243,7 @@ _error_clean: tfree(pRuntimeEnv->resultInfo); tfree(pRuntimeEnv->pCtx); - for(int32_t i = 0; i < pRuntimeEnv->pQuery->numOfCols; ++i) { + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfCols; ++i) { tfree(pRuntimeEnv->colDataBuffer[i]); } @@ -1993,7 +2263,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { } dTrace("QInfo:%p teardown runtime env", GET_QINFO_ADDR(pRuntimeEnv->pQuery)); - for(int32_t i = 0; i < pRuntimeEnv->pQuery->numOfCols; ++i) { + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfCols; ++i) { tfree(pRuntimeEnv->colDataBuffer[i]); } @@ -2023,24 +2293,11 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { tfree(pRuntimeEnv->primaryColBuffer); } - for (int32_t i = 0; i < pRuntimeEnv->numOfFiles; ++i) { - SQueryFileInfo *pQFileInfo = &(pRuntimeEnv->pHeaderFiles[i]); - if (pQFileInfo->pHeaderFileData != NULL && pQFileInfo->pHeaderFileData != MAP_FAILED) { - munmap(pQFileInfo->pHeaderFileData, pQFileInfo->headFileSize); - } - tclose(pQFileInfo->headerFd); - - if (pQFileInfo->pDataFileData != NULL && pQFileInfo->pDataFileData != 
MAP_FAILED) { - munmap(pQFileInfo->pDataFileData, pQFileInfo->defaultMappingSize); - } - - tclose(pQFileInfo->dataFd); - tclose(pQFileInfo->lastFd); - } + doCloseOpenedFileData(&pRuntimeEnv->vnodeFileInfo); - if (pRuntimeEnv->pHeaderFiles != NULL) { - pRuntimeEnv->numOfFiles = 0; - free(pRuntimeEnv->pHeaderFiles); + if (pRuntimeEnv->vnodeFileInfo.pFileInfo != NULL) { + pRuntimeEnv->vnodeFileInfo.numOfFiles = 0; + free(pRuntimeEnv->vnodeFileInfo.pFileInfo); } if (pRuntimeEnv->pInterpoBuf != NULL) { @@ -2059,7 +2316,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { // get maximum time interval in each file static int64_t getOldestKey(int32_t numOfFiles, int64_t fileId, SVnodeCfg *pCfg) { - int64_t duration = pCfg->daysPerFile * tsMsPerDay[pCfg->precision]; + int64_t duration = pCfg->daysPerFile * tsMsPerDay[(uint8_t)pCfg->precision]; return (fileId - numOfFiles + 1) * duration; } @@ -2071,7 +2328,7 @@ bool isQueryKilled(SQuery *pQuery) { * if it will be deleted soon, stop current query ASAP. */ SMeterObj *pMeterObj = pQInfo->pObj; - if (vnodeIsMeterState(pMeterObj, TSDB_METER_STATE_DELETING)) { + if (vnodeIsMeterState(pMeterObj, TSDB_METER_STATE_DROPPING)) { pQInfo->killed = 1; return true; } @@ -2783,19 +3040,24 @@ int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv *pRuntimeEnv, SPositionInfo *p * currently opened file is not the start file, reset to the start file */ int32_t fileIdx = vnodeGetVnodeHeaderFileIdx(&pQuery->fileId, pRuntimeEnv, pQuery->order.order); - if (fileIdx < 0) { + if (fileIdx < 0) { // ignore the files on disk dError("QInfo:%p failed to get data file:%d", GET_QINFO_ADDR(pQuery), pQuery->fileId); - // ignore the files on disk position->fileId = -1; return -1; } /* - * NOTE: the compblock information may not be loaded yet, here loaded it firstly + * NOTE: + * The compblock information may not be loaded yet, here loaded it firstly. * If the compBlock info is loaded, it wont be loaded again. 
+ * + * If failed to load comp block into memory due some how reasons, e.g., empty header file/not enough memory */ int32_t numOfBlocks = vnodeGetCompBlockInfo(pMeterObj, pRuntimeEnv, fileIdx); - assert(numOfBlocks > 0); + if (numOfBlocks <= 0) { + position->fileId = -1; + return -1; + } nextTimestamp = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); } @@ -2843,8 +3105,8 @@ bool vnodeParametersSafetyCheck(SQuery *pQuery) { } static int file_order_comparator(const void *p1, const void *p2) { - SQueryFileInfo *pInfo1 = (SQueryFileInfo *)p1; - SQueryFileInfo *pInfo2 = (SQueryFileInfo *)p2; + SHeaderFileInfo *pInfo1 = (SHeaderFileInfo *)p1; + SHeaderFileInfo *pInfo2 = (SHeaderFileInfo *)p2; if (pInfo1->fileID == pInfo2->fileID) { return 0; @@ -2855,142 +3117,32 @@ static int file_order_comparator(const void *p1, const void *p2) { /** * open a data files and header file for metric meta query - * @param pQInfo + * * @param pVnodeFiles * @param fid - * @param vnodeId - * @param fileName - * @param prefix - * @return + * @param index */ -static int32_t vnodeOpenVnodeDBFiles(SQInfo *pQInfo, SQueryFileInfo *pVnodeFiles, int32_t fid, int32_t vnodeId, - char *fileName, char *prefix) { - __off_t size = 0; - - pVnodeFiles->fileID = fid; - pVnodeFiles->defaultMappingSize = DEFAULT_DATA_FILE_MMAP_WINDOW_SIZE; - - snprintf(pVnodeFiles->headerFilePath, 256, "%s%s", prefix, fileName); - -#if 1 - pVnodeFiles->headerFd = open(pVnodeFiles->headerFilePath, O_RDONLY); -#else - int32_t *val = (int32_t *)taosGetStrHashData(fileHandleHashList, pVnodeFiles->headerFilePath); - if (val == NULL) { - pVnodeFiles->headerFd = open(pVnodeFiles->headerFilePath, O_RDONLY); - taosAddStrHash(fileHandleHashList, pVnodeFiles->headerFilePath, (char *)&pVnodeFiles->headerFd); - } else { - pVnodeFiles->headerFd = *val; - } -#endif - - if (!VALIDFD(pVnodeFiles->headerFd)) { - dError("QInfo:%p failed open header file:%s reason:%s", pQInfo, pVnodeFiles->headerFilePath, strerror(errno)); - goto _clean; - } - - struct stat fstat; - if (stat(pVnodeFiles->headerFilePath, &fstat) < 0) return -1; - pVnodeFiles->headFileSize = fstat.st_size; - size = fstat.st_size; - - pVnodeFiles->pHeaderFileData = mmap(NULL, size, PROT_READ, MAP_SHARED, pVnodeFiles->headerFd, 0); - if (pVnodeFiles->pHeaderFileData == MAP_FAILED) { - dError("QInfo:%p failed to map header file:%s, %s", pQInfo, pVnodeFiles->headerFilePath, strerror(errno)); - goto _clean; - } - - /* even the advise failed, continue.. 
*/ - if (madvise(pVnodeFiles->pHeaderFileData, size, MADV_SEQUENTIAL) == -1) { - dError("QInfo:%p failed to advise kernel the usage of header files, reason:%s", pQInfo, strerror(errno)); - } - - snprintf(pVnodeFiles->dataFilePath, 256, "%sv%df%d.data", prefix, vnodeId, fid); - snprintf(pVnodeFiles->lastFilePath, 256, "%sv%df%d.last", prefix, vnodeId, fid); - -#if 1 - pVnodeFiles->dataFd = open(pVnodeFiles->dataFilePath, O_RDONLY); - pVnodeFiles->lastFd = open(pVnodeFiles->lastFilePath, O_RDONLY); -#else - val = (int32_t *)taosGetStrHashData(fileHandleHashList, pVnodeFiles->dataFilePath); - if (val == NULL) { - pVnodeFiles->dataFd = open(pVnodeFiles->dataFilePath, O_RDONLY); - taosAddStrHash(fileHandleHashList, pVnodeFiles->dataFilePath, (char *)&pVnodeFiles->dataFd); - } else { - pVnodeFiles->dataFd = *val; - } -#endif - - if (!VALIDFD(pVnodeFiles->dataFd)) { - dError("QInfo:%p failed to open data file:%s, reason:%s", pQInfo, pVnodeFiles->dataFilePath, strerror(errno)); - goto _clean; - } - - if (!VALIDFD(pVnodeFiles->lastFd)) { - dError("QInfo:%p failed to open last file:%s, reason:%s", pQInfo, pVnodeFiles->lastFilePath, strerror(errno)); - goto _clean; - } - - if (stat(pVnodeFiles->dataFilePath, &fstat) < 0) return -1; - pVnodeFiles->dataFileSize = fstat.st_size; - - if (stat(pVnodeFiles->lastFilePath, &fstat) < 0) return -1; - pVnodeFiles->lastFileSize = fstat.st_size; - -#if DEFAULT_IO_ENGINE == IO_ENGINE_MMAP - /* enforce kernel to preload data when the file is mapping */ - pVnodeFiles->pDataFileData = mmap(NULL, pVnodeFiles->defaultMappingSize, PROT_READ, MAP_PRIVATE | MAP_POPULATE, - pVnodeFiles->dataFd, pVnodeFiles->dtFileMappingOffset); - if (pVnodeFiles->pDataFileData == MAP_FAILED) { - dError("QInfo:%p failed to map data file:%s, %s", pQInfo, pVnodeFiles->dataFilePath, strerror(errno)); - goto _clean; - } - - /* advise kernel the usage of mmaped data */ - if (madvise(pVnodeFiles->pDataFileData, pVnodeFiles->defaultMappingSize, MADV_SEQUENTIAL) == -1) { - dError("QInfo:%p failed to advise kernel the usage of data file:%s, reason:%s", pQInfo, pVnodeFiles->dataFilePath, - strerror(errno)); - } -#endif - - return 0; - -_clean: - if (pVnodeFiles->pHeaderFileData != MAP_FAILED && pVnodeFiles->pDataFileData != NULL) { - munmap(pVnodeFiles->pHeaderFileData, pVnodeFiles->headFileSize); - pVnodeFiles->pHeaderFileData = NULL; - } +static FORCE_INLINE void vnodeStoreFileId(SQueryFilesInfo *pVnodeFiles, int32_t fid, int32_t index) { + pVnodeFiles->pFileInfo[index].fileID = fid; +} -#if DEFAULT_IO_ENGINE == IO_ENGINE_MMAP - if (pVnodeFiles->pDataFileData != MAP_FAILED && pVnodeFiles->pDataFileData != NULL) { - munmap(pVnodeFiles->pDataFileData, pVnodeFiles->defaultMappingSize); - pVnodeFiles->pDataFileData = NULL; - } -#endif +static void vnodeRecordAllFiles(SQInfo *pQInfo, int32_t vnodeId) { + char suffix[] = ".head"; - tclose(pVnodeFiles->headerFd); - tclose(pVnodeFiles->dataFd); - tclose(pVnodeFiles->lastFd); - return -1; -} + struct dirent *pEntry = NULL; + size_t alloc = 4; // default allocated size -static void vnodeOpenAllFiles(SQInfo *pQInfo, int32_t vnodeId) { - char dbFilePathPrefix[TSDB_FILENAME_LEN] = {0}; + SQueryFilesInfo *pVnodeFilesInfo = &(pQInfo->pMeterQuerySupporter->runtimeEnv.vnodeFileInfo); + pVnodeFilesInfo->vnodeId = vnodeId; - sprintf(dbFilePathPrefix, "%s/vnode%d/db/", tsDirectory, vnodeId); - DIR *pDir = opendir(dbFilePathPrefix); + sprintf(pVnodeFilesInfo->dbFilePathPrefix, "%s/vnode%d/db/", tsDirectory, vnodeId); + DIR *pDir = 
opendir(pVnodeFilesInfo->dbFilePathPrefix); if (pDir == NULL) { - dError("QInfo:%p failed to open directory:%s", pQInfo, dbFilePathPrefix); + dError("QInfo:%p failed to open directory:%s, %s", pQInfo, pVnodeFilesInfo->dbFilePathPrefix, strerror(errno)); return; } - char suffix[] = ".head"; - - struct dirent *pEntry = NULL; - int32_t alloc = 4; // default allocated size - - SQueryRuntimeEnv *pRuntimeEnv = &(pQInfo->pMeterQuerySupporter->runtimeEnv); - pRuntimeEnv->pHeaderFiles = calloc(1, sizeof(SQueryFileInfo) * alloc); + pVnodeFilesInfo->pFileInfo = calloc(1, sizeof(SHeaderFileInfo) * alloc); SVnodeObj *pVnode = &vnodeList[vnodeId]; while ((pEntry = readdir(pDir)) != NULL) { @@ -3024,26 +3176,24 @@ static void vnodeOpenAllFiles(SQInfo *pQInfo, int32_t vnodeId) { assert(fid >= 0 && vid >= 0); - if (++pRuntimeEnv->numOfFiles > alloc) { - alloc = alloc << 1; - pRuntimeEnv->pHeaderFiles = realloc(pRuntimeEnv->pHeaderFiles, alloc * sizeof(SQueryFileInfo)); - memset(&pRuntimeEnv->pHeaderFiles[alloc >> 1], 0, (alloc >> 1) * sizeof(SQueryFileInfo)); + if (++pVnodeFilesInfo->numOfFiles > alloc) { + alloc = alloc << 1U; + pVnodeFilesInfo->pFileInfo = realloc(pVnodeFilesInfo->pFileInfo, alloc * sizeof(SHeaderFileInfo)); + memset(&pVnodeFilesInfo->pFileInfo[alloc >> 1U], 0, (alloc >> 1U) * sizeof(SHeaderFileInfo)); } - SQueryFileInfo *pVnodeFiles = &pRuntimeEnv->pHeaderFiles[pRuntimeEnv->numOfFiles - 1]; - int32_t ret = vnodeOpenVnodeDBFiles(pQInfo, pVnodeFiles, fid, vnodeId, pEntry->d_name, dbFilePathPrefix); - if (ret < 0) { - memset(pVnodeFiles, 0, sizeof(SQueryFileInfo)); // reset information - pRuntimeEnv->numOfFiles -= 1; - } + int32_t index = pVnodeFilesInfo->numOfFiles - 1; + vnodeStoreFileId(pVnodeFilesInfo, fid, index); } closedir(pDir); - dTrace("QInfo:%p find %d data files in %s to be checked", pQInfo, pRuntimeEnv->numOfFiles, dbFilePathPrefix); + dTrace("QInfo:%p find %d data files in %s to be checked", pQInfo, pVnodeFilesInfo->numOfFiles, + pVnodeFilesInfo->dbFilePathPrefix); /* order the files information according their names */ - qsort(pRuntimeEnv->pHeaderFiles, (size_t)pRuntimeEnv->numOfFiles, sizeof(SQueryFileInfo), file_order_comparator); + qsort(pVnodeFilesInfo->pFileInfo, (size_t)pVnodeFilesInfo->numOfFiles, sizeof(SHeaderFileInfo), + file_order_comparator); } static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pBlockInfo, void *pBlock) { @@ -3122,9 +3272,11 @@ static bool onlyOneQueryType(SQuery *pQuery, int32_t functId, int32_t functIdDst for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; - if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG) { + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG || + functionId == TSDB_FUNC_TAG_DUMMY) { continue; } + if (functionId != functId && functionId != functIdDst) { return false; } @@ -3137,7 +3289,7 @@ static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSD static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); } -static void rewriteExecOrder(SQuery *pQuery, bool metricQuery) { +static void changeExecuteScanOrder(SQuery *pQuery, bool metricQuery) { // in case of point-interpolation query, use asc order scan char msg[] = "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%lld-%lld, " @@ -3436,9 +3588,18 @@ void 
pointInterpSupporterSetData(SQInfo *pQInfo, SPointInterpoSupporter *pPointI if (pQuery->interpoType == TSDB_INTERPO_SET_VALUE) { for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; - SInterpInfo * pInterpInfo = (SInterpInfo *)pRuntimeEnv->pCtx[i].aOutputBuf; + + // only the function of interp needs the corresponding information + if (pCtx->functionId != TSDB_FUNC_INTERP) { + continue; + } + + pCtx->numOfParams = 4; + + SInterpInfo *pInterpInfo = (SInterpInfo *)pRuntimeEnv->pCtx[i].aOutputBuf; pInterpInfo->pInterpDetail = calloc(1, sizeof(SInterpInfoDetail)); + SInterpInfoDetail *pInterpDetail = pInterpInfo->pInterpDetail; // for primary timestamp column, set the flag @@ -3515,9 +3676,9 @@ void pointInterpSupporterInit(SQuery *pQuery, SPointInterpoSupporter *pInterpoSu int32_t offset = 0; - for (int32_t i = 0, j = 0; i < pQuery->numOfCols; ++i, ++j) { - pInterpoSupport->pPrevPoint[j] = prev + offset; - pInterpoSupport->pNextPoint[j] = next + offset; + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + pInterpoSupport->pPrevPoint[i] = prev + offset; + pInterpoSupport->pNextPoint[i] = next + offset; offset += pQuery->colList[i].data.bytes; } @@ -3605,7 +3766,7 @@ int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMete } setScanLimitationByResultBuffer(pQuery); - rewriteExecOrder(pQuery, false); + changeExecuteScanOrder(pQuery, false); pQInfo->over = 0; pQInfo->pointsRead = 0; @@ -3614,6 +3775,8 @@ int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMete // dataInCache requires lastKey value pQuery->lastKey = pQuery->skey; + doInitQueryFileInfoFD(&pSupporter->runtimeEnv.vnodeFileInfo); + vnodeInitDataBlockInfo(&pSupporter->runtimeEnv.loadBlockInfo); vnodeInitLoadCompBlockInfo(&pSupporter->runtimeEnv.loadCompBlockInfo); @@ -3646,7 +3809,7 @@ int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMete return ret; } - vnodeOpenAllFiles(pQInfo, pMeterObj->vnode); + vnodeRecordAllFiles(pQInfo, pMeterObj->vnode); if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { if ((ret = allocateOutputBufForGroup(pSupporter, pQuery, false)) != TSDB_CODE_SUCCESS) { @@ -3739,7 +3902,7 @@ void vnodeQueryFreeQInfoEx(SQInfo *pQInfo) { } } - if (VALIDFD(pSupporter->meterOutputFd)) { + if (FD_VALID(pSupporter->meterOutputFd)) { assert(pSupporter->meterOutputMMapBuf != NULL); dTrace("QInfo:%p disk-based output buffer during query:%lld bytes", pQInfo, pSupporter->bufSize); munmap(pSupporter->meterOutputMMapBuf, pSupporter->bufSize); @@ -3781,8 +3944,9 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) pQInfo->pointsRead = 0; pQuery->pointsRead = 0; - rewriteExecOrder(pQuery, true); + changeExecuteScanOrder(pQuery, true); + doInitQueryFileInfoFD(&pSupporter->runtimeEnv.vnodeFileInfo); vnodeInitDataBlockInfo(&pSupporter->runtimeEnv.loadBlockInfo); vnodeInitLoadCompBlockInfo(&pSupporter->runtimeEnv.loadCompBlockInfo); @@ -3823,7 +3987,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) } tSidSetSort(pSupporter->pSidSet); - vnodeOpenAllFiles(pQInfo, pMeter->vnode); + vnodeRecordAllFiles(pQInfo, pMeter->vnode); if ((ret = allocateOutputBufForGroup(pSupporter, pQuery, true)) != TSDB_CODE_SUCCESS) { return ret; @@ -3839,15 +4003,20 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) getTmpfilePath("tb_metric_mmap", pSupporter->extBufFile); pSupporter->meterOutputFd = open(pSupporter->extBufFile, O_CREAT | O_RDWR, 
0666); - if (!VALIDFD(pSupporter->meterOutputFd)) { + if (!FD_VALID(pSupporter->meterOutputFd)) { dError("QInfo:%p failed to create file: %s on disk. %s", pQInfo, pSupporter->extBufFile, strerror(errno)); return TSDB_CODE_SERV_OUT_OF_MEMORY; } - // set 4k page for each meter pSupporter->numOfPages = pSupporter->numOfMeters; - ftruncate(pSupporter->meterOutputFd, pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE); + ret = ftruncate(pSupporter->meterOutputFd, pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE); + if (ret != TSDB_CODE_SUCCESS) { + dError("QInfo:%p failed to create intermediate result output file:%s. %s", pQInfo, pSupporter->extBufFile, + strerror(errno)); + return TSDB_CODE_SERV_NO_DISKSPACE; + } + pSupporter->runtimeEnv.numOfRowsPerPage = (DEFAULT_INTERN_BUF_SIZE - sizeof(tFilePage)) / pQuery->rowSize; pSupporter->lastPageId = -1; pSupporter->bufSize = pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE; @@ -3855,7 +4024,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) pSupporter->meterOutputMMapBuf = mmap(NULL, pSupporter->bufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pSupporter->meterOutputFd, 0); if (pSupporter->meterOutputMMapBuf == MAP_FAILED) { - dError("QInfo:%p failed to map data file: %s to disk. %s", pQInfo, pSupporter->extBufFile, strerror(errno)); + dError("QInfo:%p failed to map temp file: %s. %s", pQInfo, pSupporter->extBufFile, strerror(errno)); return TSDB_CODE_SERV_OUT_OF_MEMORY; } } @@ -3880,14 +4049,14 @@ void vnodeDecMeterRefcnt(SQInfo *pQInfo) { SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; if (pSupporter == NULL || pSupporter->numOfMeters == 1) { - __sync_fetch_and_sub(&pQInfo->pObj->numOfQueries, 1); + atomic_fetch_sub_32(&pQInfo->pObj->numOfQueries, 1); dTrace("QInfo:%p vid:%d sid:%d meterId:%s, query is over, numOfQueries:%d", pQInfo, pQInfo->pObj->vnode, pQInfo->pObj->sid, pQInfo->pObj->meterId, pQInfo->pObj->numOfQueries); } else { int32_t num = 0; for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) { SMeterObj *pMeter = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[i]->sid); - __sync_fetch_and_sub(&(pMeter->numOfQueries), 1); + atomic_fetch_sub_32(&(pMeter->numOfQueries), 1); if (pMeter->numOfQueries > 0) { dTrace("QInfo:%p vid:%d sid:%d meterId:%s, query is over, numOfQueries:%d", pQInfo, pMeter->vnode, pMeter->sid, @@ -3907,13 +4076,13 @@ void vnodeDecMeterRefcnt(SQInfo *pQInfo) { } // todo merge with doRevisedResultsByLimit -void UNUSED_FUNC truncateResultByLimit(SQInfo *pQInfo, int64_t * final, int32_t *interpo) { +void UNUSED_FUNC truncateResultByLimit(SQInfo *pQInfo, int64_t *final, int32_t *interpo) { SQuery *pQuery = &(pQInfo->query); - if (pQuery->limit.limit > 0 && ((* final) + pQInfo->pointsRead > pQuery->limit.limit)) { - int64_t num = (* final) + pQInfo->pointsRead - pQuery->limit.limit; + if (pQuery->limit.limit > 0 && ((*final) + pQInfo->pointsRead > pQuery->limit.limit)) { + int64_t num = (*final) + pQInfo->pointsRead - pQuery->limit.limit; (*interpo) -= num; - (* final) -= num; + (*final) -= num; setQueryStatus(pQuery, QUERY_COMPLETED); // query completed } @@ -3931,7 +4100,7 @@ TSKEY getTimestampInCacheBlock(SCacheBlock *pBlock, int32_t index) { /* * NOTE: pQuery->pos will not change, the corresponding data block will be loaded into buffer * loadDataBlockOnDemand will change the value of pQuery->pos, according to the pQuery->lastKey - * */ + */ TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index) { SQuery *pQuery = pRuntimeEnv->pQuery; @@ 
-3950,23 +4119,16 @@ TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index) { bool loadTimestamp = true; int32_t fileId = pQuery->fileId; int32_t fileIndex = vnodeGetVnodeHeaderFileIdx(&fileId, pRuntimeEnv, pQuery->order.order); - - if (!vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, fileIndex)) { - dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, slot:%d load data block due to primary key required", - GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot); - - // todo handle failed to load data, file corrupted - // todo refactor the return value - int32_t ret = + + dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, slot:%d load data block due to primary key required", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot); + + int32_t ret = loadDataBlockIntoMem(pBlock, &pQuery->pFields[pQuery->slot], pRuntimeEnv, fileIndex, loadTimestamp, true); - UNUSED(ret); - } - - // the fields info is not loaded, load it into memory - if (pQuery->pFields == NULL || pQuery->pFields[pQuery->slot] == NULL) { - loadDataBlockFieldsInfo(pRuntimeEnv, &pRuntimeEnv->pHeaderFiles[fileIndex], pBlock, &pQuery->pFields[pQuery->slot]); + if (ret != TSDB_CODE_SUCCESS) { + return -1; } - + SET_DATA_BLOCK_LOADED(pRuntimeEnv->blockStatus); SET_FILE_BLOCK_FLAG(pRuntimeEnv->blockStatus); @@ -3996,7 +4158,7 @@ static void getFirstDataBlockInCache(SQueryRuntimeEnv *pRuntimeEnv) { } } -//TODO handle case that the cache is allocated but not assign to SMeterObj +// TODO handle case that the cache is allocated but not assign to SMeterObj void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn) { SQuery * pQuery = pRuntimeEnv->pQuery; SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); @@ -4454,17 +4616,18 @@ static void doMerge(SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, tFilePage pCtx[i].hasNull = true; pCtx[i].nStartQueryTimestamp = timestamp; - pCtx[i].aInputElemBuf = ((char *) inputSrc->data) + - ((int32_t) pRuntimeEnv->offset[i] * pRuntimeEnv->numOfRowsPerPage) + pCtx[i].outputBytes * inputIdx; + pCtx[i].aInputElemBuf = ((char *)inputSrc->data) + + ((int32_t)pRuntimeEnv->offset[i] * pRuntimeEnv->numOfRowsPerPage) + + pCtx[i].outputBytes * inputIdx; - //in case of tag column, the tag information should be extracted from input buffer + // in case of tag column, the tag information should be extracted from input buffer if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TAG) { tVariantDestroy(&pCtx[i].tag); tVariantCreateFromBinary(&pCtx[i].tag, pCtx[i].aInputElemBuf, pCtx[i].inputBytes, pCtx[i].inputType); } } - for(int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; if (functionId == TSDB_FUNC_TAG_DUMMY) { continue; @@ -4475,8 +4638,7 @@ static void doMerge(SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, tFilePage } static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) { - if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_FIRST_DST || - functionId == TSDB_FUNC_LAST_DST) { + if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { switch (srcDataType) { case TSDB_DATA_TYPE_BINARY: printf("%ld,%s\t", *(TSKEY *)data, (data + TSDB_KEYSIZE + 1)); @@ -4655,16 +4817,20 @@ int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj 
*pSupporter) { SQuery * pQuery = pRuntimeEnv->pQuery; int64_t st = taosGetTimestampMs(); + int32_t ret = TSDB_CODE_SUCCESS; while (pSupporter->subgroupIdx < pSupporter->pSidSet->numOfSubSet) { int32_t start = pSupporter->pSidSet->starterPos[pSupporter->subgroupIdx]; int32_t end = pSupporter->pSidSet->starterPos[pSupporter->subgroupIdx + 1]; - int32_t ret = - doMergeMetersResultsToGroupRes(pSupporter, pQuery, pRuntimeEnv, pSupporter->pMeterDataInfo, start, end); + ret = doMergeMetersResultsToGroupRes(pSupporter, pQuery, pRuntimeEnv, pSupporter->pMeterDataInfo, start, end); + if (ret < 0) { // not enough disk space to save the data into disk + return -1; + } + pSupporter->subgroupIdx += 1; - /* this group generates at least one result, return results */ + // this group generates at least one result, return results if (ret > 0) { break; } @@ -4676,7 +4842,7 @@ int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj *pSupporter) { dTrace("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%lldms", GET_QINFO_ADDR(pQuery), pSupporter->subgroupIdx - 1, pSupporter->pSidSet->numOfSubSet, taosGetTimestampMs() - st); - return pSupporter->numOfGroupResultPages; + return TSDB_CODE_SUCCESS; } void copyResToQueryResultBuf(SMeterQuerySupportObj *pSupporter, SQuery *pQuery) { @@ -4684,7 +4850,9 @@ void copyResToQueryResultBuf(SMeterQuerySupportObj *pSupporter, SQuery *pQuery) pSupporter->numOfGroupResultPages = 0; // current results of group has been sent to client, try next group - mergeMetersResultToOneGroups(pSupporter); + if (mergeMetersResultToOneGroups(pSupporter) != TSDB_CODE_SUCCESS) { + return; // failed to save data in the disk + } // set current query completed if (pSupporter->numOfGroupResultPages == 0 && pSupporter->subgroupIdx == pSupporter->pSidSet->numOfSubSet) { @@ -4762,7 +4930,10 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery } else { // copy data to disk buffer if (buffer[0]->numOfElems == pQuery->pointsToRead) { - flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv); + if (flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv) != TSDB_CODE_SUCCESS) { + return -1; + } + resetMergeResultBuf(pQuery, pCtx); } @@ -4809,7 +4980,15 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery } if (buffer[0]->numOfElems != 0) { // there are data in buffer - flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv); + if (flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv) != TSDB_CODE_SUCCESS) { + dError("QInfo:%p failed to flush data into temp file, abort query", GET_QINFO_ADDR(pQuery), + pSupporter->extBufFile); + tfree(pTree); + tfree(pValidMeter); + tfree(posArray); + + return -1; + } } int64_t endt = taosGetTimestampMs(); @@ -4828,25 +5007,45 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery return pSupporter->numOfGroupResultPages; } -static void extendDiskBuf(SMeterQuerySupportObj *pSupporter, int32_t numOfPages) { +static int32_t extendDiskBuf(const SQuery *pQuery, SMeterQuerySupportObj *pSupporter, int32_t numOfPages) { assert(pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE == pSupporter->bufSize); + SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + int32_t ret = munmap(pSupporter->meterOutputMMapBuf, pSupporter->bufSize); pSupporter->numOfPages = numOfPages; - // disk-based output buffer is exhausted, try to extend the disk-based buffer + /* + * disk-based output buffer is exhausted, try to extend the disk-based buffer, the available disk space may + * be insufficient + 
*/ ret = ftruncate(pSupporter->meterOutputFd, pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE); if (ret != 0) { - perror("error in allocate the disk-based buffer"); - return; + dError("QInfo:%p failed to create intermediate result output file:%s. %s", pQInfo, pSupporter->extBufFile, + strerror(errno)); + pQInfo->code = -TSDB_CODE_SERV_NO_DISKSPACE; + pQInfo->killed = 1; + + return pQInfo->code; } pSupporter->bufSize = pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE; pSupporter->meterOutputMMapBuf = mmap(NULL, pSupporter->bufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pSupporter->meterOutputFd, 0); + + if (pSupporter->meterOutputMMapBuf == MAP_FAILED) { + dError("QInfo:%p failed to map temp file: %s. %s", pQInfo, pSupporter->extBufFile, strerror(errno)); + pQInfo->code = -TSDB_CODE_SERV_OUT_OF_MEMORY; + pQInfo->killed = 1; + + return pQInfo->code; + } + + return TSDB_CODE_SUCCESS; } -void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, const SQueryRuntimeEnv *pRuntimeEnv) { +int32_t flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, + const SQueryRuntimeEnv *pRuntimeEnv) { int32_t numOfMeterResultBufPages = pSupporter->lastPageId + 1; int64_t dstSize = numOfMeterResultBufPages * DEFAULT_INTERN_BUF_SIZE + pSupporter->groupResultSize * (pSupporter->numOfGroupResultPages + 1); @@ -4857,7 +5056,9 @@ void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, requiredPages += pSupporter->numOfMeters; } - extendDiskBuf(pSupporter, requiredPages); + if (extendDiskBuf(pQuery, pSupporter, requiredPages) != TSDB_CODE_SUCCESS) { + return -1; + } } char *lastPosition = pSupporter->meterOutputMMapBuf + DEFAULT_INTERN_BUF_SIZE * numOfMeterResultBufPages + @@ -4871,6 +5072,7 @@ void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, } pSupporter->numOfGroupResultPages += 1; + return TSDB_CODE_SUCCESS; } void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx) { @@ -4888,7 +5090,7 @@ void setMeterDataInfo(SMeterDataInfo *pMeterDataInfo, SMeterObj *pMeterObj, int3 pMeterDataInfo->meterOrderIdx = meterIdx; } -void doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) { +int32_t doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; @@ -4902,11 +5104,20 @@ void doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) { pRuntimeEnv->pMeterObj = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[index]->sid); assert(pRuntimeEnv->pMeterObj == pMeterInfo[i].pMeterObj); - setIntervalQueryExecutionContext(pSupporter, i, pMeterInfo[i].pMeterQInfo); - saveResult(pSupporter, pMeterInfo[i].pMeterQInfo, pMeterInfo[i].pMeterQInfo->lastResRows); + int32_t ret = setIntervalQueryExecutionContext(pSupporter, i, pMeterInfo[i].pMeterQInfo); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + ret = saveResult(pSupporter, pMeterInfo[i].pMeterQInfo, pMeterInfo[i].pMeterQInfo->lastResRows); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } } } } + + return TSDB_CODE_SUCCESS; } void disableFunctForSuppleScan(SQueryRuntimeEnv *pRuntimeEnv, int32_t order) { @@ -4932,7 +5143,8 @@ void disableFunctForSuppleScan(SQueryRuntimeEnv *pRuntimeEnv, int32_t order) { } } } - } else { + } else { // TODO ERROR!! + // need to handle for each query result, not just the single runtime ctx. 
for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { pRuntimeEnv->pCtx[i].order = (pRuntimeEnv->pCtx[i].order) ^ 1; int32_t functId = pQuery->pSelectExpr[i].pBase.functionId; @@ -5212,6 +5424,7 @@ static void doSingleMeterSupplementScan(SQueryRuntimeEnv *pRuntimeEnv) { // usually this load operation will incure load disk block operation TSKEY endKey = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->endPos); + assert((QUERY_IS_ASC_QUERY(pQuery) && endKey <= pQuery->ekey) || (!QUERY_IS_ASC_QUERY(pQuery) && endKey >= pQuery->ekey)); @@ -5450,19 +5663,26 @@ static int32_t offsetComparator(const void *pLeft, const void *pRight) { * @param pMeterHeadDataInfo * @return */ -SMeterDataInfo **vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, SQueryFileInfo *pQueryFileInfo, - tSidSet *pSidSet, SMeterDataInfo *pMeterDataInfo, int32_t *numOfMeters) { - SQuery * pQuery = &pQInfo->query; +SMeterDataInfo **vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, int32_t fileIndex, tSidSet *pSidSet, + SMeterDataInfo *pMeterDataInfo, int32_t *numOfMeters) { + SQuery *pQuery = &pQInfo->query; + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; SMeterSidExtInfo ** pMeterSidExtInfo = pSupporter->pMeterSidExtInfo; + SQueryRuntimeEnv * pRuntimeEnv = &pSupporter->runtimeEnv; SVnodeObj *pVnode = &vnodeList[vid]; - char * pHeaderData = pQueryFileInfo->pHeaderFileData; + char *pHeaderFileData = vnodeGetHeaderFileData(pRuntimeEnv, vid, fileIndex); + if (pHeaderFileData == NULL) { // failed to load header file into buffer + return 0; + } + int32_t tmsize = sizeof(SCompHeader) * (pVnode->cfg.maxSessions) + sizeof(TSCKSUM); // file is corrupted, abort query in current file - if (validateHeaderOffsetSegment(pQInfo, pQueryFileInfo->headerFilePath, vid, pHeaderData, tmsize) < 0) { + if (validateHeaderOffsetSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, vid, pHeaderFileData, tmsize) < + 0) { *numOfMeters = 0; return 0; } @@ -5513,14 +5733,14 @@ SMeterDataInfo **vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, SQueryF int64_t headerOffset = TSDB_FILE_HEADER_LEN + sizeof(SCompHeader) * pMeterObj->sid; - SCompHeader *compHeader = (SCompHeader *)(pHeaderData + headerOffset); + SCompHeader *compHeader = (SCompHeader *)(pHeaderFileData + headerOffset); if (compHeader->compInfoOffset == 0) { continue; } if (compHeader->compInfoOffset < sizeof(SCompHeader) * pVnode->cfg.maxSessions + TSDB_FILE_HEADER_LEN || - compHeader->compInfoOffset > pQueryFileInfo->headFileSize) { + compHeader->compInfoOffset > pRuntimeEnv->vnodeFileInfo.headFileSize) { dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%d is not valid", pQuery, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, compHeader->compInfoOffset); continue; @@ -5605,18 +5825,25 @@ void changeMeterQueryInfoForSuppleQuery(SMeterQueryInfo *pMeterQueryInfo, TSKEY } } -static tFilePage *allocNewPage(SMeterQuerySupportObj *pSupporter, uint32_t *pageId) { +static tFilePage *allocNewPage(SQuery *pQuery, SMeterQuerySupportObj *pSupporter, uint32_t *pageId) { if (pSupporter->lastPageId == pSupporter->numOfPages - 1) { - extendDiskBuf(pSupporter, pSupporter->numOfPages + pSupporter->numOfMeters); + if (extendDiskBuf(pQuery, pSupporter, pSupporter->numOfPages + pSupporter->numOfMeters) != TSDB_CODE_SUCCESS) { + return NULL; + } } *pageId = (++pSupporter->lastPageId); return getFilePage(pSupporter, *pageId); } -tFilePage *addDataPageForMeterQueryInfo(SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportObj *pSupporter) { - uint32_t pageId = 0; - 
tFilePage *pPage = allocNewPage(pSupporter, &pageId); +tFilePage *addDataPageForMeterQueryInfo(SQuery *pQuery, SMeterQueryInfo *pMeterQueryInfo, + SMeterQuerySupportObj *pSupporter) { + uint32_t pageId = 0; + + tFilePage *pPage = allocNewPage(pQuery, pSupporter, &pageId); + if (pPage == NULL) { // failed to allocate disk-based buffer for intermediate results + return NULL; + } if (pMeterQueryInfo->numOfPages >= pMeterQueryInfo->numOfAlloc) { pMeterQueryInfo->numOfAlloc = pMeterQueryInfo->numOfAlloc << 1; @@ -5752,7 +5979,7 @@ static bool setCurrentQueryRange(SMeterDataInfo *pMeterDataInfo, SQuery *pQuery, * @return */ uint32_t getDataBlocksForMeters(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, char *pHeaderData, - int32_t numOfMeters, SQueryFileInfo *pQueryFileInfo, SMeterDataInfo **pMeterDataInfo) { + int32_t numOfMeters, const char *filePath, SMeterDataInfo **pMeterDataInfo) { uint32_t numOfBlocks = 0; SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); SQueryCostSummary *pSummary = &pSupporter->runtimeEnv.summary; @@ -5764,7 +5991,7 @@ uint32_t getDataBlocksForMeters(SMeterQuerySupportObj *pSupporter, SQuery *pQuer SMeterObj *pMeterObj = pMeterDataInfo[j]->pMeterObj; SCompInfo *compInfo = (SCompInfo *)(pHeaderData + pMeterDataInfo[j]->offsetInHeaderFile); - int32_t ret = validateCompBlockInfoSegment(pQInfo, pQueryFileInfo->headerFilePath, pMeterObj->vnode, compInfo, + int32_t ret = validateCompBlockInfoSegment(pQInfo, filePath, pMeterObj->vnode, compInfo, pMeterDataInfo[j]->offsetInHeaderFile); if (ret != 0) { clearMeterDataBlockInfo(pMeterDataInfo[j]); @@ -5783,8 +6010,7 @@ uint32_t getDataBlocksForMeters(SMeterQuerySupportObj *pSupporter, SQuery *pQuer // check compblock integrity TSCKSUM checksum = *(TSCKSUM *)((char *)compInfo + sizeof(SCompInfo) + size); - ret = validateCompBlockSegment(pQInfo, pQueryFileInfo->headerFilePath, compInfo, (char *)pCompBlock, - pMeterObj->vnode, checksum); + ret = validateCompBlockSegment(pQInfo, filePath, compInfo, (char *)pCompBlock, pMeterObj->vnode, checksum); if (ret < 0) { clearMeterDataBlockInfo(pMeterDataInfo[j]); continue; @@ -6115,46 +6341,53 @@ void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t } } -void setOutputBufferForIntervalQuery(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo) { +int32_t setOutputBufferForIntervalQuery(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; tFilePage * pData = NULL; + SQuery *pQuery = pRuntimeEnv->pQuery; + // in the first scan, new space needed for results if (pMeterQueryInfo->numOfPages == 0) { - pData = addDataPageForMeterQueryInfo(pMeterQueryInfo, pSupporter); + pData = addDataPageForMeterQueryInfo(pQuery, pMeterQueryInfo, pSupporter); } else { int32_t lastPageId = pMeterQueryInfo->pageList[pMeterQueryInfo->numOfPages - 1]; pData = getFilePage(pSupporter, lastPageId); if (pData->numOfElems >= pRuntimeEnv->numOfRowsPerPage) { - pData = addDataPageForMeterQueryInfo(pMeterQueryInfo, pSupporter); - assert(pData->numOfElems == 0); // number of elements must be 0 for new allocated buffer + pData = addDataPageForMeterQueryInfo(pRuntimeEnv->pQuery, pMeterQueryInfo, pSupporter); + if (pData != NULL) { + assert(pData->numOfElems == 0); // number of elements must be 0 for new allocated buffer + } } } + if (pData == NULL) { + return -1; + } + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) { pRuntimeEnv->pCtx[i].aOutputBuf = getOutputResPos(pRuntimeEnv, pData, 
pData->numOfElems, i); pRuntimeEnv->pCtx[i].resultInfo = &pMeterQueryInfo->resultInfo[i]; } + + return TSDB_CODE_SUCCESS; } -void setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t meterIdx, - SMeterQueryInfo *pMeterQueryInfo) { +int32_t setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t meterIdx, + SMeterQueryInfo *pMeterQueryInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; - SQuery * pQuery = pRuntimeEnv->pQuery; if (IS_MASTER_SCAN(pRuntimeEnv)) { - setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo); + if (setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo) != TSDB_CODE_SUCCESS) { + // not enough disk space or memory buffer for intermediate results + return -1; + } if (pMeterQueryInfo->lastResRows == 0) { initCtxOutputBuf(pRuntimeEnv); } - // reset the number of iterated elements, once this function is called. since the pCtx for different - for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { - // pRuntimeEnv->pCtx[j].numOfIteratedElems = 0; - } - } else { if (pMeterQueryInfo->reverseFillRes) { setCtxOutputPointerForSupplementScan(pSupporter, pMeterQueryInfo); @@ -6165,7 +6398,9 @@ void setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t * * If the master scan does not produce any results, new spaces needed to be allocated during supplement scan */ - setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo); + if (setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo) != TSDB_CODE_SUCCESS) { + return -1; + } } } @@ -6184,6 +6419,8 @@ void setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t tsBufSetCursor(pSupporter->runtimeEnv.pTSBuf, &pMeterQueryInfo->cur); } } + + return 0; } static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pInfo, @@ -6453,11 +6690,10 @@ bool needPrimaryTimestampCol(SQuery *pQuery, SBlockInfo *pBlockInfo) { return loadPrimaryTS; } -int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, int8_t *blkStatus, SQueryRuntimeEnv *pRuntimeEnv, +int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, uint8_t *blkStatus, SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIdx, int32_t slotIdx, __block_search_fn_t searchFn, bool onDemand) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; - SQueryFileInfo *pQueryFileInfo = &pRuntimeEnv->pHeaderFiles[fileIdx]; + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; TSKEY *primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; @@ -6492,7 +6728,7 @@ int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, int8_t *blkS setTimestampRange(pRuntimeEnv, pBlock->keyFirst, pBlock->keyLast); } else if (req == BLK_DATA_FILEDS_NEEDED) { - if (loadDataBlockFieldsInfo(pRuntimeEnv, pQueryFileInfo, pBlock, pFields) < 0) { + if (loadDataBlockFieldsInfo(pRuntimeEnv, pBlock, pFields) < 0) { return DISK_DATA_LOAD_FAILED; } } else { @@ -6501,7 +6737,7 @@ int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, int8_t *blkS } } else { _load_all: - if (loadDataBlockFieldsInfo(pRuntimeEnv, pQueryFileInfo, pBlock, pFields) < 0) { + if (loadDataBlockFieldsInfo(pRuntimeEnv, pBlock, pFields) < 0) { return DISK_DATA_LOAD_FAILED; } @@ -6512,7 +6748,7 @@ int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, int8_t *blkS * filter the data block according to the value filter condition. 
* no need to load the data block, continue for next block */ - if (!needToLoadDataBlock(pQuery, *pFields, pRuntimeEnv->pCtx)) { + if (!needToLoadDataBlock(pQuery, *pFields, pRuntimeEnv->pCtx, pBlock->numOfPoints)) { #if defined(_DEBUG_VIEW) dTrace("QInfo:%p fileId:%d, slot:%d, block discarded by per-filter, ", GET_QINFO_ADDR(pQuery), pQuery->fileId, pQuery->slot); @@ -6576,14 +6812,14 @@ static void validateResultBuf(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo } } -void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult) { +int32_t saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; // no results generated, do nothing for master scan if (numOfResult <= 0) { if (IS_MASTER_SCAN(pRuntimeEnv)) { - return; + return TSDB_CODE_SUCCESS; } else { /* * There is a case that no result generated during the the supplement scan, and during the main @@ -6608,7 +6844,7 @@ void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryI setCtxOutputPointerForSupplementScan(pSupporter, pMeterQueryInfo); } - return; + return TSDB_CODE_SUCCESS; } } @@ -6637,7 +6873,9 @@ void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryI pMeterQueryInfo->numOfRes += numOfResult; assert(pData->numOfElems <= pRuntimeEnv->numOfRowsPerPage); - setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo); + if (setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo) != TSDB_CODE_SUCCESS) { + return -1; + } for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { resetResultInfo(&pMeterQueryInfo->resultInfo[i]); @@ -6660,6 +6898,8 @@ void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryI tColModelDisplay(cm, outputPage->data, outputPage->numOfElems, pRuntimeEnv->numOfRowsPerPage); #endif } + + return TSDB_CODE_SUCCESS; } static int32_t getSubsetNumber(SMeterQuerySupportObj *pSupporter) { @@ -6859,7 +7099,7 @@ bool vnodeHasRemainResults(void *handle) { // query has completed if (Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED | QUERY_NO_DATA_TO_CHECK)) { - TSKEY ekey = taosGetRevisedEndKey(pSupporter->rawEKey, pQuery->order.order, pQuery->nAggTimeInterval, + TSKEY ekey = taosGetRevisedEndKey(pSupporter->rawEKey, pQuery->order.order, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); int32_t numOfTotal = taosGetNumOfResultWithInterpo(pInterpoInfo, (TSKEY *)pRuntimeEnv->pInterpoBuf[0]->data, remain, pQuery->nAggTimeInterval, ekey, pQuery->pointsToRead); @@ -6904,36 +7144,18 @@ static int32_t resultInterpolate(SQInfo *pQInfo, tFilePage **data, tFilePage **p return numOfRes; } -static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char* data, int32_t* size) { - SMeterObj* pObj = pQInfo->pObj; - SQuery* pQuery = &pQInfo->query; +static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data) { + SMeterObj *pObj = pQInfo->pObj; + SQuery * pQuery = &pQInfo->query; int tnumOfRows = vnodeList[pObj->vnode].cfg.rowsInFileBlock; - int32_t dataSize = pQInfo->query.rowSize * numOfRows; - - if (dataSize >= tsCompressMsgSize && tsCompressMsgSize > 0) { - char* compBuf = malloc((size_t) dataSize); - - // for metric query, bufIndex always be 0. 
- char* d = compBuf; - for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { // pQInfo->bufIndex == 0 - int32_t bytes = pQuery->pSelectExpr[col].resBytes; - - memmove(d, pQuery->sdata[col]->data + bytes * tnumOfRows * pQInfo->bufIndex, bytes * numOfRows); - d += bytes * numOfRows; - } - *size = tsCompressString(compBuf, dataSize, 1, data, dataSize + EXTRA_BYTES, 0, 0, 0); + // for metric query, bufIndex always be 0. + for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { // pQInfo->bufIndex == 0 + int32_t bytes = pQuery->pSelectExpr[col].resBytes; - dTrace("QInfo:%p compress rsp msg, before:%d, after:%d", pQInfo, dataSize, *size); - free(compBuf); - } else { // for metric query, bufIndex always be 0. - for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { // pQInfo->bufIndex == 0 - int32_t bytes = pQuery->pSelectExpr[col].resBytes; - - memmove(data, pQuery->sdata[col]->data + bytes * tnumOfRows * pQInfo->bufIndex, bytes * numOfRows); - data += bytes * numOfRows; - } + memmove(data, pQuery->sdata[col]->data + bytes * tnumOfRows * pQInfo->bufIndex, bytes * numOfRows); + data += bytes * numOfRows; } } @@ -6948,9 +7170,9 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char* data * @param numOfRows the number of rows that are not returned in current retrieve * @return */ -int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows, int32_t* size) { +int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows) { SQInfo *pQInfo = (SQInfo *)handle; - SQuery * pQuery = &pQInfo->query; + SQuery *pQuery = &pQInfo->query; assert(pQuery->pSelectExpr != NULL && pQuery->numOfOutputCols > 0); @@ -6959,9 +7181,9 @@ int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows, i int32_t fd = open(pQuery->sdata[0]->data, O_RDONLY, 0666); // make sure file exist - if (VALIDFD(fd)) { + if (FD_VALID(fd)) { size_t s = lseek(fd, 0, SEEK_END); - dTrace("QInfo:%p ts comp data return, file:%s, size:%ld", pQInfo, pQuery->sdata[0]->data, size); + dTrace("QInfo:%p ts comp data return, file:%s, size:%lld", pQInfo, pQuery->sdata[0]->data, s); lseek(fd, 0, SEEK_SET); read(fd, data, s); @@ -6973,7 +7195,7 @@ int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows, i pQuery->sdata[0]->data, strerror(errno)); } } else { - doCopyQueryResultToMsg(pQInfo, numOfRows, data, size); + doCopyQueryResultToMsg(pQInfo, numOfRows, data); } return numOfRows; @@ -6988,7 +7210,7 @@ int32_t vnodeQueryResultInterpolate(SQInfo *pQInfo, tFilePage **pDst, tFilePage while (1) { numOfRows = taosNumOfRemainPoints(&pRuntimeEnv->interpoInfo); - TSKEY ekey = taosGetRevisedEndKey(pSupporter->rawEKey, pQuery->order.order, pQuery->nAggTimeInterval, + TSKEY ekey = taosGetRevisedEndKey(pSupporter->rawEKey, pQuery->order.order, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); int32_t numOfFinalRows = taosGetNumOfResultWithInterpo(&pRuntimeEnv->interpoInfo, (TSKEY *)pDataSrc[0]->data, numOfRows, diff --git a/src/system/detail/src/vnodeQueryProcess.c b/src/system/detail/src/vnodeQueryProcess.c index 73d97f5e6e11896870287a9ac340f07c336d8b01..a545645a0967d5f2f12ebfe412cd884cc8fb24f2 100644 --- a/src/system/detail/src/vnodeQueryProcess.c +++ b/src/system/detail/src/vnodeQueryProcess.c @@ -14,10 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include +#include "os.h" #include "taosmsg.h" #include "textbuffer.h" @@ -30,7 +27,7 @@ #include "vnodeQueryImpl.h" #define ALL_CACHE_BLOCKS_CHECKED(q) \ - 
((q)->slot == (q)->currentSlot && QUERY_IS_ASC_QUERY(q) || (q)->slot == (q)->firstSlot && (!QUERY_IS_ASC_QUERY(q))) + (((q)->slot == (q)->currentSlot && QUERY_IS_ASC_QUERY(q)) || ((q)->slot == (q)->firstSlot && (!QUERY_IS_ASC_QUERY(q)))) #define FORWARD_CACHE_BLOCK_CHECK_SLOT(slot, step, maxblocks) (slot) = ((slot) + (step) + (maxblocks)) % (maxblocks); @@ -160,7 +157,11 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe setExecutionContext(pSupporter, pSupporter->pResult, k, pMeterInfo[k].groupIdx, pMeterQueryInfo); } else { - setIntervalQueryExecutionContext(pSupporter, k, pMeterQueryInfo); + int32_t ret = setIntervalQueryExecutionContext(pSupporter, k, pMeterQueryInfo); + if (ret != TSDB_CODE_SUCCESS) { + pQInfo->killed = 1; + return NULL; + } } qTrace("QInfo:%p vid:%d sid:%d id:%s, query in cache, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode, @@ -269,7 +270,9 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe __block_search_fn_t searchFn = vnodeSearchKeyFunc[pTempMeter->searchAlgorithm]; int32_t vnodeId = pTempMeter->vnode; - dTrace("QInfo:%p start to check data blocks in %d files", pQInfo, pRuntimeEnv->numOfFiles); + SQueryFilesInfo* pVnodeFileInfo = &pRuntimeEnv->vnodeFileInfo; + + dTrace("QInfo:%p start to check data blocks in %d files", pQInfo, pVnodeFileInfo->numOfFiles); int32_t fid = QUERY_IS_ASC_QUERY(pQuery) ? -1 : INT32_MAX; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); @@ -292,34 +295,41 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe pQuery->fileId = fid; pSummary->numOfFiles++; - SQueryFileInfo *pQueryFileInfo = &pRuntimeEnv->pHeaderFiles[fileIdx]; - char * pHeaderData = pQueryFileInfo->pHeaderFileData; - - int32_t numOfQualifiedMeters = 0; - SMeterDataInfo **pReqMeterDataInfo = vnodeFilterQualifiedMeters( - pQInfo, vnodeId, pQueryFileInfo, pSupporter->pSidSet, pMeterDataInfo, &numOfQualifiedMeters); - dTrace("QInfo:%p file:%s, %d meters qualified", pQInfo, pQueryFileInfo->dataFilePath, numOfQualifiedMeters); + char *pHeaderFileData = vnodeGetHeaderFileData(pRuntimeEnv, vnodeId, fileIdx); + if (pHeaderFileData == NULL) { // failed to mmap header file into buffer, ignore current file, try next + fid += step; + continue; + } + + int32_t numOfQualifiedMeters = 0; + assert(fileIdx == pRuntimeEnv->vnodeFileInfo.current); + + SMeterDataInfo **pReqMeterDataInfo = vnodeFilterQualifiedMeters(pQInfo, vnodeId, fileIdx, pSupporter->pSidSet, + pMeterDataInfo, &numOfQualifiedMeters); if (pReqMeterDataInfo == NULL) { dError("QInfo:%p failed to allocate memory to perform query processing, abort", pQInfo); - pQInfo->code = TSDB_CODE_SERV_OUT_OF_MEMORY; + pQInfo->code = -TSDB_CODE_SERV_OUT_OF_MEMORY; pQInfo->killed = 1; return NULL; } - // none of meters in query set have pHeaderData in this file, try next file + dTrace("QInfo:%p file:%s, %d meters qualified", pQInfo, pVnodeFileInfo->dataFilePath, numOfQualifiedMeters); + + // none of meters in query set have pHeaderFileData in this file, try next file if (numOfQualifiedMeters == 0) { fid += step; tfree(pReqMeterDataInfo); continue; } - uint32_t numOfBlocks = getDataBlocksForMeters(pSupporter, pQuery, pHeaderData, numOfQualifiedMeters, pQueryFileInfo, - pReqMeterDataInfo); + uint32_t numOfBlocks = getDataBlocksForMeters(pSupporter, pQuery, pHeaderFileData, numOfQualifiedMeters, + pVnodeFileInfo->headerFilePath, pReqMeterDataInfo); - dTrace("QInfo:%p file:%s, %d meters contains %d blocks to be checked", 
pQInfo, pQueryFileInfo->dataFilePath, + dTrace("QInfo:%p file:%s, %d meters contains %d blocks to be checked", pQInfo, pVnodeFileInfo->dataFilePath, numOfQualifiedMeters, numOfBlocks); + if (numOfBlocks == 0) { fid += step; tfree(pReqMeterDataInfo); @@ -332,7 +342,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe dError("QInfo:%p failed to allocate memory to perform query processing, abort", pQInfo); tfree(pReqMeterDataInfo); - pQInfo->code = TSDB_CODE_SERV_OUT_OF_MEMORY; + pQInfo->code = -TSDB_CODE_SERV_OUT_OF_MEMORY; pQInfo->killed = 1; return NULL; } @@ -344,7 +354,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe totalBlocks += numOfBlocks; - // sequentially scan the pHeaderData file + // sequentially scan the pHeaderFileData file int32_t j = QUERY_IS_ASC_QUERY(pQuery) ? 0 : numOfBlocks - 1; for (; j < numOfBlocks && j >= 0; j += step) { @@ -387,7 +397,12 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe setExecutionContext(pSupporter, pSupporter->pResult, pOneMeterDataInfo->meterOrderIdx, pOneMeterDataInfo->groupIdx, pMeterQueryInfo); } else { // interval query - setIntervalQueryExecutionContext(pSupporter, pOneMeterDataInfo->meterOrderIdx, pMeterQueryInfo); + int32_t ret = setIntervalQueryExecutionContext(pSupporter, pOneMeterDataInfo->meterOrderIdx, pMeterQueryInfo); + if (ret != TSDB_CODE_SUCCESS) { + tfree(pReqMeterDataInfo); // error code has been set + pQInfo->killed = 1; + return NULL; + } } SCompBlock *pBlock = pInfoEx->pBlock.compBlock; @@ -426,7 +441,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe } int64_t time = taosGetTimestampUs() - st; - dTrace("QInfo:%p complete check %d files, %d blocks, elapsed time:%.3fms", pQInfo, pRuntimeEnv->numOfFiles, + dTrace("QInfo:%p complete check %d files, %d blocks, elapsed time:%.3fms", pQInfo, pVnodeFileInfo->numOfFiles, totalBlocks, time / 1000.0); pSummary->fileTimeUs += time; @@ -503,7 +518,7 @@ static int64_t doCheckMetersInGroup(SQInfo *pQInfo, int32_t index, int32_t start #if DEFAULT_IO_ENGINE == IO_ENGINE_MMAP for (int32_t i = 0; i < pRuntimeEnv->numOfFiles; ++i) { - resetMMapWindow(&pRuntimeEnv->pHeaderFiles[i]); + resetMMapWindow(&pRuntimeEnv->pVnodeFiles[i]); } #endif @@ -673,7 +688,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { #if DEFAULT_IO_ENGINE == IO_ENGINE_MMAP for (int32_t i = 0; i < pRuntimeEnv->numOfFiles; ++i) { - resetMMapWindow(&pRuntimeEnv->pHeaderFiles[i]); + resetMMapWindow(&pRuntimeEnv->pVnodeFiles[i]); } #endif @@ -881,6 +896,7 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { pSupporter->pMeterDataInfo = (SMeterDataInfo *)calloc(1, sizeof(SMeterDataInfo) * pSupporter->numOfMeters); if (pSupporter->pMeterDataInfo == NULL) { dError("QInfo:%p failed to allocate memory, %s", pQInfo, strerror(errno)); + pQInfo->code = -TSDB_CODE_SERV_OUT_OF_MEMORY; return; } @@ -894,7 +910,12 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { dTrace("QInfo:%p main scan completed, elapsed time: %lldms, supplementary scan start, order:%d", pQInfo, et - st, pQuery->order.order ^ 1); - doCloseAllOpenedResults(pSupporter); + // failed to save all intermediate results into disk, abort further query processing + if (doCloseAllOpenedResults(pSupporter) != TSDB_CODE_SUCCESS) { + dError("QInfo:%p failed to save intermediate results, abort further query processing", pQInfo); + return; + } + doMultiMeterSupplementaryScan(pQInfo); if 
(isQueryKilled(pQuery)) { @@ -905,12 +926,13 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { if (pQuery->nAggTimeInterval > 0) { assert(pSupporter->subgroupIdx == 0 && pSupporter->numOfGroupResultPages == 0); - mergeMetersResultToOneGroups(pSupporter); - copyResToQueryResultBuf(pSupporter, pQuery); - + if (mergeMetersResultToOneGroups(pSupporter) == TSDB_CODE_SUCCESS) { + copyResToQueryResultBuf(pSupporter, pQuery); + #ifdef _DEBUG_VIEW - displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len); + displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len); #endif + } } else { // not a interval query copyFromGroupBuf(pQInfo, pSupporter->pResult); } @@ -1151,12 +1173,13 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { } if (pQInfo->killed) { - TSDB_QINFO_RESET_SIG(pQInfo); - dTrace("QInfo:%p it is already killed, reset signature and abort", pQInfo); + dTrace("QInfo:%p it is already killed, abort", pQInfo); + vnodeDecRefCount(pQInfo); + return; } - assert(pQInfo->signature == TSDB_QINFO_QUERY_FLAG); + assert(pQInfo->refCount >= 1); SQuery * pQuery = &pQInfo->query; SMeterObj *pMeterObj = pQInfo->pObj; @@ -1190,10 +1213,8 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead, numOfInterpo, pQInfo->pointsRead, pQInfo->pointsInterpo, pQInfo->pointsReturned); - dTrace("QInfo:%p reset signature", pQInfo); - - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); + vnodeDecRefCount(pQInfo); return; } @@ -1212,23 +1233,22 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsInterpo, pQInfo->pointsReturned); - dTrace("QInfo:%p reset signature", pQInfo); - - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); + vnodeDecRefCount(pQInfo); + return; } } } pQInfo->over = 1; - dTrace("QInfo:%p vid:%d sid:%d id:%s, query over, %d points are returned, reset signature", pQInfo, + dTrace("QInfo:%p vid:%d sid:%d id:%s, query over, %d points are returned", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQInfo->pointsRead); vnodePrintQueryStatistics(pQInfo->pMeterQuerySupporter); - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); - + + vnodeDecRefCount(pQInfo); return; } @@ -1256,15 +1276,15 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { /* check if query is killed or not */ if (isQueryKilled(pQuery)) { - dTrace("QInfo:%p query is killed, reset signature", pQInfo); + dTrace("QInfo:%p query is killed", pQInfo); pQInfo->over = 1; } else { - dTrace("QInfo:%p vid:%d sid:%d id:%s, meter query thread completed, %d points are returned, reset signature", + dTrace("QInfo:%p vid:%d sid:%d id:%s, meter query thread completed, %d points are returned", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead); } - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); + vnodeDecRefCount(pQInfo); } void vnodeMultiMeterQuery(SSchedMsg *pMsg) { @@ -1275,12 +1295,12 @@ void vnodeMultiMeterQuery(SSchedMsg *pMsg) { } if (pQInfo->killed) { - TSDB_QINFO_RESET_SIG(pQInfo); - dTrace("QInfo:%p it is already killed, reset signature and abort", pQInfo); + vnodeDecRefCount(pQInfo); + dTrace("QInfo:%p it is already killed, abort", pQInfo); return; } - assert(pQInfo->signature == TSDB_QINFO_QUERY_FLAG); + assert(pQInfo->refCount >= 1); SQuery *pQuery = &pQInfo->query; pQuery->pointsRead = 0; @@ -1301,7 +1321,6 @@ void vnodeMultiMeterQuery(SSchedMsg *pMsg) { 
pQInfo->useconds += (taosGetTimestampUs() - st); pQInfo->over = isQueryKilled(pQuery) ? 1 : 0; - dTrace("QInfo:%p reset signature", pQInfo); taosInterpoSetStartInfo(&pQInfo->pMeterQuerySupporter->runtimeEnv.interpoInfo, pQuery->pointsRead, pQInfo->query.interpoType); @@ -1309,11 +1328,11 @@ void vnodeMultiMeterQuery(SSchedMsg *pMsg) { if (pQuery->pointsRead == 0) { pQInfo->over = 1; - dTrace("QInfo:%p over, %d meters queried, %d points are returned, reset signature", pQInfo, pSupporter->numOfMeters, + dTrace("QInfo:%p over, %d meters queried, %d points are returned", pQInfo, pSupporter->numOfMeters, pQInfo->pointsRead); vnodePrintQueryStatistics(pSupporter); } - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); + vnodeDecRefCount(pQInfo); } diff --git a/src/system/detail/src/vnodeRead.c b/src/system/detail/src/vnodeRead.c index 3e580b747f72280323ebbe85092d1227d82d9b40..bbd3e9465c32174566f08b809545ae4b5d7e5f65 100644 --- a/src/system/detail/src/vnodeRead.c +++ b/src/system/detail/src/vnodeRead.c @@ -14,12 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include -#include +#include "os.h" #include "ihash.h" #include "taosmsg.h" @@ -31,8 +26,6 @@ #include "vnodeRead.h" #include "vnodeUtil.h" -#pragma GCC diagnostic ignored "-Wint-conversion" - int (*pQueryFunc[])(SMeterObj *, SQuery *) = {vnodeQueryFromCache, vnodeQueryFromFile}; int vnodeInterpolationSearchKey(char *pValue, int num, TSKEY key, int order) { @@ -200,8 +193,6 @@ static SQInfo *vnodeAllocateQInfoCommon(SQueryMeterMsg *pQueryMsg, SMeterObj *pM } else { pQuery->colList[i].data.filters = NULL; } - - pQuery->dataRowSize += colList[i].bytes; } vnodeUpdateQueryColumnIndex(pQuery, pMeterObj); @@ -399,10 +390,10 @@ __clean_memory: return NULL; } -static void vnodeFreeQInfoInQueueImpl(SSchedMsg *pMsg) { - SQInfo *pQInfo = (SQInfo *)pMsg->ahandle; - vnodeFreeQInfo(pQInfo, true); -} +//static void vnodeFreeQInfoInQueueImpl(SSchedMsg *pMsg) { +// SQInfo *pQInfo = (SQInfo *)pMsg->ahandle; +// vnodeFreeQInfo(pQInfo, true); +//} void vnodeFreeQInfoInQueue(void *param) { SQInfo *pQInfo = (SQInfo *)param; @@ -410,15 +401,18 @@ void vnodeFreeQInfoInQueue(void *param) { if (!vnodeIsQInfoValid(pQInfo)) return; pQInfo->killed = 1; - - dTrace("QInfo:%p set kill flag and add to queue, stop query ASAP", pQInfo); - SSchedMsg schedMsg = {0}; - schedMsg.fp = vnodeFreeQInfoInQueueImpl; - - schedMsg.msg = NULL; - schedMsg.thandle = (void *)1; - schedMsg.ahandle = param; - taosScheduleTask(queryQhandle, &schedMsg); + dTrace("QInfo:%p set kill flag to free QInfo", pQInfo); + + vnodeDecRefCount(pQInfo); + +// dTrace("QInfo:%p set kill flag and add to queue, stop query ASAP", pQInfo); +// SSchedMsg schedMsg = {0}; +// schedMsg.fp = vnodeFreeQInfoInQueueImpl; + +// schedMsg.msg = NULL; +// schedMsg.thandle = (void *)1; +// schedMsg.ahandle = param; +// taosScheduleTask(queryQhandle, &schedMsg); } void vnodeFreeQInfo(void *param, bool decQueryRef) { @@ -426,8 +420,6 @@ void vnodeFreeQInfo(void *param, bool decQueryRef) { if (!vnodeIsQInfoValid(param)) return; pQInfo->killed = 1; - TSDB_WAIT_TO_SAFE_DROP_QINFO(pQInfo); - SMeterObj *pObj = pQInfo->pObj; dTrace("QInfo:%p start to free SQInfo", pQInfo); @@ -488,13 +480,9 @@ void vnodeFreeQInfo(void *param, bool decQueryRef) { } tfree(pQuery->pGroupbyExpr); - dTrace("QInfo:%p vid:%d sid:%d meterId:%s, QInfo is freed", pQInfo, pObj->vnode, pObj->sid, pObj->meterId); - /* - * destory signature, in order to avoid the query process pass the object - * safety check - */ + //destroy signature, 
in order to avoid the query process pass the object safety check memset(pQInfo, 0, sizeof(SQInfo)); tfree(pQInfo); } @@ -510,7 +498,30 @@ bool vnodeIsQInfoValid(void *param) { * into local variable, then compare by using local variable */ uint64_t sig = pQInfo->signature; - return (sig == (uint64_t)pQInfo) || (sig == TSDB_QINFO_QUERY_FLAG); + return (sig == (uint64_t)pQInfo); +} + +void vnodeDecRefCount(void *param) { + SQInfo *pQInfo = (SQInfo*) param; + + assert(vnodeIsQInfoValid(pQInfo)); + + int32_t ref = atomic_sub_fetch_32(&pQInfo->refCount, 1); + assert(ref >= 0); + + dTrace("QInfo:%p decrease obj refcount, %d", pQInfo, ref); + if (ref == 0) { + vnodeFreeQInfo(pQInfo, true); + } +} + +void vnodeAddRefCount(void *param) { + SQInfo *pQInfo = (SQInfo*) param; + + assert(vnodeIsQInfoValid(pQInfo)); + + int32_t ref = atomic_add_fetch_32(&pQInfo->refCount, 1); + dTrace("QInfo:%p add refcount, %d", pQInfo, ref); } void vnodeQueryData(SSchedMsg *pMsg) { @@ -520,12 +531,11 @@ void vnodeQueryData(SSchedMsg *pMsg) { pQInfo = (SQInfo *)pMsg->ahandle; if (pQInfo->killed) { - TSDB_QINFO_RESET_SIG(pQInfo); - dTrace("QInfo:%p it is already killed, reset signature and abort", pQInfo); + dTrace("QInfo:%p it is already killed, abort", pQInfo); + vnodeDecRefCount(pQInfo); return; } - assert(pQInfo->signature == TSDB_QINFO_QUERY_FLAG); pQuery = &(pQInfo->query); SMeterObj *pObj = pQInfo->pObj; @@ -593,13 +603,11 @@ void vnodeQueryData(SSchedMsg *pMsg) { tclose(pQInfo->query.lfd); } - /* reset QInfo signature */ - dTrace("QInfo:%p reset signature", pQInfo); - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); + vnodeDecRefCount(pQInfo); } -void *vnodeQueryInTimeRange(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *pSqlExprs, +void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *pSqlExprs, SQueryMeterMsg *pQueryMsg, int32_t *code) { SQInfo *pQInfo; SQuery *pQuery; @@ -670,6 +678,7 @@ void *vnodeQueryInTimeRange(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExp } if (pQInfo->over == 1) { + vnodeAddRefCount(pQInfo); // for retrieve procedure return pQInfo; } @@ -678,15 +687,20 @@ void *vnodeQueryInTimeRange(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExp schedMsg.fp = vnodeQueryData; } - // set in query flag - pQInfo->signature = TSDB_QINFO_QUERY_FLAG; - + /* + * The reference count, which is 2, is for both the current query thread and the future retrieve request, + * which will always be issued by client to acquire data or free SQInfo struct. 
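+   * One of the two references is released by the query thread itself when the query routine finishes; the other is released through vnodeDecRefCount on the retrieve/free path, so the SQInfo object is destroyed only after both sides are done with it.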
+ */ + vnodeAddRefCount(pQInfo); + vnodeAddRefCount(pQInfo); + schedMsg.msg = NULL; schedMsg.thandle = (void *)1; schedMsg.ahandle = pQInfo; - dTrace("QInfo:%p set query flag and prepare runtime environment completed, wait for schedule", pQInfo); - + dTrace("QInfo:%p set query flag and prepare runtime environment completed, ref:%d, wait for schedule", pQInfo, + pQInfo->refCount); + taosScheduleTask(queryQhandle, &schedMsg); return pQInfo; @@ -786,12 +800,13 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE goto _error; } + vnodeAddRefCount(pQInfo); if (pQInfo->over == 1) { return pQInfo; } - pQInfo->signature = TSDB_QINFO_QUERY_FLAG; - + vnodeAddRefCount(pQInfo); + schedMsg.msg = NULL; schedMsg.thandle = (void *)1; schedMsg.ahandle = pQInfo; @@ -833,11 +848,11 @@ int vnodeRetrieveQueryInfo(void *handle, int *numOfRows, int *rowSize, int16_t * } if (pQInfo->killed) { - dTrace("QInfo:%p it is already killed, %p, code:%d", pQInfo, pQuery, pQInfo->code); + dTrace("QInfo:%p query is killed, %p, code:%d", pQInfo, pQuery, pQInfo->code); if (pQInfo->code == TSDB_CODE_SUCCESS) { return TSDB_CODE_QUERY_CANCELLED; } else { // in case of not TSDB_CODE_SUCCESS, return the code to client - return pQInfo->code; + return abs(pQInfo->code); } } @@ -846,8 +861,13 @@ int vnodeRetrieveQueryInfo(void *handle, int *numOfRows, int *rowSize, int16_t * *rowSize = pQuery->rowSize; *timePrec = vnodeList[pQInfo->pObj->vnode].cfg.precision; - - if (pQInfo->code < 0) return -pQInfo->code; + + dTrace("QInfo:%p, retrieve data info completed, precision:%d, rowsize:%d, rows:%d, code:%d", pQInfo, *timePrec, + *rowSize, *numOfRows, pQInfo->code); + + if (pQInfo->code < 0) { // less than 0 means there are error existed. + return -pQInfo->code; + } return TSDB_CODE_SUCCESS; } @@ -859,30 +879,26 @@ int vnodeSaveQueryResult(void *handle, char *data, int32_t *size) { // the remained number of retrieved rows, not the interpolated result int numOfRows = pQInfo->pointsRead - pQInfo->pointsReturned; - int32_t numOfFinal = vnodeCopyQueryResultToMsg(pQInfo, data, numOfRows, size); + int32_t numOfFinal = vnodeCopyQueryResultToMsg(pQInfo, data, numOfRows); pQInfo->pointsReturned += numOfFinal; dTrace("QInfo:%p %d are returned, totalReturned:%d totalRead:%d", pQInfo, numOfFinal, pQInfo->pointsReturned, pQInfo->pointsRead); if (pQInfo->over == 0) { - dTrace("QInfo:%p set query flag, oldSig:%p, func:%s", pQInfo, pQInfo->signature, __FUNCTION__); - uint64_t oldSignature = TSDB_QINFO_SET_QUERY_FLAG(pQInfo); - - /* - * If SQInfo has been released, the value of signature cannot be equalled to - * the address of pQInfo, since in release function, the original value has - * been - * destroyed. However, this memory area may be reused by another function. - * It may be 0 or any value, but it is rarely still be equalled to the address - * of SQInfo. 
- */ - if (oldSignature == 0 || oldSignature != (uint64_t)pQInfo) { - dTrace("%p freed or killed, old sig:%p abort query", pQInfo, oldSignature); + #ifdef _TD_ARM_ + dTrace("QInfo:%p set query flag, sig:%" PRIu64 ", func:vnodeSaveQueryResult", pQInfo, pQInfo->signature); + #else + dTrace("QInfo:%p set query flag, sig:%" PRIu64 ", func:%s", pQInfo, pQInfo->signature, __FUNCTION__); + #endif + + if (pQInfo->killed == 1) { + dTrace("%p freed or killed, abort query", pQInfo); } else { + vnodeAddRefCount(pQInfo); dTrace("%p add query into task queue for schedule", pQInfo); - - SSchedMsg schedMsg; + + SSchedMsg schedMsg = {0}; if (pQInfo->pMeterQuerySupporter != NULL) { if (pQInfo->pMeterQuerySupporter->pSidSet == NULL) { @@ -1009,8 +1025,9 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { if (pDestFilterInfo->filterOnBinary) { pDestFilterInfo->len = htobe64(pFilterInfo->len); - pDestFilterInfo->pz = calloc(1, pDestFilterInfo->len + 1); - memcpy(pDestFilterInfo->pz, pMsg, pDestFilterInfo->len + 1); + + pDestFilterInfo->pz = (int64_t)calloc(1, pDestFilterInfo->len + 1); + memcpy((void*)pDestFilterInfo->pz, pMsg, pDestFilterInfo->len + 1); pMsg += (pDestFilterInfo->len + 1); } else { pDestFilterInfo->lowerBndi = htobe64(pFilterInfo->lowerBndi); @@ -1028,8 +1045,7 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { * 1. simple projection query on meters, we only record the pSqlFuncExprs[i].colIdx value * 2. for complex queries, whole SqlExprs object is required. */ - pQueryMsg->pSqlFuncExprs = malloc(POINTER_BYTES * pQueryMsg->numOfOutputCols); - + pQueryMsg->pSqlFuncExprs = (int64_t)malloc(POINTER_BYTES * pQueryMsg->numOfOutputCols); SSqlFuncExprMsg *pExprMsg = (SSqlFuncExprMsg *)pMsg; for (int32_t i = 0; i < pQueryMsg->numOfOutputCols; ++i) { @@ -1076,7 +1092,7 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { pQueryMsg->colNameLen = htonl(pQueryMsg->colNameLen); if (hasArithmeticFunction) { // column name array assert(pQueryMsg->colNameLen > 0); - pQueryMsg->colNameList = pMsg; + pQueryMsg->colNameList = (int64_t)pMsg; pMsg += pQueryMsg->colNameLen; } @@ -1085,10 +1101,12 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { pSids[0] = (SMeterSidExtInfo *)pMsg; pSids[0]->sid = htonl(pSids[0]->sid); - + pSids[0]->uid = htobe64(pSids[0]->uid); + for (int32_t j = 1; j < pQueryMsg->numOfSids; ++j) { pSids[j] = (SMeterSidExtInfo *)((char *)pSids[j - 1] + sizeof(SMeterSidExtInfo) + pQueryMsg->tagLength); pSids[j]->sid = htonl(pSids[j]->sid); + pSids[j]->uid = htobe64(pSids[j]->uid); } pMsg = (char *)pSids[pQueryMsg->numOfSids - 1]; diff --git a/src/system/detail/src/vnodeShell.c b/src/system/detail/src/vnodeShell.c index 190c1d6ee675ae11e15adecd080bbd20c765e2e9..3d0b9eb51fdf076834973f1b9abf0f660177b5a7 100644 --- a/src/system/detail/src/vnodeShell.c +++ b/src/system/detail/src/vnodeShell.c @@ -14,10 +14,8 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include +#include "os.h" + #include "taosmsg.h" #include "vnode.h" #include "vnodeShell.h" @@ -29,8 +27,9 @@ #include "vnode.h" #include "vnodeRead.h" #include "vnodeUtil.h" +#include "vnodeStore.h" +#include "vnodeStatus.h" -#pragma GCC diagnostic ignored "-Wint-conversion" extern int tsMaxQueues; void * pShellServer = NULL; @@ -39,16 +38,27 @@ SShellObj **shellList = NULL; int vnodeProcessRetrieveRequest(char *pMsg, int msgLen, SShellObj *pObj); int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj); int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, 
SShellObj *pObj); +static void vnodeProcessBatchSubmitTimer(void *param, void *tmrId); int vnodeSelectReqNum = 0; int vnodeInsertReqNum = 0; +typedef struct { + int32_t import; + int32_t vnode; + int32_t numOfSid; + int32_t ssid; // Start sid + SShellObj *pObj; + int64_t offset; // offset relative the blks + char blks[]; +} SBatchSubmitInfo; + void *vnodeProcessMsgFromShell(char *msg, void *ahandle, void *thandle) { int sid, vnode; SShellObj *pObj = (SShellObj *)ahandle; SIntMsg * pMsg = (SIntMsg *)msg; uint32_t peerId, peerIp; - short peerPort; + uint16_t peerPort; char ipstr[20]; if (msg == NULL) { @@ -88,28 +98,35 @@ void *vnodeProcessMsgFromShell(char *msg, void *ahandle, void *thandle) { } } - // if ( vnodeList[vnode].status != TSDB_STATUS_MASTER && pMsg->msgType != TSDB_MSG_TYPE_RETRIEVE ) { - -#ifdef CLUSTER - if (vnodeList[vnode].status != TSDB_STATUS_MASTER) { - taosSendSimpleRsp(thandle, pMsg->msgType + 1, TSDB_CODE_NOT_READY); - dTrace("vid:%d sid:%d, shell msg is ignored since in state:%d", vnode, sid, vnodeList[vnode].status); - } else { -#endif - dTrace("vid:%d sid:%d, msg:%s is received pConn:%p", vnode, sid, taosMsg[pMsg->msgType], thandle); + dTrace("vid:%d sid:%d, msg:%s is received pConn:%p", vnode, sid, taosMsg[pMsg->msgType], thandle); - if (pMsg->msgType == TSDB_MSG_TYPE_QUERY) { + if (pMsg->msgType == TSDB_MSG_TYPE_QUERY) { + if (vnodeList[vnode].vnodeStatus == TSDB_VN_STATUS_MASTER || vnodeList[vnode].vnodeStatus == TSDB_VN_STATUS_SLAVE) { vnodeProcessQueryRequest((char *)pMsg->content, pMsg->msgLen - sizeof(SIntMsg), pObj); - } else if (pMsg->msgType == TSDB_MSG_TYPE_RETRIEVE) { - vnodeProcessRetrieveRequest((char *)pMsg->content, pMsg->msgLen - sizeof(SIntMsg), pObj); - } else if (pMsg->msgType == TSDB_MSG_TYPE_SUBMIT) { - vnodeProcessShellSubmitRequest((char *)pMsg->content, pMsg->msgLen - sizeof(SIntMsg), pObj); } else { - dError("%s is not processed", taosMsg[pMsg->msgType]); + taosSendSimpleRsp(thandle, pMsg->msgType + 1, TSDB_CODE_NOT_READY); + dTrace("vid:%d sid:%d, shell query msg is ignored since in status:%s", vnode, sid, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); } -#ifdef CLUSTER + } else if (pMsg->msgType == TSDB_MSG_TYPE_RETRIEVE) { + if (vnodeList[vnode].vnodeStatus == TSDB_VN_STATUS_MASTER || vnodeList[vnode].vnodeStatus == TSDB_VN_STATUS_SLAVE) { + vnodeProcessRetrieveRequest((char *) pMsg->content, pMsg->msgLen - sizeof(SIntMsg), pObj); + } else { + taosSendSimpleRsp(thandle, pMsg->msgType + 1, TSDB_CODE_NOT_READY); + dTrace("vid:%d sid:%d, shell retrieve msg is ignored since in status:%s", vnode, sid, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); + } + } else if (pMsg->msgType == TSDB_MSG_TYPE_SUBMIT) { + if (vnodeList[vnode].vnodeStatus == TSDB_VN_STATUS_MASTER) { + vnodeProcessShellSubmitRequest((char *) pMsg->content, pMsg->msgLen - sizeof(SIntMsg), pObj); + } else if (vnodeList[vnode].vnodeStatus == TSDB_VN_STATUS_SLAVE) { + taosSendSimpleRsp(thandle, pMsg->msgType + 1, TSDB_CODE_REDIRECT); + dTrace("vid:%d sid:%d, shell submit msg is redirect since in status:%s", vnode, sid, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); + } else { + taosSendSimpleRsp(thandle, pMsg->msgType + 1, TSDB_CODE_NOT_READY); + dTrace("vid:%d sid:%d, shell submit msg is ignored since in status:%s", vnode, sid, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); + } + } else { + dError("%s is not processed", taosMsg[pMsg->msgType]); } -#endif return pObj; } @@ -128,11 +145,9 @@ int vnodeInitShell() { if (numOfThreads < 1) numOfThreads = 1; 
memset(&rpcInit, 0, sizeof(rpcInit)); -#ifdef CLUSTER - rpcInit.localIp = tsInternalIp; -#else - rpcInit.localIp = "0.0.0.0"; -#endif + + rpcInit.localIp = tsAnyIp ? "0.0.0.0" : tsPrivateIp; + rpcInit.localPort = tsVnodeShellPort; rpcInit.label = "DND-shell"; rpcInit.numOfThreads = numOfThreads; @@ -156,6 +171,11 @@ int vnodeInitShell() { } int vnodeOpenShellVnode(int vnode) { + if (shellList[vnode] != NULL) { + dError("vid:%d, shell is already opened", vnode); + return -1; + } + const int32_t MIN_NUM_OF_SESSIONS = 300; SVnodeCfg *pCfg = &vnodeList[vnode].cfg; @@ -164,23 +184,31 @@ int vnodeOpenShellVnode(int vnode) { size_t size = sessions * sizeof(SShellObj); shellList[vnode] = (SShellObj *)calloc(1, size); if (shellList[vnode] == NULL) { - dError("vid:%d failed to allocate shellObj, size:%d", vnode, size); + dError("vid:%d, sessions:%d, failed to allocate shellObj, size:%d", vnode, pCfg->maxSessions, size); return -1; } if(taosOpenRpcChannWithQ(pShellServer, vnode, sessions, rpcQhandle[(vnode+1)%tsMaxQueues]) != TSDB_CODE_SUCCESS) { + dError("vid:%d, sessions:%d, failed to open shell", vnode, pCfg->maxSessions); return -1; } + dPrint("vid:%d, sessions:%d, shell is opened", vnode, pCfg->maxSessions); return TSDB_CODE_SUCCESS; } static void vnodeDelayedFreeResource(void *param, void *tmrId) { int32_t vnode = *(int32_t*) param; - taosCloseRpcChann(pShellServer, vnode); // close connection - tfree (shellList[vnode]); //free SShellObj + dTrace("vid:%d, start to free resources for 500ms arrived", vnode); + taosCloseRpcChann(pShellServer, vnode); // close connection + tfree(shellList[vnode]); //free SShellObj tfree(param); + + memset(vnodeList + vnode, 0, sizeof(SVnodeObj)); + dTrace("vid:%d, status set to %s", vnode, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); + + vnodeCalcOpenVnodes(); } void vnodeCloseShellVnode(int vnode) { @@ -232,6 +260,7 @@ int vnodeSendShellSubmitRspMsg(SShellObj *pObj, int code, int numOfPoints) { char *pMsg, *pStart; int msgLen; + dTrace("code:%d numOfTotalPoints:%d", code, numOfPoints); pStart = taosBuildRspMsgWithSize(pObj->thandle, TSDB_MSG_TYPE_SUBMIT_RSP, 128); if (pStart == NULL) return -1; pMsg = pStart; @@ -263,13 +292,14 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { } if (pQueryMsg->numOfSids <= 0) { + dError("Invalid number of meters to query, numOfSids:%d", pQueryMsg->numOfSids); code = TSDB_CODE_INVALID_QUERY_MSG; goto _query_over; } if (pQueryMsg->vnode >= TSDB_MAX_VNODES || pQueryMsg->vnode < 0) { - dTrace("qmsg:%p,vid:%d is out of range", pQueryMsg, pQueryMsg->vnode); - code = TSDB_CODE_INVALID_SESSION_ID; + dError("qmsg:%p,vid:%d is out of range", pQueryMsg, pQueryMsg->vnode); + code = TSDB_CODE_INVALID_TABLE_ID; goto _query_over; } @@ -278,34 +308,33 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { if (pVnode->cfg.maxSessions == 0) { dError("qmsg:%p,vid:%d is not activated yet", pQueryMsg, pQueryMsg->vnode); vnodeSendVpeerCfgMsg(pQueryMsg->vnode); - code = TSDB_CODE_NOT_ACTIVE_SESSION; + code = TSDB_CODE_NOT_ACTIVE_TABLE; goto _query_over; } if (!(pVnode->accessState & TSDB_VN_READ_ACCCESS)) { + dError("qmsg:%p,vid:%d access not allowed", pQueryMsg, pQueryMsg->vnode); code = TSDB_CODE_NO_READ_ACCESS; goto _query_over; } - - if (pQueryMsg->pSidExtInfo == 0) { - dTrace("qmsg:%p,SQueryMeterMsg wrong format", pQueryMsg); - code = TSDB_CODE_INVALID_QUERY_MSG; + + if (pVnode->meterList == NULL) { + dError("qmsg:%p,vid:%d has been closed", pQueryMsg, pQueryMsg->vnode); + code = 
TSDB_CODE_NOT_ACTIVE_VNODE; goto _query_over; } - if (pVnode->meterList == NULL) { - dError("qmsg:%p,vid:%d has been closed", pQueryMsg, pQueryMsg->vnode); - code = TSDB_CODE_NOT_ACTIVE_SESSION; + if (pQueryMsg->pSidExtInfo == 0) { + dError("qmsg:%p,SQueryMeterMsg wrong format", pQueryMsg); + code = TSDB_CODE_INVALID_QUERY_MSG; goto _query_over; } pSids = (SMeterSidExtInfo **)pQueryMsg->pSidExtInfo; for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) { if (pSids[i]->sid >= pVnode->cfg.maxSessions || pSids[i]->sid < 0) { - dTrace("qmsg:%p sid:%d is out of range, valid range:[%d,%d]", pQueryMsg, pSids[i]->sid, 0, - pVnode->cfg.maxSessions); - - code = TSDB_CODE_INVALID_SESSION_ID; + dError("qmsg:%p sid:%d out of range, valid range:[%d,%d]", pQueryMsg, pSids[i]->sid, 0, pVnode->cfg.maxSessions); + code = TSDB_CODE_INVALID_TABLE_ID; goto _query_over; } } @@ -347,7 +376,7 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { if (QUERY_IS_STABLE_QUERY(pQueryMsg->queryType)) { pObj->qhandle = vnodeQueryOnMultiMeters(pMeterObjList, pGroupbyExpr, pExprs, pQueryMsg, &code); } else { - pObj->qhandle = vnodeQueryInTimeRange(pMeterObjList, pGroupbyExpr, pExprs, pQueryMsg, &code); + pObj->qhandle = vnodeQueryOnSingleTable(pMeterObjList, pGroupbyExpr, pExprs, pQueryMsg, &code); } _query_over: @@ -360,12 +389,12 @@ _query_over: tfree(pMeterObjList); ret = vnodeSendQueryRspMsg(pObj, code, pObj->qhandle); - free(pQueryMsg->pSidExtInfo); + tfree(pQueryMsg->pSidExtInfo); for(int32_t i = 0; i < pQueryMsg->numOfCols; ++i) { vnodeFreeColumnInfo(&pQueryMsg->colList[i]); } - __sync_fetch_and_add(&vnodeSelectReqNum, 1); + atomic_fetch_add_32(&vnodeSelectReqNum, 1); return ret; } @@ -385,14 +414,21 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { pRetrieve = (SRetrieveMeterMsg *)pMsg; pRetrieve->free = htons(pRetrieve->free); + if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) { + dTrace("retrieve msg, handle:%p, free:%d", pRetrieve->qhandle, pRetrieve->free); + } else { + dTrace("retrieve msg to free resource from client, handle:%p, free:%d", pRetrieve->qhandle, pRetrieve->free); + } + /* * in case of server restart, apps may hold qhandle created by server before restart, * which is actually invalid, therefore, signature check is required. 
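   * The check below compares the qhandle carried in the retrieve message against the one saved in the shell object before it is dereferenced.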
*/ if (pRetrieve->qhandle == (uint64_t)pObj->qhandle) { // if free flag is set, client wants to clean the resources - if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) + if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) { code = vnodeRetrieveQueryInfo((void *)(pRetrieve->qhandle), &numOfRows, &rowSize, &timePrec); + } } else { dError("QInfo:%p, qhandle:%p is not matched with saved:%p", pObj->qhandle, pRetrieve->qhandle, pObj->qhandle); code = TSDB_CODE_INVALID_QHANDLE; @@ -418,8 +454,8 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { pRsp->precision = htons(timePrec); if (code == TSDB_CODE_SUCCESS) { - pRsp->offset = htobe64(vnodeGetOffsetVal(pRetrieve->qhandle)); - pRsp->useconds = ((SQInfo *)(pRetrieve->qhandle))->useconds; + pRsp->offset = htobe64(vnodeGetOffsetVal((void*)pRetrieve->qhandle)); + pRsp->useconds = htobe64(((SQInfo *)(pRetrieve->qhandle))->useconds); } else { pRsp->offset = 0; pRsp->useconds = 0; @@ -428,19 +464,17 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { pMsg = pRsp->data; if (numOfRows > 0 && code == TSDB_CODE_SUCCESS) { - int32_t oldSize = size; vnodeSaveQueryResult((void *)(pRetrieve->qhandle), pRsp->data, &size); - if (oldSize > size) { - pRsp->compress = htons(1); // denote that the response msg is compressed - } } pMsg += size; msgLen = pMsg - pStart; + assert(code != TSDB_CODE_ACTION_IN_PROGRESS); + if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS)) { dTrace("QInfo:%p %s free qhandle code:%d", pObj->qhandle, __FUNCTION__, code); - vnodeFreeQInfoInQueue(pObj->qhandle); + vnodeDecRefCount(pObj->qhandle); pObj->qhandle = NULL; } @@ -448,8 +482,6 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { _exit: free(pSched->msg); - - return; } int vnodeProcessRetrieveRequest(char *pMsg, int msgLen, SShellObj *pObj) { @@ -465,10 +497,92 @@ int vnodeProcessRetrieveRequest(char *pMsg, int msgLen, SShellObj *pObj) { return msgLen; } +static int vnodeCheckSubmitBlockContext(SShellSubmitBlock *pBlocks, SVnodeObj *pVnode) { + int32_t sid = htonl(pBlocks->sid); + uint64_t uid = htobe64(pBlocks->uid); + + if (sid >= pVnode->cfg.maxSessions || sid <= 0) { + dError("vid:%d sid:%d, sid is out of range", pVnode->vnode, sid); + return TSDB_CODE_INVALID_TABLE_ID; + } + + SMeterObj *pMeterObj = pVnode->meterList[sid]; + if (pMeterObj == NULL) { + dError("vid:%d sid:%d, not active table", pVnode->vnode, sid); + vnodeSendMeterCfgMsg(pVnode->vnode, sid); + return TSDB_CODE_NOT_ACTIVE_TABLE; + } + + if (pMeterObj->uid != uid) { + dError("vid:%d sid:%d id:%s, uid:%lld, uid in msg:%lld, uid mismatch", pVnode->vnode, sid, pMeterObj->meterId, + pMeterObj->uid, uid); + return TSDB_CODE_INVALID_SUBMIT_MSG; + } + + return TSDB_CODE_SUCCESS; +} + +static int vnodeDoSubmitJob(SVnodeObj *pVnode, int import, int32_t *ssid, int32_t esid, SShellSubmitBlock **ppBlocks, + TSKEY now, SShellObj *pObj) { + SShellSubmitBlock *pBlocks = *ppBlocks; + int code = TSDB_CODE_SUCCESS; + int32_t numOfPoints = 0; + int32_t i = 0; + SShellSubmitBlock tBlock; + + for (i = *ssid; i < esid; i++) { + numOfPoints = 0; + tBlock = *pBlocks; + + code = vnodeCheckSubmitBlockContext(pBlocks, pVnode); + if (code != TSDB_CODE_SUCCESS) break; + + SMeterObj *pMeterObj = (SMeterObj *)(pVnode->meterList[htonl(pBlocks->sid)]); + + // don't include sid, vid + int32_t subMsgLen = sizeof(pBlocks->numOfRows) + htons(pBlocks->numOfRows) * pMeterObj->bytesPerPoint; + int32_t sversion = 
htonl(pBlocks->sversion); + + if (import) { + code = vnodeImportPoints(pMeterObj, (char *)&(pBlocks->numOfRows), subMsgLen, TSDB_DATA_SOURCE_SHELL, pObj, + sversion, &numOfPoints, now); + pObj->numOfTotalPoints += numOfPoints; + + // records for one table should be consecutive located in the payload buffer, which is guaranteed by client + if (code == TSDB_CODE_SUCCESS) { + pObj->count--; + } + } else { + code = vnodeInsertPoints(pMeterObj, (char *)&(pBlocks->numOfRows), subMsgLen, TSDB_DATA_SOURCE_SHELL, NULL, + sversion, &numOfPoints, now); + pObj->numOfTotalPoints += numOfPoints; + } + + if (code != TSDB_CODE_SUCCESS) break; + + pBlocks = (SShellSubmitBlock *)((char *)pBlocks + sizeof(SShellSubmitBlock) + + htons(pBlocks->numOfRows) * pMeterObj->bytesPerPoint); + } + + *ssid = i; + *ppBlocks = pBlocks; + /* Since the pBlock part can be changed by the vnodeForwardToPeer interface, + * which is also possible to be used again. For that case, we just copy the original + * block content back. + */ + if (import && (code == TSDB_CODE_ACTION_IN_PROGRESS)) { + memcpy((void *)pBlocks, (void *)&tBlock, sizeof(SShellSubmitBlock)); + } + + return code; +} + int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj) { int code = 0, ret = 0; + int32_t i = 0; SShellSubmitMsg shellSubmit = *(SShellSubmitMsg *)pMsg; SShellSubmitMsg *pSubmit = &shellSubmit; + SShellSubmitBlock *pBlocks = NULL; pSubmit->vnode = htons(pSubmit->vnode); pSubmit->numOfSid = htonl(pSubmit->numOfSid); @@ -481,7 +595,7 @@ int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj) { if (pSubmit->vnode >= TSDB_MAX_VNODES || pSubmit->vnode < 0) { dTrace("vnode:%d is out of range", pSubmit->vnode); - code = TSDB_CODE_INVALID_SESSION_ID; + code = TSDB_CODE_INVALID_VNODE_ID; goto _submit_over; } @@ -489,7 +603,7 @@ int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj) { if (pVnode->cfg.maxSessions == 0 || pVnode->meterList == NULL) { dError("vid:%d is not activated for submit", pSubmit->vnode); vnodeSendVpeerCfgMsg(pSubmit->vnode); - code = TSDB_CODE_NOT_ACTIVE_SESSION; + code = TSDB_CODE_NOT_ACTIVE_VNODE; goto _submit_over; } @@ -500,98 +614,75 @@ int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj) { if (tsAvailDataDirGB < tsMinimalDataDirGB) { dError("server disk space remain %.3f GB, need at least %.3f GB, stop writing", tsAvailDataDirGB, tsMinimalDataDirGB); - code = TSDB_CODE_SERVER_NO_SPACE; + code = TSDB_CODE_SERV_NO_DISKSPACE; goto _submit_over; } pObj->count = pSubmit->numOfSid; // for import pObj->code = 0; // for import - pObj->numOfTotalPoints = 0; // for import - SShellSubmitBlock *pBlocks = (SShellSubmitBlock *)(pMsg + sizeof(SShellSubmitMsg)); + pObj->numOfTotalPoints = 0; - int32_t numOfPoints = 0; - int32_t numOfTotalPoints = 0; - // We take current time here to avoid it in the for loop. 
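  // the same 'now' value is passed down to vnodeDoSubmitJob and reused for every block of this submit request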
TSKEY now = taosGetTimestamp(pVnode->cfg.precision); - for (int32_t i = 0; i < pSubmit->numOfSid; ++i) { - numOfPoints = 0; - - pBlocks->sid = htonl(pBlocks->sid); - pBlocks->uid = htobe64(pBlocks->uid); - - if (pBlocks->sid >= pVnode->cfg.maxSessions || pBlocks->sid <= 0) { - dTrace("sid:%d is out of range", pBlocks->sid); - code = TSDB_CODE_INVALID_SESSION_ID; - goto _submit_over; - } - - int vnode = pSubmit->vnode; - int sid = pBlocks->sid; + pBlocks = (SShellSubmitBlock *)(pMsg + sizeof(SShellSubmitMsg)); + i = 0; + code = vnodeDoSubmitJob(pVnode, pSubmit->import, &i, pSubmit->numOfSid, &pBlocks, now, pObj); - SMeterObj *pMeterObj = vnodeList[vnode].meterList[sid]; - if (pMeterObj == NULL) { - dError("vid:%d sid:%d, no active session", vnode, sid); - vnodeSendMeterCfgMsg(vnode, sid); - code = TSDB_CODE_NOT_ACTIVE_SESSION; - goto _submit_over; +_submit_over: + ret = 0; + if (pSubmit->import) { // Import case + if (code == TSDB_CODE_ACTION_IN_PROGRESS) { + + SBatchSubmitInfo *pSubmitInfo = + (SBatchSubmitInfo *)calloc(1, sizeof(SBatchSubmitInfo) + msgLen - sizeof(SShellSubmitMsg)); + if (pSubmitInfo == NULL) { + code = TSDB_CODE_SERV_OUT_OF_MEMORY; + ret = vnodeSendShellSubmitRspMsg(pObj, code, pObj->numOfTotalPoints); + } else { // Start a timer to process the next part of request + pSubmitInfo->import = 1; + pSubmitInfo->vnode = pSubmit->vnode; + pSubmitInfo->numOfSid = pSubmit->numOfSid; + pSubmitInfo->ssid = i; // start from this position, not the initial position + pSubmitInfo->pObj = pObj; + pSubmitInfo->offset = ((char *)pBlocks) - (pMsg + sizeof(SShellSubmitMsg)); + assert(pSubmitInfo->offset >= 0); + memcpy((void *)(pSubmitInfo->blks), (void *)(pMsg + sizeof(SShellSubmitMsg)), msgLen - sizeof(SShellSubmitMsg)); + taosTmrStart(vnodeProcessBatchSubmitTimer, 10, (void *)pSubmitInfo, vnodeTmrCtrl); + } + } else { + if (code == TSDB_CODE_SUCCESS) assert(pObj->count == 0); + ret = vnodeSendShellSubmitRspMsg(pObj, code, pObj->numOfTotalPoints); } + } else { // Insert case + ret = vnodeSendShellSubmitRspMsg(pObj, code, pObj->numOfTotalPoints); + } - if (pMeterObj->uid != pBlocks->uid) { - dError("vid:%d sid:%d, meterId:%s, uid:%lld, uid in msg:%lld, uid mismatch", vnode, sid, pMeterObj->meterId, - pMeterObj->uid, pBlocks->uid); - code = TSDB_CODE_INVALID_SUBMIT_MSG; - goto _submit_over; - } + atomic_fetch_add_32(&vnodeInsertReqNum, 1); + return ret; +} - // dont include sid, vid - int subMsgLen = sizeof(pBlocks->numOfRows) + htons(pBlocks->numOfRows) * pMeterObj->bytesPerPoint; - int sversion = htonl(pBlocks->sversion); +static void vnodeProcessBatchSubmitTimer(void *param, void *tmrId) { + SBatchSubmitInfo *pSubmitInfo = (SBatchSubmitInfo *)param; + assert(pSubmitInfo != NULL && pSubmitInfo->import); - int32_t state = TSDB_METER_STATE_READY; - if (pSubmit->import) { - state = vnodeSetMeterState(pMeterObj, TSDB_METER_STATE_IMPORTING); - } else { - state = vnodeSetMeterState(pMeterObj, TSDB_METER_STATE_INSERT); - } + int32_t i = 0; + int32_t code = TSDB_CODE_SUCCESS; - if (state == TSDB_METER_STATE_READY) { - // meter status is ready for insert/import - if (pSubmit->import) { - code = vnodeImportPoints(pMeterObj, (char *) &(pBlocks->numOfRows), subMsgLen, TSDB_DATA_SOURCE_SHELL, pObj, - sversion, &numOfPoints, now); - vnodeClearMeterState(pMeterObj, TSDB_METER_STATE_IMPORTING); - } else { - code = vnodeInsertPoints(pMeterObj, (char *) &(pBlocks->numOfRows), subMsgLen, TSDB_DATA_SOURCE_SHELL, NULL, - sversion, &numOfPoints, now); - vnodeClearMeterState(pMeterObj, TSDB_METER_STATE_INSERT); 
- } + SShellObj * pShell = pSubmitInfo->pObj; + SVnodeObj * pVnode = &vnodeList[pSubmitInfo->vnode]; + SShellSubmitBlock *pBlocks = (SShellSubmitBlock *)(pSubmitInfo->blks + pSubmitInfo->offset); + TSKEY now = taosGetTimestamp(pVnode->cfg.precision); + i = pSubmitInfo->ssid; - if (code != TSDB_CODE_SUCCESS) {break;} - } else { - if (vnodeIsMeterState(pMeterObj, TSDB_METER_STATE_DELETING)) { - dTrace("vid:%d sid:%d id:%s, it is removed, state:%d", pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, - pMeterObj->state); - code = TSDB_CODE_NOT_ACTIVE_SESSION; - break; - } else {// waiting for 300ms by default and try again - dTrace("vid:%d sid:%d id:%s, try submit again since in state:%d", pMeterObj->vnode, pMeterObj->sid, - pMeterObj->meterId, pMeterObj->state); - - code = TSDB_CODE_ACTION_IN_PROGRESS; - break; - } - } + code = vnodeDoSubmitJob(pVnode, pSubmitInfo->import, &i, pSubmitInfo->numOfSid, &pBlocks, now, pShell); - numOfTotalPoints += numOfPoints; - pBlocks = (SShellSubmitBlock *)((char *)pBlocks + sizeof(SShellSubmitBlock) + - htons(pBlocks->numOfRows) * pMeterObj->bytesPerPoint); + if (code == TSDB_CODE_ACTION_IN_PROGRESS) { + pSubmitInfo->ssid = i; + pSubmitInfo->offset = ((char *)pBlocks) - pSubmitInfo->blks; + taosTmrStart(vnodeProcessBatchSubmitTimer, 10, (void *)pSubmitInfo, vnodeTmrCtrl); + } else { + if (code == TSDB_CODE_SUCCESS) assert(pShell->count == 0); + tfree(param); + vnodeSendShellSubmitRspMsg(pShell, code, pShell->numOfTotalPoints); } - -_submit_over: - // for import, send the submit response only when return code is not zero - if (pSubmit->import == 0 || code != 0) ret = vnodeSendShellSubmitRspMsg(pObj, code, numOfTotalPoints); - - __sync_fetch_and_add(&vnodeInsertReqNum, 1); - return ret; } diff --git a/src/system/detail/src/vnodeStatus.c b/src/system/detail/src/vnodeStatus.c new file mode 100644 index 0000000000000000000000000000000000000000..4756496f4018c23d2890139009d27636f8cc3af8 --- /dev/null +++ b/src/system/detail/src/vnodeStatus.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+#include "taosmsg.h"
+#include "tsdb.h"
+#include "vnodeStatus.h"
+
+const char* taosGetVgroupStatusStr(int32_t vgroupStatus) {
+  switch (vgroupStatus) {
+    case TSDB_VG_STATUS_READY: return "ready";
+    case TSDB_VG_STATUS_IN_PROGRESS: return "inprogress";
+    case TSDB_VG_STATUS_COMMITLOG_INIT_FAILED: return "commitlog_init_failed";
+    case TSDB_VG_STATUS_INIT_FAILED: return "init_failed";
+    case TSDB_VG_STATUS_FULL: return "full";
+    default: return "undefined";
+  }
+}
+
+const char* taosGetDbStatusStr(int32_t dbStatus) {
+  switch (dbStatus) {
+    case TSDB_DB_STATUS_READY: return "ready";
+    case TSDB_DB_STATUS_DROPPING: return "dropping";
+    case TSDB_DB_STATUS_DROP_FROM_SDB: return "drop_from_sdb";
+    default: return "undefined";
+  }
+}
+
+const char* taosGetVnodeStatusStr(int32_t vnodeStatus) {
+  switch (vnodeStatus) {
+    case TSDB_VN_STATUS_OFFLINE: return "offline";
+    case TSDB_VN_STATUS_CREATING: return "creating";
+    case TSDB_VN_STATUS_UNSYNCED: return "unsynced";
+    case TSDB_VN_STATUS_SLAVE: return "slave";
+    case TSDB_VN_STATUS_MASTER: return "master";
+    case TSDB_VN_STATUS_CLOSING: return "closing";
+    case TSDB_VN_STATUS_DELETING: return "deleting";
+    default: return "undefined";
+  }
+}
+
+const char* taosGetVnodeSyncStatusStr(int32_t vnodeSyncStatus) {
+  switch (vnodeSyncStatus) {
+    case TSDB_VN_SYNC_STATUS_INIT: return "init";
+    case TSDB_VN_SYNC_STATUS_SYNCING: return "syncing";
+    case TSDB_VN_SYNC_STATUS_SYNC_CACHE: return "sync_cache";
+    case TSDB_VN_SYNC_STATUS_SYNC_FILE: return "sync_file";
+    default: return "undefined";
+  }
+}
+
+const char* taosGetVnodeDropStatusStr(int32_t dropping) {
+  switch (dropping) {
+    case TSDB_VN_DROP_STATUS_READY: return "ready";
+    case TSDB_VN_DROP_STATUS_DROPPING: return "dropping";
+    default: return "undefined";
+  }
+}
+
+const char* taosGetDnodeStatusStr(int32_t dnodeStatus) {
+  switch (dnodeStatus) {
+    case TSDB_DN_STATUS_OFFLINE: return "offline";
+    case TSDB_DN_STATUS_READY: return "ready";
+    default: return "undefined";
+  }
+}
+
+const char* taosGetDnodeLbStatusStr(int32_t dnodeBalanceStatus) {
+  switch (dnodeBalanceStatus) {
+    case TSDB_DN_LB_STATUS_BALANCED: return "balanced";
+    case TSDB_DN_LB_STATUS_BALANCING: return "balancing";
+    case TSDB_DN_LB_STATUS_OFFLINE_REMOVING: return "offline removing";
+    case TSDB_DN_LB_STATE_SHELL_REMOVING: return "removing";
+    default: return "undefined";
+  }
+}
+
+const char* taosGetVgroupLbStatusStr(int32_t vglbStatus) {
+  switch (vglbStatus) {
+    case TSDB_VG_LB_STATUS_READY: return "ready";
+    case TSDB_VG_LB_STATUS_UPDATE: return "updating";
+    default: return "undefined";
+  }
+}
+
+const char* taosGetVnodeStreamStatusStr(int32_t vnodeStreamStatus) {
+  switch (vnodeStreamStatus) {
+    case TSDB_VN_STREAM_STATUS_START: return "start";
+    case TSDB_VN_STREAM_STATUS_STOP: return "stop";
+    default: return "undefined";
+  }
+}
+
+const char* taosGetTableStatusStr(int32_t tableStatus) {
+  switch (tableStatus) {
+    case TSDB_METER_STATE_INSERTING: return "inserting";
+    case TSDB_METER_STATE_IMPORTING: return "importing";
+    case TSDB_METER_STATE_UPDATING: return "updating";
+    case TSDB_METER_STATE_DROPPING: return "deleting";
+    case TSDB_METER_STATE_DROPPED: return "dropped";
+    case TSDB_METER_STATE_READY: return "ready";
+    default: return "undefined";
+  }
+}
diff --git a/src/system/detail/src/vnodeStore.c b/src/system/detail/src/vnodeStore.c
index e00b4de7bc42d611b5fd7fe0f2d2652117e41f4f..360216e9645f6e1bbbc9d15884bc9996381e55b8 100644
--- a/src/system/detail/src/vnodeStore.c
+++
b/src/system/detail/src/vnodeStore.c @@ -14,11 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include +#include "os.h" #include "dnodeSystem.h" #include "trpc.h" @@ -26,20 +22,20 @@ #include "vnode.h" #include "vnodeStore.h" #include "vnodeUtil.h" - -#pragma GCC diagnostic push -#pragma GCC diagnostic warning "-Woverflow" +#include "vnodeStatus.h" int tsMaxVnode = -1; int tsOpenVnodes = 0; SVnodeObj *vnodeList = NULL; -int vnodeInitStoreVnode(int vnode) { +static int vnodeInitStoreVnode(int vnode) { SVnodeObj *pVnode = vnodeList + vnode; pVnode->vnode = vnode; vnodeOpenMetersVnode(vnode); - if (pVnode->cfg.maxSessions == 0) return 0; + if (pVnode->cfg.maxSessions <= 0) { + return TSDB_CODE_SUCCESS; + } pVnode->firstKey = taosGetTimestamp(pVnode->cfg.precision); @@ -49,9 +45,10 @@ int vnodeInitStoreVnode(int vnode) { return -1; } - if (vnodeInitFile(vnode) < 0) return -1; - - // vnodeOpenMeterMgmtStoreVnode(vnode); + if (vnodeInitFile(vnode) < 0) { + dError("vid:%d, files init failed.", pVnode->vnode); + return -1; + } if (vnodeInitCommit(vnode) < 0) { dError("vid:%d, commit init failed.", pVnode->vnode); @@ -59,7 +56,7 @@ int vnodeInitStoreVnode(int vnode) { } pthread_mutex_init(&(pVnode->vmutex), NULL); - dTrace("vid:%d, storage initialized, version:%ld fileId:%d numOfFiles:%d", vnode, pVnode->version, pVnode->fileId, + dPrint("vid:%d, storage initialized, version:%ld fileId:%d numOfFiles:%d", vnode, pVnode->version, pVnode->fileId, pVnode->numOfFiles); return 0; @@ -74,10 +71,17 @@ int vnodeOpenVnode(int vnode) { pVnode->accessState = TSDB_VN_ALL_ACCCESS; // vnode is empty - if (pVnode->cfg.maxSessions == 0) return 0; + if (pVnode->cfg.maxSessions <= 0) { + return TSDB_CODE_SUCCESS; + } + + if (!(pVnode->vnodeStatus == TSDB_VN_STATUS_OFFLINE || pVnode->vnodeStatus == TSDB_VN_STATUS_CREATING)) { + dError("vid:%d, status:%s, cannot enter open operation", vnode, taosGetVnodeStatusStr(pVnode->vnodeStatus)); + return TSDB_CODE_INVALID_VNODE_STATUS; + } + dPrint("vid:%d, status:%s, start to open", vnode, taosGetVnodeStatusStr(pVnode->vnodeStatus)); pthread_mutex_lock(&dmutex); - // vnodeOpenMeterMgmtVnode(vnode); // not enough memory, abort if ((code = vnodeOpenShellVnode(vnode)) != TSDB_CODE_SUCCESS) { @@ -97,14 +101,13 @@ int vnodeOpenVnode(int vnode) { vnodeOpenStreams(pVnode, NULL); #endif - dTrace("vid:%d, vnode is opened, openVnodes:%d", vnode, tsOpenVnodes); + dPrint("vid:%d, vnode is opened, openVnodes:%d, status:%s", vnode, tsOpenVnodes, taosGetVnodeStatusStr(pVnode->vnodeStatus)); - return 0; + return TSDB_CODE_SUCCESS; } static int32_t vnodeMarkAllMetersDropped(SVnodeObj* pVnode) { if (pVnode->meterList == NULL) { - assert(pVnode->cfg.maxSessions == 0); return TSDB_CODE_SUCCESS; } @@ -115,7 +118,7 @@ static int32_t vnodeMarkAllMetersDropped(SVnodeObj* pVnode) { } else { // set the meter is to be deleted SMeterObj* pObj = pVnode->meterList[sid]; if (pObj != NULL) { - pObj->state = TSDB_METER_STATE_DELETED; + pObj->state = TSDB_METER_STATE_DROPPED; } } } @@ -123,7 +126,7 @@ static int32_t vnodeMarkAllMetersDropped(SVnodeObj* pVnode) { return ready? 
TSDB_CODE_SUCCESS:TSDB_CODE_ACTION_IN_PROGRESS; } -int vnodeCloseVnode(int vnode) { +static int vnodeCloseVnode(int vnode) { if (vnodeList == NULL) return TSDB_CODE_SUCCESS; SVnodeObj* pVnode = &vnodeList[vnode]; @@ -134,12 +137,23 @@ int vnodeCloseVnode(int vnode) { return TSDB_CODE_SUCCESS; } + if (pVnode->vnodeStatus == TSDB_VN_STATUS_DELETING) { + dPrint("vid:%d, status:%s, another thread performed delete operation", vnode, taosGetVnodeStatusStr(pVnode->vnodeStatus)); + return TSDB_CODE_SUCCESS; + } else { + dPrint("vid:%d, status:%s, enter close operation", vnode, taosGetVnodeStatusStr(pVnode->vnodeStatus)); + pVnode->vnodeStatus = TSDB_VN_STATUS_CLOSING; + } + // set the meter is dropped flag if (vnodeMarkAllMetersDropped(pVnode) != TSDB_CODE_SUCCESS) { pthread_mutex_unlock(&dmutex); return TSDB_CODE_ACTION_IN_PROGRESS; } + dPrint("vid:%d, status:%s, enter delete operation", vnode, taosGetVnodeStatusStr(pVnode->vnodeStatus)); + pVnode->vnodeStatus = TSDB_VN_STATUS_DELETING; + vnodeCloseStream(vnodeList + vnode); vnodeCancelCommit(vnodeList + vnode); vnodeClosePeerVnode(vnode); @@ -153,9 +167,6 @@ int vnodeCloseVnode(int vnode) { if (tsMaxVnode == vnode) tsMaxVnode = vnode - 1; tfree(vnodeList[vnode].meterIndex); - memset(vnodeList + vnode, 0, sizeof(SVnodeObj)); - - vnodeCalcOpenVnodes(); pthread_mutex_unlock(&dmutex); return TSDB_CODE_SUCCESS; @@ -164,7 +175,12 @@ int vnodeCloseVnode(int vnode) { int vnodeCreateVnode(int vnode, SVnodeCfg *pCfg, SVPeerDesc *pDesc) { char fileName[128]; - vnodeList[vnode].status = TSDB_STATUS_CREATING; + if (vnodeList[vnode].vnodeStatus != TSDB_VN_STATUS_OFFLINE) { + dError("vid:%d, status:%s, cannot enter create operation", vnode, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); + return TSDB_CODE_INVALID_VNODE_STATUS; + } + + vnodeList[vnode].vnodeStatus = TSDB_VN_STATUS_CREATING; sprintf(fileName, "%s/vnode%d", tsDirectory, vnode); mkdir(fileName, 0755); @@ -181,14 +197,14 @@ int vnodeCreateVnode(int vnode, SVnodeCfg *pCfg, SVPeerDesc *pDesc) { return TSDB_CODE_VG_INIT_FAILED; } - if (vnodeInitStoreVnode(vnode) != 0) { + if (vnodeInitStoreVnode(vnode) < 0) { return TSDB_CODE_VG_COMMITLOG_INIT_FAILED; } return vnodeOpenVnode(vnode); } -void vnodeRemoveDataFiles(int vnode) { +static void vnodeRemoveDataFiles(int vnode) { char vnodeDir[TSDB_FILENAME_LEN]; char dfilePath[TSDB_FILENAME_LEN]; char linkFile[TSDB_FILENAME_LEN]; @@ -216,7 +232,7 @@ void vnodeRemoveDataFiles(int vnode) { if (tcode >= 0) { remove(dfilePath); - dTrace("Data file %s is removed, link file %s", dfilePath, linkFile); + dPrint("Data file %s is removed, link file %s", dfilePath, linkFile); } } else { remove(de->d_name); @@ -231,21 +247,31 @@ void vnodeRemoveDataFiles(int vnode) { sprintf(vnodeDir, "%s/vnode%d", tsDirectory, vnode); rmdir(vnodeDir); - dTrace("vnode %d is removed!", vnode); + dPrint("vid:%d, vnode is removed, status:%s", vnode, taosGetVnodeStatusStr(vnodeList[vnode].vnodeStatus)); } int vnodeRemoveVnode(int vnode) { if (vnodeList == NULL) return TSDB_CODE_SUCCESS; if (vnodeList[vnode].cfg.maxSessions > 0) { - int32_t ret = vnodeCloseVnode(vnode); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + SVnodeObj* pVnode = &vnodeList[vnode]; + if (pVnode->vnodeStatus == TSDB_VN_STATUS_CREATING + || pVnode->vnodeStatus == TSDB_VN_STATUS_OFFLINE + || pVnode->vnodeStatus == TSDB_VN_STATUS_DELETING) { + dTrace("vid:%d, status:%s, cannot enter close/delete operation", vnode, taosGetVnodeStatusStr(pVnode->vnodeStatus)); + return TSDB_CODE_ACTION_IN_PROGRESS; + } else { + 
int32_t ret = vnodeCloseVnode(vnode); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + dTrace("vid:%d, status:%s, do delete operation", vnode, taosGetVnodeStatusStr(pVnode->vnodeStatus)); + vnodeRemoveDataFiles(vnode); } - vnodeRemoveDataFiles(vnode); } else { - dTrace("vid:%d, max sessions:%d, this vnode already dropped!!!", vnode, vnodeList[vnode].cfg.maxSessions); + dPrint("vid:%d, max sessions:%d, this vnode already dropped!!!", vnode, vnodeList[vnode].cfg.maxSessions); vnodeList[vnode].cfg.maxSessions = 0; //reset value vnodeCalcOpenVnodes(); } @@ -297,7 +323,7 @@ void vnodeCleanUpOneVnode(int vnode) { again = 1; if (vnodeList[vnode].pCachePool) { - vnodeList[vnode].status = TSDB_STATUS_OFFLINE; + vnodeList[vnode].vnodeStatus = TSDB_VN_STATUS_OFFLINE; vnodeClosePeerVnode(vnode); } @@ -326,7 +352,7 @@ void vnodeCleanUpVnodes() { for (int vnode = 0; vnode < TSDB_MAX_VNODES; ++vnode) { if (vnodeList[vnode].pCachePool) { - vnodeList[vnode].status = TSDB_STATUS_OFFLINE; + vnodeList[vnode].vnodeStatus = TSDB_VN_STATUS_OFFLINE; vnodeClosePeerVnode(vnode); } } @@ -351,12 +377,9 @@ void vnodeCalcOpenVnodes() { openVnodes++; } - __sync_val_compare_and_swap(&tsOpenVnodes, tsOpenVnodes, openVnodes); + atomic_store_32(&tsOpenVnodes, openVnodes); } void vnodeUpdateHeadFile(int vnode, int oldTables, int newTables) { //todo rewrite the head file with newTables } - -#pragma GCC diagnostic pop - diff --git a/src/system/detail/src/vnodeStream.c b/src/system/detail/src/vnodeStream.c index 0667ee77bdab731e06147af28195fecfc48fc79b..7ee20a2e59562f63903fb2ea13228070f14f0932 100644 --- a/src/system/detail/src/vnodeStream.c +++ b/src/system/detail/src/vnodeStream.c @@ -17,6 +17,7 @@ #include "taosmsg.h" #include "vnode.h" #include "vnodeUtil.h" +#include "vnodeStatus.h" /* static TAOS *dbConn = NULL; */ void vnodeCloseStreamCallback(void *param); @@ -55,14 +56,11 @@ void vnodeProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { contLen += sizeof(SSubmitMsg); int32_t numOfPoints = 0; + int32_t code = vnodeInsertPoints(pObj, (char *)pMsg, contLen, TSDB_DATA_SOURCE_SHELL, NULL, pObj->sversion, + &numOfPoints, taosGetTimestamp(vnodeList[pObj->vnode].cfg.precision)); - int32_t state = vnodeSetMeterState(pObj, TSDB_METER_STATE_INSERT); - if (state == TSDB_METER_STATE_READY) { - vnodeInsertPoints(pObj, (char *)pMsg, contLen, TSDB_DATA_SOURCE_SHELL, NULL, pObj->sversion, &numOfPoints, taosGetTimestamp(vnodeList[pObj->vnode].cfg.precision)); - vnodeClearMeterState(pObj, TSDB_METER_STATE_INSERT); - } else { - dError("vid:%d sid:%d id:%s, failed to insert continuous query results, state:%d", pObj->vnode, pObj->sid, - pObj->meterId, state); + if (code != TSDB_CODE_SUCCESS) { + dError("vid:%d sid:%d id:%s, failed to insert continuous query results", pObj->vnode, pObj->sid, pObj->meterId); } assert(numOfPoints >= 0 && numOfPoints <= 1); @@ -80,7 +78,7 @@ void vnodeOpenStreams(void *param, void *tmrId) { SVnodeObj *pVnode = (SVnodeObj *)param; SMeterObj *pObj; - if (pVnode->streamRole == 0) return; + if (pVnode->streamRole == TSDB_VN_STREAM_STATUS_STOP) return; if (pVnode->meterList == NULL) return; taosTmrStopA(&pVnode->streamTimer); @@ -88,7 +86,7 @@ void vnodeOpenStreams(void *param, void *tmrId) { for (int sid = 0; sid < pVnode->cfg.maxSessions; ++sid) { pObj = pVnode->meterList[sid]; - if (pObj == NULL || pObj->sqlLen == 0 || vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETING)) continue; + if (pObj == NULL || pObj->sqlLen == 0 || vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) continue; dTrace("vid:%d 
sid:%d id:%s, open stream:%s", pObj->vnode, sid, pObj->meterId, pObj->pSql); @@ -119,7 +117,7 @@ void vnodeCreateStream(SMeterObj *pObj) { SVnodeObj *pVnode = vnodeList + pObj->vnode; - if (pVnode->streamRole == 0) return; + if (pVnode->streamRole == TSDB_VN_STREAM_STATUS_STOP) return; if (pObj->pStream) return; dTrace("vid:%d sid:%d id:%s stream:%s is created", pObj->vnode, pObj->sid, pObj->meterId, pObj->pSql); @@ -154,7 +152,7 @@ void vnodeRemoveStream(SMeterObj *pObj) { // Close all streams in a vnode void vnodeCloseStream(SVnodeObj *pVnode) { SMeterObj *pObj; - dTrace("vid:%d, stream is closed, old role:%d", pVnode->vnode, pVnode->streamRole); + dPrint("vid:%d, stream is closed, old role %s", pVnode->vnode, taosGetVnodeStreamStatusStr(pVnode->streamRole)); // stop stream computing for (int sid = 0; sid < pVnode->cfg.maxSessions; ++sid) { @@ -171,17 +169,18 @@ void vnodeCloseStream(SVnodeObj *pVnode) { void vnodeUpdateStreamRole(SVnodeObj *pVnode) { /* SMeterObj *pObj; */ - int newRole = (pVnode->status == TSDB_STATUS_MASTER) ? 1 : 0; + int newRole = (pVnode->vnodeStatus == TSDB_VN_STATUS_MASTER) ? TSDB_VN_STREAM_STATUS_START : TSDB_VN_STREAM_STATUS_STOP; if (newRole != pVnode->streamRole) { - dTrace("vid:%d, stream role is changed to:%d", pVnode->vnode, newRole); + dPrint("vid:%d, stream role is changed from %s to %s", + pVnode->vnode, taosGetVnodeStreamStatusStr(pVnode->streamRole), taosGetVnodeStreamStatusStr(newRole)); pVnode->streamRole = newRole; - if (newRole) { + if (newRole == TSDB_VN_STREAM_STATUS_START) { vnodeOpenStreams(pVnode, NULL); } else { vnodeCloseStream(pVnode); } } else { - dTrace("vid:%d, stream role is keep to:%d", pVnode->vnode, newRole); + dPrint("vid:%d, stream role is keep to %s", pVnode->vnode, taosGetVnodeStreamStatusStr(pVnode->streamRole)); } } diff --git a/src/system/detail/src/vnodeSystem.c b/src/system/detail/src/vnodeSystem.c index 2f350db3fa4acee7aef6951b770ca5a62ae05acd..b23050ab03eeeeb9de329d7259da352d48fda23b 100644 --- a/src/system/detail/src/vnodeSystem.c +++ b/src/system/detail/src/vnodeSystem.c @@ -14,15 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "os.h" #include "tsdb.h" #include "tsocket.h" @@ -44,8 +36,14 @@ void vnodeCleanUpSystem() { bool vnodeInitQueryHandle() { int numOfThreads = tsRatioOfQueryThreads * tsNumOfCores * tsNumOfThreadsPerCore; - if (numOfThreads < 1) numOfThreads = 1; - queryQhandle = taosInitScheduler(tsNumOfVnodesPerCore * tsNumOfCores * tsSessionsPerVnode, numOfThreads, "query"); + if (numOfThreads < 1) { + numOfThreads = 1; + } + + int32_t maxQueueSize = tsNumOfVnodesPerCore * tsNumOfCores * tsSessionsPerVnode; + dTrace("query task queue initialized, max slot:%d, task threads:%d", maxQueueSize,numOfThreads); + + queryQhandle = taosInitSchedulerWithInfo(maxQueueSize, numOfThreads, "query", vnodeTmrCtrl); return true; } @@ -60,15 +58,15 @@ bool vnodeInitTmrCtl() { int vnodeInitSystem() { - if (!vnodeInitQueryHandle()) { - dError("failed to init query qhandle, exit"); - return -1; - } - if (!vnodeInitTmrCtl()) { dError("failed to init timer, exit"); return -1; } + + if (!vnodeInitQueryHandle()) { + dError("failed to init query qhandle, exit"); + return -1; + } if (vnodeInitStore() < 0) { dError("failed to init vnode storage"); diff --git a/src/system/detail/src/vnodeTagMgmt.c b/src/system/detail/src/vnodeTagMgmt.c index 9b76e148546267ebebd445d90856cf4d9201dc4d..adf4e544bbf9efea877aa3f688afc981437815e6 100644 --- 
a/src/system/detail/src/vnodeTagMgmt.c +++ b/src/system/detail/src/vnodeTagMgmt.c @@ -14,9 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include +#include "os.h" #include "tsdb.h" #include "tlog.h" diff --git a/src/system/detail/src/vnodeUtil.c b/src/system/detail/src/vnodeUtil.c index 790ad1c4a10b5d63987a6f1eeebc184605b773d3..943bec4250884330d167e83188662f1b5b97f8cd 100644 --- a/src/system/detail/src/vnodeUtil.c +++ b/src/system/detail/src/vnodeUtil.c @@ -14,10 +14,7 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include +#include "os.h" #include "tast.h" #include "tscUtil.h" @@ -25,8 +22,7 @@ #include "vnode.h" #include "vnodeDataFilterFunc.h" #include "vnodeUtil.h" - -#pragma GCC diagnostic ignored "-Wint-conversion" +#include "vnodeStatus.h" int vnodeCheckFileIntegrity(FILE* fp) { /* @@ -286,10 +282,11 @@ SSqlFunctionExpr* vnodeCreateSqlFunctionExpr(SQueryMeterMsg* pQueryMsg, int32_t* int32_t param = pExprs[i].pBase.arg[0].argValue.i64; if (getResultDataInfo(type, bytes, pExprs[i].pBase.functionId, param, &pExprs[i].resType, &pExprs[i].resBytes, &pExprs[i].interResBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) { + *code = TSDB_CODE_INVALID_QUERY_MSG; return NULL; } - if (pExprs[i].pBase.functionId == TSDB_FUNC_TAG_DUMMY) { + if (pExprs[i].pBase.functionId == TSDB_FUNC_TAG_DUMMY || pExprs[i].pBase.functionId == TSDB_FUNC_TS_DUMMY) { tagLen += pExprs[i].resBytes; } assert(isValidDataType(pExprs[i].resType, pExprs[i].resBytes)); @@ -551,30 +548,38 @@ int32_t vnodeIncQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterSidExtInfo** pSid for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) { SMeterObj* pMeter = pVnode->meterList[pSids[i]->sid]; - if (pMeter == NULL || (pMeter->state > TSDB_METER_STATE_INSERT)) { - if (pMeter == NULL || vnodeIsMeterState(pMeter, TSDB_METER_STATE_DELETING)) { - code = TSDB_CODE_NOT_ACTIVE_SESSION; - dError("qmsg:%p, vid:%d sid:%d, not there or will be dropped", pQueryMsg, pQueryMsg->vnode, pSids[i]->sid); - vnodeSendMeterCfgMsg(pQueryMsg->vnode, pSids[i]->sid); - } else {//update or import - code = TSDB_CODE_ACTION_IN_PROGRESS; - dTrace("qmsg:%p, vid:%d sid:%d id:%s, it is in state:%d, wait!", pQueryMsg, pQueryMsg->vnode, pSids[i]->sid, - pMeter->meterId, pMeter->state); - } - } else { - /* - * vnodeIsSafeToDeleteMeter will wait for this function complete, and then it can - * check if the numOfQueries is 0 or not. - */ - pMeterObjList[(*numOfInc)++] = pMeter; - __sync_fetch_and_add(&pMeter->numOfQueries, 1); - - // output for meter more than one query executed - if (pMeter->numOfQueries > 1) { - dTrace("qmsg:%p, vid:%d sid:%d id:%s, inc query ref, numOfQueries:%d", pQueryMsg, pMeter->vnode, pMeter->sid, - pMeter->meterId, pMeter->numOfQueries); - num++; - } + if (pMeter == NULL || vnodeIsMeterState(pMeter, TSDB_METER_STATE_DROPPING)) { + code = TSDB_CODE_NOT_ACTIVE_TABLE; + dError("qmsg:%p, vid:%d sid:%d, not there or will be dropped", pQueryMsg, pQueryMsg->vnode, pSids[i]->sid); + + vnodeSendMeterCfgMsg(pQueryMsg->vnode, pSids[i]->sid); + continue; + } else if (pMeter->uid != pSids[i]->uid || pMeter->sid != pSids[i]->sid) { + code = TSDB_CODE_TABLE_ID_MISMATCH; + dError("qmsg:%p, vid:%d sid:%d id:%s uid:%lld, id mismatch. 
sid:%d uid:%lld in msg", pQueryMsg, + pQueryMsg->vnode, pMeter->sid, pMeter->meterId, pMeter->uid, pSids[i]->sid, pSids[i]->uid); + + vnodeSendMeterCfgMsg(pQueryMsg->vnode, pSids[i]->sid); + continue; + } else if (pMeter->state > TSDB_METER_STATE_INSERTING) { //update or import + code = TSDB_CODE_ACTION_IN_PROGRESS; + dTrace("qmsg:%p, vid:%d sid:%d id:%s, it is in state:%s, wait!", pQueryMsg, pQueryMsg->vnode, pSids[i]->sid, + pMeter->meterId, taosGetTableStatusStr(pMeter->state)); + continue; + } + + /* + * vnodeIsSafeToDeleteMeter will wait for this function complete, and then it can + * check if the numOfQueries is 0 or not. + */ + pMeterObjList[(*numOfInc)++] = pMeter; + atomic_fetch_add_32(&pMeter->numOfQueries, 1); + + // output for meter more than one query executed + if (pMeter->numOfQueries > 1) { + dTrace("qmsg:%p, vid:%d sid:%d id:%s, inc query ref, numOfQueries:%d", pQueryMsg, pMeter->vnode, pMeter->sid, + pMeter->meterId, pMeter->numOfQueries); + num++; } } @@ -591,7 +596,7 @@ void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList, SMeterObj* pMeter = pMeterObjList[i]; if (pMeter != NULL) { // here, do not need to lock to perform operations - __sync_fetch_and_sub(&pMeter->numOfQueries, 1); + atomic_fetch_sub_32(&pMeter->numOfQueries, 1); if (pMeter->numOfQueries > 0) { dTrace("qmsg:%p, vid:%d sid:%d id:%s dec query ref, numOfQueries:%d", pQueryMsg, pMeter->vnode, pMeter->sid, @@ -629,16 +634,16 @@ void vnodeUpdateQueryColumnIndex(SQuery* pQuery, SMeterObj* pMeterObj) { return; } - for(int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - SSqlFuncExprMsg* pSqlExprMsg = &pQuery->pSelectExpr[i].pBase; + for(int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + SSqlFuncExprMsg* pSqlExprMsg = &pQuery->pSelectExpr[k].pBase; if (pSqlExprMsg->functionId == TSDB_FUNC_ARITHM || pSqlExprMsg->colInfo.flag == TSDB_COL_TAG) { continue; } SColIndexEx* pColIndexEx = &pSqlExprMsg->colInfo; - for(int32_t j = 0; j < pQuery->numOfCols; ++j) { - if (pColIndexEx->colId == pQuery->colList[j].data.colId) { - pColIndexEx->colIdx = pQuery->colList[j].colIdx; + for(int32_t f = 0; f < pQuery->numOfCols; ++f) { + if (pColIndexEx->colId == pQuery->colList[f].data.colId) { + pColIndexEx->colIdx = pQuery->colList[f].colIdx; break; } } @@ -646,7 +651,7 @@ void vnodeUpdateQueryColumnIndex(SQuery* pQuery, SMeterObj* pMeterObj) { } int32_t vnodeSetMeterState(SMeterObj* pMeterObj, int32_t state) { - return __sync_val_compare_and_swap(&pMeterObj->state, TSDB_METER_STATE_READY, state); + return atomic_val_compare_exchange_32(&pMeterObj->state, TSDB_METER_STATE_READY, state); } void vnodeClearMeterState(SMeterObj* pMeterObj, int32_t state) { @@ -656,7 +661,7 @@ void vnodeClearMeterState(SMeterObj* pMeterObj, int32_t state) { bool vnodeIsMeterState(SMeterObj* pMeterObj, int32_t state) { if (state == TSDB_METER_STATE_READY) { return pMeterObj->state == TSDB_METER_STATE_READY; - } else if (state == TSDB_METER_STATE_DELETING) { + } else if (state == TSDB_METER_STATE_DROPPING) { return pMeterObj->state >= state; } else { return (((pMeterObj->state) & state) == state); @@ -668,23 +673,43 @@ void vnodeSetMeterDeleting(SMeterObj* pMeterObj) { return; } - pMeterObj->state |= TSDB_METER_STATE_DELETING; + pMeterObj->state |= TSDB_METER_STATE_DROPPING; +} + +int32_t vnodeSetMeterInsertImportStateEx(SMeterObj* pObj, int32_t st) { + int32_t code = TSDB_CODE_SUCCESS; + + int32_t state = vnodeSetMeterState(pObj, st); + if (state != TSDB_METER_STATE_READY) {//return to denote import is not performed + if 
(vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) { + dTrace("vid:%d sid:%d id:%s, meter is deleted, state:%d", pObj->vnode, pObj->sid, pObj->meterId, + pObj->state); + code = TSDB_CODE_NOT_ACTIVE_TABLE; + } else {// waiting for 300ms by default and try again + dTrace("vid:%d sid:%d id:%s, try submit again since in state:%d", pObj->vnode, pObj->sid, + pObj->meterId, pObj->state); + + code = TSDB_CODE_ACTION_IN_PROGRESS; + } + } + + return code; } bool vnodeIsSafeToDeleteMeter(SVnodeObj* pVnode, int32_t sid) { SMeterObj* pObj = pVnode->meterList[sid]; - if (pObj == NULL || vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETED)) { + if (pObj == NULL || vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPED)) { return true; } - int32_t prev = vnodeSetMeterState(pObj, TSDB_METER_STATE_DELETING); + int32_t prev = vnodeSetMeterState(pObj, TSDB_METER_STATE_DROPPING); /* * if the meter is not in ready/deleting state, it must be in insert/import/update, * set the deleting state and wait the procedure to be completed */ - if (prev != TSDB_METER_STATE_READY && prev < TSDB_METER_STATE_DELETING) { + if (prev != TSDB_METER_STATE_READY && prev < TSDB_METER_STATE_DROPPING) { vnodeSetMeterDeleting(pObj); dWarn("vid:%d sid:%d id:%s, can not be deleted, state:%d, wait", pObj->vnode, pObj->sid, pObj->meterId, prev); @@ -694,7 +719,7 @@ bool vnodeIsSafeToDeleteMeter(SVnodeObj* pVnode, int32_t sid) { bool ready = true; /* - * the query will be stopped ASAP, since the state of meter is set to TSDB_METER_STATE_DELETING, + * the query will be stopped ASAP, since the state of meter is set to TSDB_METER_STATE_DROPPING, * and new query will abort since the meter is deleted. */ pthread_mutex_lock(&pVnode->vmutex); diff --git a/src/system/lite/CMakeLists.txt b/src/system/lite/CMakeLists.txt index 965f7666b056dbf21ffcd259c922503f24665666..a22ed60563e5a237d060abfc46dc9fa87e546a87 100644 --- a/src/system/lite/CMakeLists.txt +++ b/src/system/lite/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/modules/http/inc) diff --git a/src/system/lite/src/dnodeMgmt.spec.c b/src/system/lite/src/dnodeMgmt.spec.c index 00e7e469c04847f6578f24c2f0a2ce30f09934d6..576dbcb337b329571296d331e043e2ab1bc8d92c 100644 --- a/src/system/lite/src/dnodeMgmt.spec.c +++ b/src/system/lite/src/dnodeMgmt.spec.c @@ -50,7 +50,7 @@ char *taosBuildReqMsgToMnode(SMgmtObj *pObj, char type) { } int taosSendMsgToMnode(SMgmtObj *pObj, char *msg, int msgLen) { - mTrace("msg:%s is sent to mnode", taosMsg[*(msg-1)]); + dTrace("msg:%s is sent to mnode", taosMsg[(uint8_t)(*(msg-1))]); /* * Lite version has no message header, so minus one @@ -81,7 +81,7 @@ void vnodeProcessMsgFromMgmtSpec(SSchedMsg *sched) { char msgType = *sched->msg; char *content = sched->msg + 1; - dTrace("msg:%s is received from mgmt", taosMsg[msgType]); + dTrace("msg:%s is received from mgmt", taosMsg[(uint8_t)msgType]); vnodeProcessMsgFromMgmt(content, 0, msgType, 0); @@ -90,4 +90,6 @@ void vnodeProcessMsgFromMgmtSpec(SSchedMsg *sched) { int vnodeInitMgmt() { return 0; } +void vnodeInitMgmtIp() {} + int vnodeSaveCreateMsgIntoQueue(SVnodeObj *pVnode, char *pMsg, int msgLen) { return 0; } \ No newline at end of file diff --git a/src/system/lite/src/mgmtBalance.spec.c b/src/system/lite/src/mgmtBalance.spec.c index 
5100aea936530f9d7c0f1ada0f3bfce1a2ecd6cc..cf3e999e4f48c09df5da03d55b8084b0d1169168 100644 --- a/src/system/lite/src/mgmtBalance.spec.c +++ b/src/system/lite/src/mgmtBalance.spec.c @@ -15,8 +15,9 @@ #define _DEFAULT_SOURCE #include "mgmtBalance.h" +#include "vnodeStatus.h" -void mgmtStartBalanceTimer(int mseconds) {} +void mgmtStartBalanceTimer(int64_t mseconds) {} int mgmtInitBalance() { return 0; } @@ -25,10 +26,11 @@ void mgmtCleanupBalance() {} int mgmtAllocVnodes(SVgObj *pVgroup) { int selectedVnode = -1; SDnodeObj *pDnode = &dnodeObj; + int lastAllocVode = pDnode->lastAllocVnode; for (int i = 0; i < pDnode->numOfVnodes; i++) { - int vnode = (i + pDnode->lastAllocVnode) % pDnode->numOfVnodes; - if (pDnode->vload[vnode].vgId == 0 && pDnode->vload[vnode].status == TSDB_VN_STATUS_READY) { + int vnode = (i + lastAllocVode) % pDnode->numOfVnodes; + if (pDnode->vload[vnode].vgId == 0 && pDnode->vload[vnode].status == TSDB_VN_STATUS_OFFLINE) { selectedVnode = vnode; break; } @@ -38,8 +40,7 @@ int mgmtAllocVnodes(SVgObj *pVgroup) { mError("vgroup:%d alloc vnode failed, free vnodes:%d", pVgroup->vgId, pDnode->numOfFreeVnodes); return -1; } else { - mTrace("vgroup:%d allocate vnode:%d, last allocated vnode:%d", pVgroup->vgId, selectedVnode, - pDnode->lastAllocVnode); + mTrace("vgroup:%d allocate vnode:%d, last allocated vnode:%d", pVgroup->vgId, selectedVnode, lastAllocVode); pVgroup->vnodeGid[0].vnode = selectedVnode; pDnode->lastAllocVnode = selectedVnode + 1; if (pDnode->lastAllocVnode >= pDnode->numOfVnodes) pDnode->lastAllocVnode = 0; @@ -51,10 +52,12 @@ bool mgmtCheckModuleInDnode(SDnodeObj *pDnode, int moduleType) { return tsModule[moduleType].num != 0; } +char *mgmtGetVnodeStatus(SVgObj *pVgroup, SVnodeGid *pVnode) { return "master"; } + bool mgmtCheckVnodeReady(SDnodeObj *pDnode, SVgObj *pVgroup, SVnodeGid *pVnode) { return true; } -void mgmtUpdateDnodeState(SDnodeObj *pDnode, int lbState) {} +void mgmtUpdateDnodeState(SDnodeObj *pDnode, int lbStatus) {} -void mgmtUpdateVgroupState(SVgObj *pVgroup, int lbState, int srcIp) {} +void mgmtUpdateVgroupState(SVgObj *pVgroup, int lbStatus, int srcIp) {} bool mgmtAddVnode(SVgObj *pVgroup, SDnodeObj *pSrcDnode, SDnodeObj *pDestDnode) { return false; } \ No newline at end of file diff --git a/src/system/lite/src/mgmtDnode.spec.c b/src/system/lite/src/mgmtDnode.spec.c index c34ac58c0084686438e2221a293414845bad705e..7fd9e7a2dfac9fa57969f393b367d668e1c419fa 100644 --- a/src/system/lite/src/mgmtDnode.spec.c +++ b/src/system/lite/src/mgmtDnode.spec.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "mgmt.h" +#include "vnodeStatus.h" SDnodeObj dnodeObj; extern uint32_t tsRebootTime; @@ -26,17 +27,17 @@ int mgmtUpdateDnode(SDnodeObj *pDnode) { return 0; } void mgmtCleanUpDnodes() {} int mgmtInitDnodes() { - dnodeObj.privateIp = inet_addr(tsInternalIp);; + dnodeObj.privateIp = inet_addr(tsPrivateIp);; dnodeObj.createdTime = (int64_t)tsRebootTime * 1000; dnodeObj.lastReboot = tsRebootTime; dnodeObj.numOfCores = (uint16_t)tsNumOfCores; - dnodeObj.status = TSDB_STATUS_READY; + dnodeObj.status = TSDB_DN_STATUS_READY; dnodeObj.alternativeRole = TSDB_DNODE_ROLE_ANY; dnodeObj.numOfTotalVnodes = tsNumOfTotalVnodes; dnodeObj.thandle = (void*)(1); //hack way if (dnodeObj.numOfVnodes == TSDB_INVALID_VNODE_NUM) { mgmtSetDnodeMaxVnodes(&dnodeObj); - mPrint("first access, set total vnodes:%d", dnodeObj.numOfVnodes); + mPrint("dnode first access, set total vnodes:%d", dnodeObj.numOfVnodes); } return 0; } diff --git a/src/system/lite/src/mgmtDnodeInt.spec.c 
b/src/system/lite/src/mgmtDnodeInt.spec.c index acde36e7b84bb94fa3f471cc7a18a0fa7bd8c2a7..734fa630c5303faddb510093c20de21aafc521b9 100644 --- a/src/system/lite/src/mgmtDnodeInt.spec.c +++ b/src/system/lite/src/mgmtDnodeInt.spec.c @@ -23,6 +23,7 @@ #include "tutil.h" #include "vnode.h" #include "tsystem.h" +#include "vnodeStatus.h" extern void *dmQhandle; void * mgmtStatusTimer = NULL; @@ -60,7 +61,7 @@ char *taosBuildReqMsgToDnode(SDnodeObj *pObj, char type) { int taosSendSimpleRspToDnode(SDnodeObj *pObj, char rsptype, char code) { return 0; } int taosSendMsgToDnode(SDnodeObj *pObj, char *msg, int msgLen) { - mTrace("msg:%s is sent to dnode", taosMsg[*(msg-1)]); + mTrace("msg:%s is sent to dnode", taosMsg[(uint8_t)(*(msg-1))]); /* * Lite version has no message header, so minus one @@ -82,7 +83,7 @@ void mgmtCleanUpDnodeInt() {} void mgmtProcessDnodeStatus(void *handle, void *tmrId) { SDnodeObj *pObj = &dnodeObj; pObj->openVnodes = tsOpenVnodes; - pObj->status = TSDB_STATUS_READY; + pObj->status = TSDB_DN_STATUS_READY; float memoryUsedMB = 0; taosGetSysMemory(&memoryUsedMB); @@ -93,11 +94,11 @@ void mgmtProcessDnodeStatus(void *handle, void *tmrId) { SVnodeObj * pVnode = vnodeList + vnode; // wait vnode dropped - if (pVload->dropStatus == TSDB_VN_STATUS_DROPPING) { + if (pVload->dropStatus == TSDB_VN_DROP_STATUS_DROPPING) { if (vnodeList[vnode].cfg.maxSessions <= 0) { - pVload->dropStatus = TSDB_VN_STATUS_READY; - pVload->status = TSDB_VN_STATUS_READY; - mPrint("vid:%d, drop finished", pObj->privateIp, vnode); + pVload->dropStatus = TSDB_VN_DROP_STATUS_READY; + pVload->status = TSDB_VN_STATUS_OFFLINE; + mPrint("dnode:%s, vid:%d, drop finished", taosIpStr(pObj->privateIp), vnode); taosTmrStart(mgmtMonitorDbDrop, 10000, NULL, mgmtTmr); } } @@ -107,7 +108,7 @@ void mgmtProcessDnodeStatus(void *handle, void *tmrId) { } pVload->vnode = vnode; - pVload->status = TSDB_VN_STATUS_READY; + pVload->status = TSDB_VN_STATUS_MASTER; pVload->totalStorage = pVnode->vnodeStatistic.totalStorage; pVload->compStorage = pVnode->vnodeStatistic.compStorage; pVload->pointsWritten = pVnode->vnodeStatistic.pointsWritten; @@ -116,7 +117,7 @@ void mgmtProcessDnodeStatus(void *handle, void *tmrId) { SVgObj *pVgroup = mgmtGetVgroup(vgId); if (pVgroup == NULL) { mError("vgroup:%d is not there, but associated with vnode %d", vgId, vnode); - pVload->dropStatus = TSDB_VN_STATUS_DROPPING; + pVload->dropStatus = TSDB_VN_DROP_STATUS_DROPPING; continue; } @@ -126,9 +127,9 @@ void mgmtProcessDnodeStatus(void *handle, void *tmrId) { continue; } - if (pVload->vgId == 0 || pVload->dropStatus == TSDB_VN_STATUS_DROPPING) { + if (pVload->vgId == 0 || pVload->dropStatus == TSDB_VN_DROP_STATUS_DROPPING) { mError("vid:%d, mgmt not exist, drop it", vnode); - pVload->dropStatus = TSDB_VN_STATUS_DROPPING; + pVload->dropStatus = TSDB_VN_DROP_STATUS_DROPPING; } } @@ -141,7 +142,7 @@ void mgmtProcessDnodeStatus(void *handle, void *tmrId) { void mgmtProcessMsgFromDnodeSpec(SSchedMsg *sched) { char msgType = *sched->msg; char *content = sched->msg + 1; - mTrace("msg:%s is received from dnode", taosMsg[msgType]); + mTrace("msg:%s is received from dnode", taosMsg[(uint8_t)msgType]); mgmtProcessMsgFromDnode(content, 0, msgType, mgmtGetDnode(0)); free(sched->msg); diff --git a/src/system/lite/src/vnodeFile.spec.c b/src/system/lite/src/vnodeFile.spec.c index 53651a8cc947bde2fe48b481cfdf7f30b158120c..4ad624d2ad74b7ded307c014586552ef4f0a6e4e 100644 --- a/src/system/lite/src/vnodeFile.spec.c +++ b/src/system/lite/src/vnodeFile.spec.c @@ -24,7 +24,7 @@ 
char* vnodeGetDataDir(int vnode, int fileId) { return dataDir; } void vnodeAdustVnodeFile(SVnodeObj *pVnode) { // Retention policy here int fileId = pVnode->fileId - pVnode->numOfFiles + 1; - int cfile = taosGetTimestamp(pVnode->cfg.precision)/pVnode->cfg.daysPerFile/tsMsPerDay[pVnode->cfg.precision]; + int cfile = taosGetTimestamp(pVnode->cfg.precision)/pVnode->cfg.daysPerFile/tsMsPerDay[(uint8_t)pVnode->cfg.precision]; while (fileId <= cfile - pVnode->maxFiles) { vnodeRemoveFile(pVnode->vnode, fileId); pVnode->numOfFiles--; @@ -97,7 +97,7 @@ int vnodeCheckNewHeaderFile(int fd, SVnodeObj *pVnode) { } _correct_exit: - dTrace("vid: %d new header file %s is correct", pVnode->vnode, pVnode->nfn); + dPrint("vid: %d new header file %s is correct", pVnode->vnode, pVnode->nfn); tfree(pBlocks); tfree(pHeader); return 0; diff --git a/src/system/lite/src/vnodePeer.spec.c b/src/system/lite/src/vnodePeer.spec.c index 1ceb8465c3fd5743da4ba105c9d32580fadca9ae..34400d4051729a8b0d8e50ec01a8d5ae877c9622 100644 --- a/src/system/lite/src/vnodePeer.spec.c +++ b/src/system/lite/src/vnodePeer.spec.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "vnode.h" +#include "vnodeStatus.h" int vnodeInitPeer(int numOfThreads) { return 0; } @@ -30,8 +31,8 @@ void vnodeBroadcastStatusToUnsyncedPeer(SVnodeObj *pVnode) {} int vnodeOpenPeerVnode(int vnode) { SVnodeObj *pVnode = vnodeList + vnode; - pVnode->status = (pVnode->cfg.replications > 1) ? TSDB_STATUS_UNSYNCED : TSDB_STATUS_MASTER; - dTrace("vid:%d, vnode status:%d numOfPeers:%d", vnode, pVnode->status, pVnode->cfg.replications-1); + pVnode->vnodeStatus = (pVnode->cfg.replications > 1) ? TSDB_VN_STATUS_UNSYNCED : TSDB_VN_STATUS_MASTER; + dPrint("vid:%d, status:%s numOfPeers:%d", vnode, taosGetVnodeStatusStr(pVnode->vnodeStatus), pVnode->cfg.replications - 1); vnodeUpdateStreamRole(pVnode); return 0; } diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 7e54759f7e20dffa7c6250af446144d6fcf89c17..5e84f3feadbf4f8cee66c334ac62465ed081936e 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -4,7 +4,7 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc) -IF (TD_LINUX_64) +IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tutil ${SRC}) TARGET_LINK_LIBRARIES(tutil pthread os m rt) @@ -37,7 +37,6 @@ ELSEIF (TD_WINDOWS_64) LIST(APPEND SRC ./src/ihash.c) LIST(APPEND SRC ./src/lz4.c) LIST(APPEND SRC ./src/shash.c) - LIST(APPEND SRC ./src/sql.c) LIST(APPEND SRC ./src/tbase64.c) LIST(APPEND SRC ./src/tcache.c) LIST(APPEND SRC ./src/tcompression.c) @@ -59,8 +58,6 @@ ELSEIF (TD_WINDOWS_64) LIST(APPEND SRC ./src/tskiplist.c) LIST(APPEND SRC ./src/tsocket.c) LIST(APPEND SRC ./src/tstatus.c) - LIST(APPEND SRC ./src/tstoken.c) - LIST(APPEND SRC ./src/tstoken.c) LIST(APPEND SRC ./src/tstrbuild.c) LIST(APPEND SRC ./src/ttime.c) LIST(APPEND SRC ./src/ttimer.c) diff --git a/src/util/src/ihash.c b/src/util/src/ihash.c index 606bddd79b53bf843971dafdf39caca8c6df604a..8c492b03f867036d3fcb3a52f872f57142cba7ec 100644 --- a/src/util/src/ihash.c +++ b/src/util/src/ihash.c @@ -13,13 +13,6 @@ * along with this program. If not, see . 
*/ -#include -#include -#include -#include -#include -#include - #include "os.h" typedef struct _str_node_t { diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 7804cb0d0e8d6813f77918df5bc3dbe7d6047acf..8a2f1347df9e7eb7d7fb29623eab4120b5484aeb 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -13,15 +13,7 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include - -#include -#include -#include +#include "os.h" #include "tcache.h" #include "tlog.h" @@ -516,7 +508,7 @@ static SDataNode *taosUpdateCacheImpl(SCacheObj *pObj, SDataNode *pNode, char *k pNewNode->addTime = taosGetTimestampMs(); pNewNode->time = pNewNode->addTime + keepTime; - __sync_add_and_fetch_32(&pNewNode->refCount, 1); + atomic_add_fetch_32(&pNewNode->refCount, 1); // the address of this node may be changed, so the prev and next element should update the corresponding pointer taosUpdateInHashTable(pObj, pNewNode); @@ -529,7 +521,7 @@ static SDataNode *taosUpdateCacheImpl(SCacheObj *pObj, SDataNode *pNode, char *k return NULL; } - __sync_add_and_fetch_32(&pNewNode->refCount, 1); + atomic_add_fetch_32(&pNewNode->refCount, 1); assert(hashVal == (*pObj->hashFp)(key, keyLen - 1)); pNewNode->hashVal = hashVal; @@ -558,7 +550,7 @@ static FORCE_INLINE SDataNode *taosAddToCacheImpl(SCacheObj *pObj, char *key, ui return NULL; } - __sync_add_and_fetch_32(&pNode->refCount, 1); + atomic_add_fetch_32(&pNode->refCount, 1); pNode->hashVal = (*pObj->hashFp)(key, keyLen - 1); taosAddNodeToHashTable(pObj, pNode); @@ -616,7 +608,7 @@ static FORCE_INLINE void taosDecRef(SDataNode *pNode) { } if (pNode->refCount > 0) { - __sync_sub_and_fetch_32(&pNode->refCount, 1); + atomic_sub_fetch_32(&pNode->refCount, 1); pTrace("key:%s is released by app.refcnt:%d", pNode->key, pNode->refCount); } else { /* @@ -676,20 +668,20 @@ void *taosGetDataFromCache(void *handle, char *key) { SDataNode *ptNode = taosGetNodeFromHashTable(handle, key, keyLen); if (ptNode != NULL) { - __sync_add_and_fetch_32(&ptNode->refCount, 1); + atomic_add_fetch_32(&ptNode->refCount, 1); } __cache_unlock(pObj); if (ptNode != NULL) { - __sync_add_and_fetch_32(&pObj->statistics.hitCount, 1); + atomic_add_fetch_32(&pObj->statistics.hitCount, 1); pTrace("key:%s is retrieved from cache,refcnt:%d", key, ptNode->refCount); } else { - __sync_add_and_fetch_32(&pObj->statistics.missCount, 1); + atomic_add_fetch_32(&pObj->statistics.missCount, 1); pTrace("key:%s not in cache,retrieved failed", key); } - __sync_add_and_fetch_32(&pObj->statistics.totalAccess, 1); + atomic_add_fetch_32(&pObj->statistics.totalAccess, 1); return (ptNode != NULL) ? ptNode->data : NULL; } diff --git a/src/util/src/tcompression.c b/src/util/src/tcompression.c index cd69f30c3f212bc4e027f4eac3f05659cd143c0d..0f0c7bed347fce36ea8054ce523619998a1f6327 100644 --- a/src/util/src/tcompression.c +++ b/src/util/src/tcompression.c @@ -46,12 +46,6 @@ * of the XORed value with informations. If not, record the first corresponding bytes. * */ -#include -#include -#include -#include -#include -#include #include "os.h" #include "lz4.h" @@ -361,16 +355,16 @@ int tsCompressINTImp(const char *const input, const int nelements, char *const o tmp_bit = (LONG_BYTES * BITS_PER_BYTE) - BUILDIN_CLZL(zigzag_value); } - if (elems + 1 <= selector_to_elems[selector] && elems + 1 <= selector_to_elems[bit_to_selector[tmp_bit]]) { + if (elems + 1 <= selector_to_elems[(int)selector] && elems + 1 <= selector_to_elems[(int)(bit_to_selector[(int)tmp_bit])]) { // If can hold another one. 
- selector = selector > bit_to_selector[tmp_bit] ? selector : bit_to_selector[tmp_bit]; + selector = selector > bit_to_selector[(int)tmp_bit] ? selector : bit_to_selector[(int)tmp_bit]; elems++; - bit = bit_per_integer[selector]; + bit = bit_per_integer[(int)selector]; } else { // if cannot hold another one. - while (elems < selector_to_elems[selector]) selector++; - elems = selector_to_elems[selector]; - bit = bit_per_integer[selector]; + while (elems < selector_to_elems[(int)selector]) selector++; + elems = selector_to_elems[(int)selector]; + bit = bit_per_integer[(int)selector]; break; } prev_value_tmp = curr_value; @@ -461,8 +455,8 @@ int tsDecompressINTImp(const char *const input, const int nelements, char *const memcpy(&w, ip, LONG_BYTES); char selector = (char)(w & INT64MASK(4)); // selector = 4 - char bit = bit_per_integer[selector]; // bit = 3 - int elems = selector_to_elems[selector]; + char bit = bit_per_integer[(int)selector]; // bit = 3 + int elems = selector_to_elems[(int)selector]; for (int i = 0; i < elems; i++) { uint64_t zigzag_value; diff --git a/src/util/src/tcrc32c.c b/src/util/src/tcrc32c.c index 88841bcbc926d5ab18f64c11cc92c5c22f9e2e81..546693c4bbc82750b7bbc1a117469fb816fd0362 100644 --- a/src/util/src/tcrc32c.c +++ b/src/util/src/tcrc32c.c @@ -25,8 +25,6 @@ #include #include "tcrc32c.h" -//todo : use the original source code -#pragma GCC diagnostic ignored "-Wunused-function" #define POLY 0x82f63b78 #define LONG_SHIFT 8192 @@ -1093,6 +1091,7 @@ static uint32_t short_shifts[4][256] = { 0xe1a734e7, 0xc41cc13c, 0x140cd014, 0x31b725cf, 0x5f7b3ba2, 0x7ac0ce79, 0x82e30778, 0xa758f2a3, 0xc994ecce, 0xec2f1915}}; +#if 0 static uint32_t append_trivial(uint32_t crc, crc_stream input, size_t length) { for (size_t i = 0; i < length; ++i) { crc = crc ^ input[i]; @@ -1130,6 +1129,7 @@ static uint32_t append_adler_table(uint32_t crci, crc_stream input, } return (uint32_t)(crc ^ 0xffffffff); } +#endif /* Table-driven software version as a fall-back. This is about 15 times slower than using the hardware instructions. This assumes little-endian integers, @@ -1353,7 +1353,7 @@ uint32_t crc32c_hw(uint32_t crc, crc_stream buf, size_t len) { #endif // #ifndef _TD_ARM_ void taosResolveCRC() { -#ifndef _TD_ARM_32 +#ifndef _TD_ARM_ int sse42; SSE42(sse42); crc32c = sse42 ? crc32c_hw : crc32c_sf; diff --git a/src/util/src/textbuffer.c b/src/util/src/textbuffer.c index c6b56919e9707d336319f5815f12de042db1961d..a2ad90d304112ce665cf26c3ebfcd9a756cf6522 100644 --- a/src/util/src/textbuffer.c +++ b/src/util/src/textbuffer.c @@ -12,29 +12,15 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ - -#include -#include -#include -#include - -#include -#include -#include - -#include - #include "os.h" #include "taos.h" #include "taosmsg.h" #include "textbuffer.h" #include "tlog.h" -#include "tsql.h" #include "tsqlfunction.h" #include "ttime.h" #include "ttypes.h" - -#pragma GCC diagnostic ignored "-Wformat" +#include "tutil.h" #define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \ (data + (schema)->colOffset[colId] * (allrow) + (rowId) * (schema)->pFields[colId].bytes) @@ -58,9 +44,8 @@ void getTmpfilePath(const char *fileNamePrefix, char *dstPath) { strcpy(tmpPath, tmpDir); strcat(tmpPath, tdengineTmpFileNamePrefix); strcat(tmpPath, fileNamePrefix); - strcat(tmpPath, "-%u-%u"); - - snprintf(dstPath, MAX_TMPFILE_PATH_LENGTH, tmpPath, taosGetPthreadId(), __sync_add_and_fetch_32(&tmpFileSerialNum, 1)); + strcat(tmpPath, "-%llu-%u"); + snprintf(dstPath, MAX_TMPFILE_PATH_LENGTH, tmpPath, taosGetPthreadId(), atomic_add_fetch_32(&tmpFileSerialNum, 1)); } /* @@ -443,7 +428,8 @@ void tBucketIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t * } void tBucketDoubleHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) { - double v = *(double *)value; + //double v = *(double *)value; + double v = GET_DOUBLE_VAL(value); if (pBucket->nRange.dMinVal == DBL_MAX) { /* @@ -687,7 +673,8 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) { break; }; case TSDB_DATA_TYPE_DOUBLE: { - double val = *(double *)data; + //double val = *(double *)data; + double val = GET_DOUBLE_VAL(data); if (r->dMinVal > val) { r->dMinVal = val; } @@ -698,7 +685,8 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) { break; }; case TSDB_DATA_TYPE_FLOAT: { - double val = *(float *)data; + //double val = *(float *)data; + double val = GET_FLOAT_VAL(data); if (r->dMinVal > val) { r->dMinVal = val; @@ -746,12 +734,14 @@ void tMemBucketPut(tMemBucket *pBucket, void *data, int32_t numOfRows) { break; } case TSDB_DATA_TYPE_DOUBLE: { - double val = *(double *)d; + //double val = *(double *)d; + double val = GET_DOUBLE_VAL(d); (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); break; } case TSDB_DATA_TYPE_FLOAT: { - double val = *(float *)d; + //double val = *(float *)d; + double val = GET_FLOAT_VAL(d); (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); break; } @@ -852,16 +842,20 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i return (first < second) ? -1 : 1; }; case TSDB_DATA_TYPE_DOUBLE: { - double first = *(double *)f1; - double second = *(double *)f2; + //double first = *(double *)f1; + double first = GET_DOUBLE_VAL(f1); + //double second = *(double *)f2; + double second = GET_DOUBLE_VAL(f2); if (first == second) { return 0; } return (first < second) ? 
-1 : 1; }; case TSDB_DATA_TYPE_FLOAT: { - float first = *(float *)f1; - float second = *(float *)f2; + //float first = *(float *)f1; + //float second = *(float *)f2; + float first = GET_FLOAT_VAL(f1); + float second = GET_FLOAT_VAL(f2); if (first == second) { return 0; } @@ -1020,7 +1014,7 @@ static void UNUSED_FUNC tSortDataPrint(int32_t type, char *prefix, char *startx, break; case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%s:(%lld, %lld, %lld)\n", prefix, *(int64_t *)startx, *(int64_t *)midx, *(int64_t *)endx); + printf("%s:(%" PRId64 ", %" PRId64 ", %" PRId64 ")\n", prefix, *(int64_t *)startx, *(int64_t *)midx, *(int64_t *)endx); break; case TSDB_DATA_TYPE_FLOAT: printf("%s:(%f, %f, %f)\n", prefix, *(float *)startx, *(float *)midx, *(float *)endx); @@ -1096,7 +1090,7 @@ static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t break; case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%lld\t", *(int64_t *)startx); + printf("%" PRId64 "\t", *(int64_t *)startx); break; case TSDB_DATA_TYPE_BINARY: printf("%s\t", startx); @@ -1267,7 +1261,7 @@ static tFilePage *loadIntoBucketFromDisk(tMemBucket *pMemBucket, int32_t segIdx, assert(pPage->numOfElems > 0); tColModelAppend(pDesc->pSchema, buffer, pPage->data, 0, pPage->numOfElems, pPage->numOfElems); - printf("id: %d count: %d\n", j, buffer->numOfElems); + printf("id: %d count: %" PRIu64 "\n", j, buffer->numOfElems); } } tfree(pPage); @@ -1310,10 +1304,16 @@ double findOnlyResult(tMemBucket *pMemBucket) { return *(int8_t *)pPage->data; case TSDB_DATA_TYPE_BIGINT: return (double)(*(int64_t *)pPage->data); - case TSDB_DATA_TYPE_DOUBLE: - return *(double *)pPage->data; - case TSDB_DATA_TYPE_FLOAT: - return *(float *)pPage->data; + case TSDB_DATA_TYPE_DOUBLE: { + double dv = GET_DOUBLE_VAL(pPage->data); + //return *(double *)pPage->data; + return dv; + } + case TSDB_DATA_TYPE_FLOAT: { + float fv = GET_FLOAT_VAL(pPage->data); + //return *(float *)pPage->data; + return fv; + } default: return 0; } @@ -1373,10 +1373,16 @@ static void printBinaryData(char *data, int32_t len) { } if (len == 50) { // probably the avg intermediate result - printf("%lf,%d\t", *(double *)data, *(int64_t *)(data + sizeof(double))); + printf("%lf,%" PRId64 "\t", *(double *)data, *(int64_t *)(data + sizeof(double))); } else if (data[8] == ',') { // in TSDB_FUNC_FIRST_DST/TSDB_FUNC_LAST_DST, // the value is seperated by ',' - printf("%ld,%0x\t", *(int64_t *)data, data + sizeof(int64_t) + 1); + //printf("%" PRId64 ",%0x\t", *(int64_t *)data, data + sizeof(int64_t) + 1); + printf("%" PRId64 ", HEX: ", *(int64_t *)data); + int32_t tmp_len = len - sizeof(int64_t) - 1; + for (int32_t i = 0; i < tmp_len; ++i) { + printf("%0x ", *(data + sizeof(int64_t) + 1 + i)); + } + printf("\t"); } else if (isCharString) { printf("%s\t", data); } @@ -1386,26 +1392,26 @@ static void printBinaryData(char *data, int32_t len) { static void printBinaryDataEx(char *data, int32_t len, SSrcColumnInfo *param) { if (param->functionId == TSDB_FUNC_LAST_DST) { switch (param->type) { - case TSDB_DATA_TYPE_TINYINT:printf("%lld,%d\t", *(int64_t *) data, *(int8_t *) (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_TINYINT:printf("%" PRId64 ",%d\t", *(int64_t *) data, *(int8_t *) (data + TSDB_KEYSIZE + 1)); break; - case TSDB_DATA_TYPE_SMALLINT:printf("%lld,%d\t", *(int64_t *) data, *(int16_t *) (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_SMALLINT:printf("%" PRId64 ",%d\t", *(int64_t *) data, *(int16_t *) (data + TSDB_KEYSIZE + 1)); break; case 
TSDB_DATA_TYPE_TIMESTAMP: - case TSDB_DATA_TYPE_BIGINT:printf("%lld,%lld\t", *(int64_t *) data, *(int64_t *) (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_BIGINT:printf("%" PRId64 ",%" PRId64 "\t", *(int64_t *) data, *(int64_t *) (data + TSDB_KEYSIZE + 1)); break; - case TSDB_DATA_TYPE_FLOAT:printf("%lld,%d\t", *(int64_t *) data, *(float *) (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_FLOAT:printf("%" PRId64 ",%f\t", *(int64_t *) data, *(float *) (data + TSDB_KEYSIZE + 1)); break; - case TSDB_DATA_TYPE_DOUBLE:printf("%lld,%d\t", *(int64_t *) data, *(double *) (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_DOUBLE:printf("%" PRId64 ",%f\t", *(int64_t *) data, *(double *) (data + TSDB_KEYSIZE + 1)); break; - case TSDB_DATA_TYPE_BINARY:printf("%lld,%s\t", *(int64_t *) data, (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_BINARY:printf("%" PRId64 ",%s\t", *(int64_t *) data, (data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_INT: - default:printf("%lld,%d\t", *(int64_t *) data, *(int32_t *) (data + TSDB_KEYSIZE + 1)); + default:printf("%" PRId64 ",%d\t", *(int64_t *) data, *(int32_t *) (data + TSDB_KEYSIZE + 1)); break; } } else if (param->functionId == TSDB_FUNC_AVG) { - printf("%f,%lld\t", *(double *) data, *(int64_t *) (data + sizeof(double) + 1)); + printf("%f,%" PRId64 "\t", *(double *) data, *(int64_t *) (data + sizeof(double) + 1)); } else { // functionId == TSDB_FUNC_MAX_DST | TSDB_FUNC_TAG switch (param->type) { @@ -1417,13 +1423,13 @@ static void printBinaryDataEx(char *data, int32_t len, SSrcColumnInfo *param) { break; case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%lld\t", *(int64_t *)data); + printf("%" PRId64 "\t", *(int64_t *)data); break; case TSDB_DATA_TYPE_FLOAT: - printf("%d\t", *(float *)data); + printf("%f\t", *(float *)data); break; case TSDB_DATA_TYPE_DOUBLE: - printf("%d\t", *(double *)data); + printf("%f\t", *(double *)data); break; case TSDB_DATA_TYPE_BINARY: printf("%s\t", data); @@ -1431,7 +1437,7 @@ static void printBinaryDataEx(char *data, int32_t len, SSrcColumnInfo *param) { case TSDB_DATA_TYPE_INT: default: - printf("%d\t", *(double *)data); + printf("%f\t", *(double *)data); break; } } @@ -1447,7 +1453,7 @@ void tColModelDisplay(tColModel *pModel, void *pData, int32_t numOfRows, int32_t switch (type) { case TSDB_DATA_TYPE_BIGINT: - printf("%lld\t", *(int64_t *)val); + printf("%" PRId64 "\t", *(int64_t *)val); break; case TSDB_DATA_TYPE_INT: printf("%d\t", *(int32_t *)val); @@ -1465,7 +1471,7 @@ void tColModelDisplay(tColModel *pModel, void *pData, int32_t numOfRows, int32_t printf("%lf\t", *(double *)val); break; case TSDB_DATA_TYPE_TIMESTAMP: - printf("%lld\t", *(int64_t *)val); + printf("%" PRId64 "\t", *(int64_t *)val); break; case TSDB_DATA_TYPE_TINYINT: printf("%d\t", *(int8_t *)val); @@ -1498,7 +1504,7 @@ void tColModelDisplayEx(tColModel *pModel, void *pData, int32_t numOfRows, int32 switch (pModel->pFields[j].type) { case TSDB_DATA_TYPE_BIGINT: - printf("%lld\t", *(int64_t *)val); + printf("%" PRId64 "\t", *(int64_t *)val); break; case TSDB_DATA_TYPE_INT: printf("%d\t", *(int32_t *)val); @@ -1516,7 +1522,7 @@ void tColModelDisplayEx(tColModel *pModel, void *pData, int32_t numOfRows, int32 printf("%lf\t", *(double *)val); break; case TSDB_DATA_TYPE_TIMESTAMP: - printf("%lld\t", *(int64_t *)val); + printf("%" PRId64 "\t", *(int64_t *)val); break; case TSDB_DATA_TYPE_TINYINT: printf("%d\t", *(int8_t *)val); @@ -1800,13 +1806,17 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) break; 
}; case TSDB_DATA_TYPE_FLOAT: { - td = *(float *)thisVal; - nd = *(float *)nextVal; + //td = *(float *)thisVal; + //nd = *(float *)nextVal; + td = GET_FLOAT_VAL(thisVal); + nd = GET_FLOAT_VAL(nextVal); break; } case TSDB_DATA_TYPE_DOUBLE: { - td = *(double *)thisVal; - nd = *(double *)nextVal; + //td = *(double *)thisVal; + td = GET_DOUBLE_VAL(thisVal); + //nd = *(double *)nextVal; + nd = GET_DOUBLE_VAL(nextVal); break; } case TSDB_DATA_TYPE_BIGINT: { @@ -1843,15 +1853,17 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) break; }; case TSDB_DATA_TYPE_FLOAT: { - finalResult = *(float *)thisVal; + //finalResult = *(float *)thisVal; + finalResult = GET_FLOAT_VAL(thisVal); break; } case TSDB_DATA_TYPE_DOUBLE: { - finalResult = *(double *)thisVal; + //finalResult = *(double *)thisVal; + finalResult = GET_DOUBLE_VAL(thisVal); break; } case TSDB_DATA_TYPE_BIGINT: { - finalResult = (double)*(int64_t *)thisVal; + finalResult = (double)(*(int64_t *)thisVal); break; } } diff --git a/src/util/src/tglobalcfg.c b/src/util/src/tglobalcfg.c index 9abe60220f27452d74728abb0d65a5abb9a9edf2..4d4bace68136d0cfd487c1e7f24f372eec50ab1d 100644 --- a/src/util/src/tglobalcfg.c +++ b/src/util/src/tglobalcfg.c @@ -58,12 +58,12 @@ int64_t tsMsPerDay[] = {86400000L, 86400000000L}; char tsMasterIp[TSDB_IPv4ADDR_LEN] = {0}; char tsSecondIp[TSDB_IPv4ADDR_LEN] = {0}; -short tsMgmtShellPort = 6030; // udp[6030-6034] tcp[6030] -short tsVnodeShellPort = 6035; // udp[6035-6039] tcp[6035] -short tsMgmtVnodePort = 6040; // udp[6040-6044] tcp[6040] -short tsVnodeVnodePort = 6045; // tcp[6045] -short tsMgmtMgmtPort = 6050; // sdbPeerPort only udp, numOfVnodes fixed to 1, range udp[6050] -short tsMgmtSyncPort = 6050; // sdbSyncPort only tcp, range tcp[6050] +uint16_t tsMgmtShellPort = 6030; // udp[6030-6034] tcp[6030] +uint16_t tsVnodeShellPort = 6035; // udp[6035-6039] tcp[6035] +uint16_t tsMgmtVnodePort = 6040; // udp[6040-6044] tcp[6040] +uint16_t tsVnodeVnodePort = 6045; // tcp[6045] +uint16_t tsMgmtMgmtPort = 6050; // udp, numOfVnodes fixed to 1, range udp[6050] +uint16_t tsMgmtSyncPort = 6050; // tcp, range tcp[6050] int tsStatusInterval = 1; // second int tsShellActivityTimer = 3; // second @@ -75,16 +75,15 @@ int tsMetricMetaKeepTimer = 600; // second float tsNumOfThreadsPerCore = 1.0; float tsRatioOfQueryThreads = 0.5; char tsPublicIp[TSDB_IPv4ADDR_LEN] = {0}; -char tsInternalIp[TSDB_IPv4ADDR_LEN] = {0}; char tsPrivateIp[TSDB_IPv4ADDR_LEN] = {0}; -char tsServerIpStr[TSDB_IPv4ADDR_LEN] = "0.0.0.0"; +char tsServerIpStr[TSDB_IPv4ADDR_LEN] = "127.0.0.1"; short tsNumOfVnodesPerCore = 8; short tsNumOfTotalVnodes = 0; short tsCheckHeaderFile = 0; int tsSessionsPerVnode = 1000; int tsCacheBlockSize = 16384; // 256 columns -int tsAverageCacheBlocks = 4; +int tsAverageCacheBlocks = TSDB_DEFAULT_AVG_BLOCKS; int tsRowsInFileBlock = 4096; float tsFileBlockMinPercent = 0.05; @@ -92,10 +91,10 @@ float tsFileBlockMinPercent = 0.05; short tsNumOfBlocksPerMeter = 100; short tsCommitTime = 3600; // seconds short tsCommitLog = 1; -short tsCompression = 2; +short tsCompression = TSDB_MAX_COMPRESSION_LEVEL; short tsDaysPerFile = 10; int tsDaysToKeep = 3650; -int tsReplications = 1; +int tsReplications = TSDB_REPLICA_MIN_NUM; int tsNumOfMPeers = 3; int tsMaxShellConns = 2000; @@ -124,6 +123,7 @@ int tsMgmtEqualVnodeNum = 0; int tsEnableHttpModule = 1; int tsEnableMonitorModule = 1; int tsRestRowLimit = 10240; +int tsMaxSQLStringLen = TSDB_MAX_SQL_LEN; /* * denote if the server needs to compress response message 
at the application layer to client, including query rsp, @@ -152,8 +152,8 @@ int tsProjectExecInterval = 10000; // every 10sec, the projection will be int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance char tsHttpIp[TSDB_IPv4ADDR_LEN] = "0.0.0.0"; -short tsHttpPort = 6020; // only tcp, range tcp[6020] -// short tsNginxPort = 6060; //only tcp, range tcp[6060] +uint16_t tsHttpPort = 6020; // only tcp, range tcp[6020] +// uint16_t tsNginxPort = 6060; //only tcp, range tcp[6060] int tsHttpCacheSessions = 100; int tsHttpSessionExpire = 36000; int tsHttpMaxThreads = 2; @@ -162,6 +162,17 @@ int tsHttpEnableRecordSql = 0; int tsTelegrafUseFieldNum = 0; int tsAdminRowLimit = 10240; +int tsTscEnableRecordSql = 0; +int tsEnableCoreFile = 0; +int tsAnyIp = 1; +uint32_t tsPublicIpInt = 0; + +#ifdef CLUSTER +int tsIsCluster = 1; +#else +int tsIsCluster = 0; +#endif + int tsRpcTimer = 300; int tsRpcMaxTime = 600; // seconds; @@ -364,7 +375,7 @@ void tsReadLogOption(char *option, char *value) { } } -SGlobalConfig *tsGetConfigOption(char *option) { +SGlobalConfig *tsGetConfigOption(const char *option) { tsInitGlobalConfig(); for (int i = 0; i < tsGlobalConfigNum; ++i) { SGlobalConfig *cfg = tsGlobalConfig + i; @@ -374,7 +385,7 @@ SGlobalConfig *tsGetConfigOption(char *option) { return NULL; } -void tsReadConfigOption(char *option, char *value) { +void tsReadConfigOption(const char *option, char *value) { for (int i = 0; i < tsGlobalConfigNum; ++i) { SGlobalConfig *cfg = tsGlobalConfig + i; if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_CONFIG)) continue; @@ -423,9 +434,7 @@ void tsInitConfigOption(SGlobalConfig *cfg, char *name, void *ptr, int8_t valTyp cfg->cfgStatus = TSDB_CFG_CSTATUS_NONE; } -void tsInitGlobalConfig() { - if (tsGlobalConfig != NULL) return; - +static void doInitGlobalConfig() { tsGlobalConfig = (SGlobalConfig *) malloc(sizeof(SGlobalConfig) * TSDB_CFG_MAX_NUM); memset(tsGlobalConfig, 0, sizeof(SGlobalConfig) * TSDB_CFG_MAX_NUM); @@ -447,9 +456,6 @@ void tsInitGlobalConfig() { tsInitConfigOption(cfg++, "privateIp", tsPrivateIp, TSDB_CFG_VTYPE_IPSTR, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLUSTER, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); - tsInitConfigOption(cfg++, "internalIp", tsInternalIp, TSDB_CFG_VTYPE_IPSTR, - TSDB_CFG_CTYPE_B_CONFIG, - 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "localIp", tsLocalIp, TSDB_CFG_VTYPE_IPSTR, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); @@ -509,7 +515,7 @@ void tsInitGlobalConfig() { 0, TSDB_MAX_VNODES, 0, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "tables", &tsSessionsPerVnode, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, - 4, 220000, 0, TSDB_CFG_UTYPE_NONE); + TSDB_MIN_TABLES_PER_VNODE, TSDB_MAX_TABLES_PER_VNODE, 0, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "cache", &tsCacheBlockSize, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 100, 1048576, 0, TSDB_CFG_UTYPE_BYTE); @@ -521,7 +527,7 @@ void tsInitGlobalConfig() { 0, 1.0, 0, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "ablocks", &tsAverageCacheBlocks, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, - 2, 128, 0, TSDB_CFG_UTYPE_NONE); + TSDB_MIN_AVG_BLOCKS, TSDB_MAX_AVG_BLOCKS, 0, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "tblocks", &tsNumOfBlocksPerMeter, TSDB_CFG_VTYPE_SHORT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 32, 4096, 0, TSDB_CFG_UTYPE_NONE); @@ -536,6 +542,11 @@ void tsInitGlobalConfig() { 0, 
2, 0, TSDB_CFG_UTYPE_NONE); // 0-any, 1-mgmt, 2-dnode + // timer + tsInitConfigOption(cfg++, "maxTmrCtrl", &taosMaxTmrCtrl, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLUSTER, + 8, 2048, 0, TSDB_CFG_UTYPE_NONE); + // time tsInitConfigOption(cfg++, "monitorInterval", &tsMonitorInterval, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG, @@ -638,6 +649,7 @@ void tsInitGlobalConfig() { tsInitConfigOption(cfg++, "defaultPass", tsDefaultPass, TSDB_CFG_VTYPE_STRING, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_NOT_PRINT, 0, 0, TSDB_PASSWORD_LEN, TSDB_CFG_UTYPE_NONE); + // socket type, udp by default tsInitConfigOption(cfg++, "sockettype", tsSocketType, TSDB_CFG_VTYPE_STRING, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW, @@ -646,7 +658,11 @@ void tsInitGlobalConfig() { tsInitConfigOption(cfg++, "compressMsgSize", &tsCompressMsgSize, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW, -1, 10000000, 0, TSDB_CFG_UTYPE_NONE); - + + tsInitConfigOption(cfg++, "maxSQLLength", &tsMaxSQLStringLen, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW, + TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN, 0, TSDB_CFG_UTYPE_BYTE); + // locale & charset tsInitConfigOption(cfg++, "timezone", tsTimezone, TSDB_CFG_VTYPE_STRING, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, @@ -702,7 +718,7 @@ void tsInitGlobalConfig() { 1, 100000, 0, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "httpEnableRecordSql", &tsHttpEnableRecordSql, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG, - 1, 100000, 0, TSDB_CFG_UTYPE_NONE); + 0, 1, 0, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "telegrafUseFieldNum", &tsTelegrafUseFieldNum, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0, 1, 1, TSDB_CFG_UTYPE_NONE); @@ -764,10 +780,27 @@ void tsInitGlobalConfig() { 0, 255, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "tscEnableRecordSql", &tsTscEnableRecordSql, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG, + 0, 1, 0, TSDB_CFG_UTYPE_NONE); + + tsInitConfigOption(cfg++, "enableCoreFile", &tsEnableCoreFile, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG, + 0, 1, 0, TSDB_CFG_UTYPE_NONE); + +#ifdef CLUSTER + tsInitConfigOption(cfg++, "anyIp", &tsAnyIp, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG, + 0, 1, 0, TSDB_CFG_UTYPE_NONE); +#endif + // version info tsInitConfigOption(cfg++, "gitinfo", gitinfo, TSDB_CFG_VTYPE_STRING, TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "gitinfoOfInternal", gitinfoOfInternal, TSDB_CFG_VTYPE_STRING, + TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT, + 0, 0, 0, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "buildinfo", buildinfo, TSDB_CFG_VTYPE_STRING, TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, 0, TSDB_CFG_UTYPE_NONE); @@ -776,6 +809,12 @@ void tsInitGlobalConfig() { 0, 0, 0, TSDB_CFG_UTYPE_NONE); tsGlobalConfigNum = (int)(cfg - tsGlobalConfig); + assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM); +} + +static pthread_once_t initGlobalConfig = PTHREAD_ONCE_INIT; +void tsInitGlobalConfig() { + pthread_once(&initGlobalConfig, doInitGlobalConfig); } void tsReadGlobalLogConfig() { @@ -882,10 +921,7 @@ bool tsReadGlobalConfig() { if (tsPublicIp[0] == 0) { strcpy(tsPublicIp, tsPrivateIp); } - - if (tsInternalIp[0] == 0) { - strcpy(tsInternalIp, tsPrivateIp); - } + tsPublicIpInt = inet_addr(tsPublicIp); if (tsLocalIp[0] == 0) { strcpy(tsLocalIp, 
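Editor's note: the `pthread_once` rewrite of `tsInitGlobalConfig()` above replaces a plain "already initialized" pointer check that two threads could pass at the same time; a minimal sketch of the pattern with illustrative names:

```c
/* Minimal sketch of the pthread_once pattern now used for global config
 * initialization: the init body runs exactly once even when several threads
 * call ensure_init() concurrently, unlike an unsynchronized NULL check. */
#include <pthread.h>
#include <stdio.h>

static pthread_once_t initOnce = PTHREAD_ONCE_INIT;

static void do_init(void) {
  printf("config initialized exactly once\n");
}

void ensure_init(void) {
  pthread_once(&initOnce, do_init);
}
```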
tsPrivateIp); @@ -1148,4 +1184,4 @@ void tsPrintGlobalConfigSpec() { pPrint(" dataDir: %s", dataDir); } -#endif \ No newline at end of file +#endif diff --git a/src/util/src/thash.c b/src/util/src/thash.c index 3b9cec0df5b9388710d28620bcaa68a58c2d44a9..e3c6fe26b4e98185d48ff55d8cbfecd08a16ddbb 100644 --- a/src/util/src/thash.c +++ b/src/util/src/thash.c @@ -13,13 +13,6 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include -#include #include "os.h" #include "tmempool.h" diff --git a/src/util/src/thistogram.c b/src/util/src/thistogram.c index 6cd02cc72e7ab4b09271a00b03aa1ac558fcc8f3..a3f6e7203cb63f8fbb80f3f524b510820b39facf 100644 --- a/src/util/src/thistogram.c +++ b/src/util/src/thistogram.c @@ -12,14 +12,7 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ - -#include -#include -#include -#include -#include -#include -#include +#include "os.h" #include "taosmsg.h" #include "thistogram.h" @@ -453,7 +446,7 @@ void tHistogramPrint(SHistogramInfo* pHisto) { printf("total entries: %d, elements: %d\n", pHisto->numOfEntries, pHisto->numOfElems); #if defined(USE_ARRAYLIST) for (int32_t i = 0; i < pHisto->numOfEntries; ++i) { - printf("%d: (%f, %lld)\n", i + 1, pHisto->elems[i].val, pHisto->elems[i].num); + printf("%d: (%f, %" PRId64 ")\n", i + 1, pHisto->elems[i].val, pHisto->elems[i].num); } #else tSkipListNode* pNode = pHisto->pList->pHead.pForward[0]; diff --git a/src/util/src/tidpool.c b/src/util/src/tidpool.c index a9cc78a026f4108c758ad6e47ffacce148e56001..c50c38aa3cabd74266d4164f0ebdf4581ce670f6 100644 --- a/src/util/src/tidpool.c +++ b/src/util/src/tidpool.c @@ -13,10 +13,7 @@ * along with this program. If not, see . */ -#include -#include -#include -#include +#include "os.h" #include "tlog.h" typedef struct { diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 02e30be33a7babde369f5d3b6c8ab8ef71d1fba6..1a7f672e00321c0891aa54ae9f3cc3efedb89d54 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -13,22 +13,6 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "tlog.h" #include "tutil.h" @@ -381,7 +365,7 @@ void tprintf(const char *const flags, int dflag, const char *const format, ...) } if (taosLogMaxLines > 0) { - __sync_add_and_fetch_32(&taosLogLines, 1); + atomic_add_fetch_32(&taosLogLines, 1); if ((taosLogLines > taosLogMaxLines) && (openInProgress == 0)) taosOpenNewLogFile(); } @@ -458,7 +442,7 @@ void taosPrintLongString(const char *const flags, int dflag, const char *const f taosPushLogBuffer(logHandle, buffer, len); if (taosLogMaxLines > 0) { - __sync_add_and_fetch_32(&taosLogLines, 1); + atomic_add_fetch_32(&taosLogLines, 1); if ((taosLogLines > taosLogMaxLines) && (openInProgress == 0)) taosOpenNewLogFile(); } diff --git a/src/util/src/tmem.c b/src/util/src/tmem.c index c2bb8366354109efdb54ec589e9204af0826e1f1..2625e4e5e6a645c4de2bd97534324b037acb4aaa 100644 --- a/src/util/src/tmem.c +++ b/src/util/src/tmem.c @@ -13,72 +13,465 @@ * along with this program. If not, see . 
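Editor's note: the `__sync_add_and_fetch_32` → `atomic_add_fetch_32` substitutions in tlog.c route the log-line counter through the project's portable atomic wrappers instead of a GCC-specific spelling; the real macros are defined in the os layer, so the mapping below is only an illustrative assumption:

```c
/* Hypothetical mapping for a portable 32-bit atomic increment; the project's
 * real atomic_add_fetch_32 is defined per platform/compiler in its os headers
 * and may differ from this sketch. */
#include <stdint.h>

#define demo_atomic_add_fetch_32(ptr, val) __sync_add_and_fetch((ptr), (val))

static int32_t logLines = 0;

static void bump_log_lines(void) {
  (void)demo_atomic_add_fetch_32(&logLines, 1);  /* thread-safe counter bump */
}
```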
*/ -#include -#include -#include -#include -#include -#include - +#include "os.h" #include "tlog.h" +#define TAOS_MEM_CHECK_IMPL +#include "tutil.h" + + +#ifdef TAOS_MEM_CHECK + +static int allocMode = TAOS_ALLOC_MODE_DEFAULT; +static FILE* fpAllocLog = NULL; + +//////////////////////////////////////////////////////////////////////////////// +// memory allocator which fails randomly + extern int32_t taosGetTimestampSec(); -static int32_t startTime = 0; -static int64_t m_curLimit = 100*1024; +static int32_t startTime = INT32_MAX;; -bool isMallocMem(unsigned int size, char* _func) { - if (0 == startTime) { - startTime = taosGetTimestampSec(); - return true; - } else { - int32_t currentTime = taosGetTimestampSec(); - if (currentTime - startTime < 10) return true; +static bool random_alloc_fail(size_t size, const char* file, uint32_t line) { + if (taosGetTimestampSec() < startTime) { + return false; } - if (size > m_curLimit) { - if (3 == rand() % 20) { - pTrace("====no alloc mem in func: %s, size:%d", _func, size); - return false; - } + if (size < 100 * (size_t)1024) { + return false; + } + + if (rand() % 20 != 0) { + return false; + } + + if (fpAllocLog != NULL) { + fprintf(fpAllocLog, "%s:%d: memory allocation of %zu bytes will fail.\n", file, line, size); } return true; } -void* taos_malloc(unsigned int size, char* _func) { +static void* malloc_random(size_t size, const char* file, uint32_t line) { + return random_alloc_fail(size, file, line) ? NULL : malloc(size); +} + +static void* calloc_random(size_t num, size_t size, const char* file, uint32_t line) { + return random_alloc_fail(num * size, file, line) ? NULL : calloc(num, size); +} + +static void* realloc_random(void* ptr, size_t size, const char* file, uint32_t line) { + return random_alloc_fail(size, file, line) ? NULL : realloc(ptr, size); +} + +static char* strdup_random(const char* str, const char* file, uint32_t line) { + size_t len = strlen(str); + return random_alloc_fail(len + 1, file, line) ? NULL : strdup(str); +} + +static char* strndup_random(const char* str, size_t size, const char* file, uint32_t line) { + size_t len = strlen(str); + if (len > size) { + len = size; + } + return random_alloc_fail(len + 1, file, line) ? NULL : strndup(str, len); +} + +static ssize_t getline_random(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) { + return random_alloc_fail(*n, file, line) ? 
-1 : getline(lineptr, n, stream); +} + +//////////////////////////////////////////////////////////////////////////////// +// memory allocator with leak detection + +#define MEMBLK_MAGIC 0x55AA + +typedef struct SMemBlock { + const char* file; + uint16_t line; + uint16_t magic; + uint32_t size; + struct SMemBlock* prev; + struct SMemBlock* next; + // TODO: need pading in 32bit platform + char data[0]; +} SMemBlock; + +static SMemBlock *blocks = NULL; +static uintptr_t lock = 0; + +static void add_mem_block(SMemBlock* blk) { + blk->prev = NULL; + while (atomic_val_compare_exchange_ptr(&lock, 0, 1) != 0); + blk->next = blocks; + if (blocks != NULL) { + blocks->prev = blk; + } + blocks = blk; + atomic_store_ptr(&lock, 0); +} + +static void remove_mem_block(SMemBlock* blk) { + while (atomic_val_compare_exchange_ptr(&lock, 0, 1) != 0); + + if (blocks == blk) { + blocks = blk->next; + } + if (blk->prev != NULL) { + blk->prev->next = blk->next; + } + if (blk->next != NULL) { + blk->next->prev = blk->prev; + } + + atomic_store_ptr(&lock, 0); + + blk->prev = NULL; + blk->next = NULL; +} + +static void free_detect_leak(void* ptr, const char* file, uint32_t line) { + if (ptr == NULL) { + return; + } + + SMemBlock* blk = (SMemBlock*)(((char*)ptr) - sizeof(SMemBlock)); + if (blk->magic != MEMBLK_MAGIC) { + if (fpAllocLog != NULL) { + fprintf(fpAllocLog, "%s:%d: memory is allocated by default allocator.\n", file, line); + } + free(ptr); + return; + } + + remove_mem_block(blk); + free(blk); +} + +static void* malloc_detect_leak(size_t size, const char* file, uint32_t line) { + if (size == 0) { + return NULL; + } - if (false == isMallocMem(size, _func)) { + SMemBlock *blk = (SMemBlock*)malloc(size + sizeof(SMemBlock)); + if (blk == NULL) { return NULL; } - - void *p = NULL; - p = malloc(size); + + if (line > UINT16_MAX && fpAllocLog != NULL) { + fprintf(fpAllocLog, "%s:%d: line number too large.\n", file, line); + } + + if (size > UINT32_MAX && fpAllocLog != NULL) { + fprintf(fpAllocLog, "%s:%d: size too large: %zu.\n", file, line, size); + } + + blk->file = file; + blk->line = (uint16_t)line; + blk->magic = MEMBLK_MAGIC; + blk->size = size; + add_mem_block(blk); + + return blk->data; +} + +static void* calloc_detect_leak(size_t num, size_t size, const char* file, uint32_t line) { + size *= num; + void* p = malloc_detect_leak(size, file, line); + if (p != NULL) { + memset(p, 0, size); + } return p; } -void* taos_calloc(unsigned int num, unsigned int size, char* _func) { - - if (false == isMallocMem(size, _func)) { +static void* realloc_detect_leak(void* ptr, size_t size, const char* file, uint32_t line) { + if (size == 0) { + free_detect_leak(ptr, file, line); return NULL; } - - void *p = NULL; - p = calloc(num, size); + + if (ptr == NULL) { + return malloc_detect_leak(size, file, line); + } + + SMemBlock* blk = ((char*)ptr) - sizeof(SMemBlock); + if (blk->magic != MEMBLK_MAGIC) { + if (fpAllocLog != NULL) { + fprintf(fpAllocLog, "%s:%d: memory is allocated by default allocator.\n", file, line); + } + return realloc(ptr, size); + } + + remove_mem_block(blk); + + void* p = realloc(blk, size + sizeof(SMemBlock)); + if (p == NULL) { + add_mem_block(blk); + return NULL; + } + + if (size > UINT32_MAX && fpAllocLog != NULL) { + fprintf(fpAllocLog, "%s:%d: size too large: %zu.\n", file, line, size); + } + + blk = (SMemBlock*)p; + blk->size = size; + add_mem_block(blk); + return blk->data; +} + +static char* strdup_detect_leak(const char* str, const char* file, uint32_t line) { + size_t len = strlen(str); + char 
*p = malloc_detect_leak(len + 1, file, line); + if (p != NULL) { + memcpy(p, str, len); + p[len] = 0; + } return p; } -void* taos_realloc(void* ptr, unsigned int size, char* _func) { - - if (false == isMallocMem(size, _func)) { - return NULL; +static char* strndup_detect_leak(const char* str, size_t size, const char* file, uint32_t line) { + size_t len = strlen(str); + if (len > size) { + len = size; + } + char *p = malloc_detect_leak(len + 1, file, line); + if (p != NULL) { + memcpy(p, str, len); + p[len] = 0; } - - void *p = NULL; - p = realloc(ptr, size); return p; } -void taos_free(void* ptr) { - free(ptr); +static ssize_t getline_detect_leak(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) { + char* buf = NULL; + size_t bufSize = 0; + ssize_t size = getline(&buf, &bufSize, stream); + if (size != -1) { + if (*n < size + 1) { + void* p = realloc_detect_leak(*lineptr, size + 1, file, line); + if (p == NULL) { + free(buf); + return -1; + } + *lineptr = (char*)p; + *n = size + 1; + } + memcpy(*lineptr, buf, size + 1); + } + + free(buf); + return size; +} + +static void dump_memory_leak() { + const char* hex = "0123456789ABCDEF"; + const char* fmt = ":%d: addr=%p, size=%d, content(first 16 bytes)="; + size_t numOfBlk = 0, totalSize = 0; + + if (fpAllocLog == NULL) { + return; + } + + fputs("memory blocks allocated but not freed before exit:\n", fpAllocLog); + + while (atomic_val_compare_exchange_ptr(&lock, 0, 1) != 0); + + for (SMemBlock* blk = blocks; blk != NULL; blk = blk->next) { + ++numOfBlk; + totalSize += blk->size; + + fputs(blk->file, fpAllocLog); + fprintf(fpAllocLog, fmt, blk->line, blk->data, blk->size); + + char sep = '\''; + size_t size = blk->size > 16 ? 16 : blk->size; + for (size_t i = 0; i < size; ++i) { + uint8_t c = (uint8_t)(blk->data[i]); + fputc(sep, fpAllocLog); + sep = ' '; + fputc(hex[c >> 4], fpAllocLog); + fputc(hex[c & 0x0f], fpAllocLog); + } + + fputs("'\n", fpAllocLog); + } + + atomic_store_ptr(&lock, 0); + + fprintf(fpAllocLog, "\nnumber of blocks: %zu, total bytes: %zu\n", numOfBlk, totalSize); + fflush(fpAllocLog); +} + +static void dump_memory_leak_on_sig(int sig) { + fprintf(fpAllocLog, "signal %d received.\n", sig); + + // restore default signal handler + struct sigaction act = {0}; + act.sa_handler = SIG_DFL; + sigaction(sig, &act, NULL); + + dump_memory_leak(); +} + +//////////////////////////////////////////////////////////////////////////////// +// interface functions + +void* taos_malloc(size_t size, const char* file, uint32_t line) { + switch (allocMode) { + case TAOS_ALLOC_MODE_DEFAULT: + return malloc(size); + + case TAOS_ALLOC_MODE_RANDOM_FAIL: + return malloc_random(size, file, line); + + case TAOS_ALLOC_MODE_DETECT_LEAK: + return malloc_detect_leak(size, file, line); + } + return malloc(size); +} + +void* taos_calloc(size_t num, size_t size, const char* file, uint32_t line) { + switch (allocMode) { + case TAOS_ALLOC_MODE_DEFAULT: + return calloc(num, size); + + case TAOS_ALLOC_MODE_RANDOM_FAIL: + return calloc_random(num, size, file, line); + + case TAOS_ALLOC_MODE_DETECT_LEAK: + return calloc_detect_leak(num, size, file, line); + } + return calloc(num, size); +} + +void* taos_realloc(void* ptr, size_t size, const char* file, uint32_t line) { + switch (allocMode) { + case TAOS_ALLOC_MODE_DEFAULT: + return realloc(ptr, size); + + case TAOS_ALLOC_MODE_RANDOM_FAIL: + return realloc_random(ptr, size, file, line); + + case TAOS_ALLOC_MODE_DETECT_LEAK: + return realloc_detect_leak(ptr, size, file, line); + } + return 
realloc(ptr, size); +} + +void taos_free(void* ptr, const char* file, uint32_t line) { + switch (allocMode) { + case TAOS_ALLOC_MODE_DEFAULT: + return free(ptr); + + case TAOS_ALLOC_MODE_RANDOM_FAIL: + return free(ptr); + + case TAOS_ALLOC_MODE_DETECT_LEAK: + return free_detect_leak(ptr, file, line); + } + return free(ptr); +} + +char* taos_strdup(const char* str, const char* file, uint32_t line) { + switch (allocMode) { + case TAOS_ALLOC_MODE_DEFAULT: + return strdup(str); + + case TAOS_ALLOC_MODE_RANDOM_FAIL: + return strdup_random(str, file, line); + + case TAOS_ALLOC_MODE_DETECT_LEAK: + return strdup_detect_leak(str, file, line); + } + return strdup(str); +} + +char* taos_strndup(const char* str, size_t size, const char* file, uint32_t line) { + switch (allocMode) { + case TAOS_ALLOC_MODE_DEFAULT: + return strndup(str, size); + + case TAOS_ALLOC_MODE_RANDOM_FAIL: + return strndup_random(str, size, file, line); + + case TAOS_ALLOC_MODE_DETECT_LEAK: + return strndup_detect_leak(str, size, file, line); + } + return strndup(str, size); +} + +ssize_t taos_getline(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) { + switch (allocMode) { + case TAOS_ALLOC_MODE_DEFAULT: + return getline(lineptr, n, stream); + + case TAOS_ALLOC_MODE_RANDOM_FAIL: + return getline_random(lineptr, n, stream, file, line); + + case TAOS_ALLOC_MODE_DETECT_LEAK: + return getline_detect_leak(lineptr, n, stream, file, line); + } + return getline(lineptr, n, stream); +} + +static void close_alloc_log() { + if (fpAllocLog != NULL) { + if (fpAllocLog != stdout) { + fclose(fpAllocLog); + } + fpAllocLog = NULL; + } +} + +void taosSetAllocMode(int mode, const char* path, bool autoDump) { + assert(mode >= TAOS_ALLOC_MODE_DEFAULT); + assert(mode <= TAOS_ALLOC_MODE_DETECT_LEAK); + + if (fpAllocLog != NULL || allocMode != TAOS_ALLOC_MODE_DEFAULT) { + printf("memory allocation mode can only be set once.\n"); + return; + } + + if (path == NULL || path[0] == 0) { + fpAllocLog = stdout; + } else if ((fpAllocLog = fopen(path, "w")) != NULL) { + atexit(close_alloc_log); + } else { + printf("failed to open memory allocation log file '%s', errno=%d\n", path, errno); + return; + } + + allocMode = mode; + + if (mode == TAOS_ALLOC_MODE_RANDOM_FAIL) { + startTime = taosGetTimestampSec() + 10; + return; + } + + if (autoDump && mode == TAOS_ALLOC_MODE_DETECT_LEAK) { + atexit(dump_memory_leak); + + struct sigaction act = {0}; + act.sa_handler = dump_memory_leak_on_sig; + sigaction(SIGFPE, &act, NULL); + sigaction(SIGSEGV, &act, NULL); + sigaction(SIGILL, &act, NULL); + } +} + +void taosDumpMemoryLeak() { + dump_memory_leak(); + close_alloc_log(); +} + +#else // 'TAOS_MEM_CHECK' not defined + +void taosSetAllocMode(int mode, const char* path, bool autoDump) { + // do nothing +} + +void taosDumpMemoryLeak() { + // do nothing } +#endif // TAOS_MEM_CHECK diff --git a/src/util/src/tmodule.c b/src/util/src/tmodule.c index fabdc1ed4f1c0f93e2c43e1395bb01bd316909b9..54669a20bed2bae1474222fe95790414aceee215 100644 --- a/src/util/src/tmodule.c +++ b/src/util/src/tmodule.c @@ -13,12 +13,6 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include - #include "os.h" #include "tmodule.h" #include "tutil.h" diff --git a/src/util/src/tnote.c b/src/util/src/tnote.c index 68228d377a7d1f8d2da8aa0127752ad4c07622c0..d12cc6e613cc9ce80574ab456e4fddb0ea4d75ad 100644 --- a/src/util/src/tnote.c +++ b/src/util/src/tnote.c @@ -13,58 +13,50 @@ * along with this program. If not, see . 
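Editor's note: a short usage sketch for the new allocator switch in tmem.c; `taosSetAllocMode()` and `taosDumpMemoryLeak()` are from this patch, while the numeric enum values and the log path below are assumptions for illustration:

```c
/* Illustrative driver for the memory-check hooks added in tmem.c. The mode
 * constants are assumed to come from tutil.h with these values; the path is
 * made up. */
#include <stdbool.h>

enum { TAOS_ALLOC_MODE_DEFAULT = 0, TAOS_ALLOC_MODE_RANDOM_FAIL = 1, TAOS_ALLOC_MODE_DETECT_LEAK = 2 };

void taosSetAllocMode(int mode, const char *path, bool autoDump);
void taosDumpMemoryLeak(void);

int run_with_leak_check(void) {
  /* must be selected once, before the first taos_malloc()/taos_free() */
  taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, "/tmp/alloc.log", true);

  /* ... exercise the workload under test ... */

  taosDumpMemoryLeak();  /* writes surviving blocks and totals to the log */
  return 0;
}
```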
*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "os.h" -#include "tutil.h" -#include "tglobalcfg.h" - -#define MAX_NOTE_LINE_SIZE 66000 -#define NOTE_FILE_NAME_LEN 300 - -static int taosNoteFileNum = 1; -static int taosNoteMaxLines = 0; -static int taosNoteLines = 0; -static char taosNoteName[NOTE_FILE_NAME_LEN]; -static int taosNoteFlag = 0; -static int taosNoteFd = -1; -static int taosNoteOpenInProgress = 0; -static pthread_mutex_t taosNoteMutex; -void taosNotePrint(const char * const format, ...); -int taosOpenNoteWithMaxLines(char *fn, int maxLines, int maxNoteNum); - -void taosInitNote(int numOfNoteLines, int maxNotes) +#include "tnote.h" + +taosNoteInfo m_HttpNote; +taosNoteInfo m_TscNote; + +int taosOpenNoteWithMaxLines(char *fn, int maxLines, int maxNoteNum, taosNoteInfo * pNote); + +void taosInitNote(int numOfNoteLines, int maxNotes, char* lable) { + taosNoteInfo * pNote = NULL; char temp[128] = { 0 }; - sprintf(temp, "%s/taosnote", logDir); - if (taosOpenNoteWithMaxLines(temp, numOfNoteLines, maxNotes) < 0) + + if (strcasecmp(lable, "http_note") == 0) { + pNote = &m_HttpNote; + sprintf(temp, "%s/httpnote", logDir); + } else if (strcasecmp(lable, "tsc_note") == 0) { + pNote = &m_TscNote; + sprintf(temp, "%s/tscnote-%d", logDir, getpid()); + } else { + return; + } + + memset(pNote, 0, sizeof(taosNoteInfo)); + pNote->taosNoteFileNum = 1; + //pNote->taosNoteMaxLines = 0; + //pNote->taosNoteLines = 0; + //pNote->taosNoteFlag = 0; + pNote->taosNoteFd = -1; + //pNote->taosNoteOpenInProgress = 0; + + if (taosOpenNoteWithMaxLines(temp, numOfNoteLines, maxNotes, pNote) < 0) fprintf(stderr, "failed to init note file\n"); - taosNotePrint("=================================================="); - taosNotePrint("=================== new note ==================="); - taosNotePrint("=================================================="); + taosNotePrint(pNote, "=================================================="); + taosNotePrint(pNote, "=================== new note ==================="); + taosNotePrint(pNote, "=================================================="); } -void taosCloseNoteByFd(int oldFd); -bool taosLockNote(int fd) +void taosCloseNoteByFd(int oldFd, taosNoteInfo * pNote); +bool taosLockNote(int fd, taosNoteInfo * pNote) { if (fd < 0) return false; - if (taosNoteFileNum > 1) { + if (pNote->taosNoteFileNum > 1) { int ret = (int)(flock(fd, LOCK_EX | LOCK_NB)); if (ret == 0) { return true; @@ -74,11 +66,11 @@ bool taosLockNote(int fd) return false; } -void taosUnLockNote(int fd) +void taosUnLockNote(int fd, taosNoteInfo * pNote) { if (fd < 0) return; - if (taosNoteFileNum > 1) { + if (pNote->taosNoteFileNum > 1) { flock(fd, LOCK_UN | LOCK_NB); } } @@ -86,50 +78,51 @@ void taosUnLockNote(int fd) void *taosThreadToOpenNewNote(void *param) { char name[NOTE_FILE_NAME_LEN]; + taosNoteInfo * pNote = (taosNoteInfo *)param; - taosNoteFlag ^= 1; - taosNoteLines = 0; - sprintf(name, "%s.%d", taosNoteName, taosNoteFlag); + pNote->taosNoteFlag ^= 1; + pNote->taosNoteLines = 0; + sprintf(name, "%s.%d", pNote->taosNoteName, pNote->taosNoteFlag); umask(0); int fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); - taosLockNote(fd); + taosLockNote(fd, pNote); lseek(fd, 0, SEEK_SET); - int oldFd = taosNoteFd; - taosNoteFd = fd; - taosNoteLines = 0; - taosNoteOpenInProgress = 0; - taosNotePrint("=============== new note is opened ============="); + int oldFd = 
pNote->taosNoteFd; + pNote->taosNoteFd = fd; + pNote->taosNoteLines = 0; + pNote->taosNoteOpenInProgress = 0; + taosNotePrint(pNote, "=============== new note is opened ============="); - taosCloseNoteByFd(oldFd); + taosCloseNoteByFd(oldFd, pNote); return NULL; } -int taosOpenNewNote() +int taosOpenNewNote(taosNoteInfo * pNote) { - pthread_mutex_lock(&taosNoteMutex); + pthread_mutex_lock(&pNote->taosNoteMutex); - if (taosNoteLines > taosNoteMaxLines && taosNoteOpenInProgress == 0) { - taosNoteOpenInProgress = 1; + if (pNote->taosNoteLines > pNote->taosNoteMaxLines && pNote->taosNoteOpenInProgress == 0) { + pNote->taosNoteOpenInProgress = 1; - taosNotePrint("=============== open new note =================="); + taosNotePrint(pNote, "=============== open new note =================="); pthread_t pattern; pthread_attr_t attr; pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); - pthread_create(&pattern, &attr, taosThreadToOpenNewNote, NULL); + pthread_create(&pattern, &attr, taosThreadToOpenNewNote, (void*)pNote); pthread_attr_destroy(&attr); } - pthread_mutex_unlock(&taosNoteMutex); + pthread_mutex_unlock(&pNote->taosNoteMutex); - return taosNoteFd; + return pNote->taosNoteFd; } -bool taosCheckNoteIsOpen(char *noteName) +bool taosCheckNoteIsOpen(char *noteName, taosNoteInfo * pNote) { int exist = access(noteName, F_OK); if (exist != 0) { @@ -142,8 +135,8 @@ bool taosCheckNoteIsOpen(char *noteName) return true; } - if (taosLockNote(fd)) { - taosUnLockNote(fd); + if (taosLockNote(fd, pNote)) { + taosUnLockNote(fd, pNote); close(fd); return false; } @@ -153,80 +146,80 @@ bool taosCheckNoteIsOpen(char *noteName) } } -void taosGetNoteName(char *fn) +void taosGetNoteName(char *fn, taosNoteInfo * pNote) { - if (taosNoteFileNum > 1) { - for (int i = 0; i < taosNoteFileNum; i++) { + if (pNote->taosNoteFileNum > 1) { + for (int i = 0; i < pNote->taosNoteFileNum; i++) { char fileName[NOTE_FILE_NAME_LEN]; sprintf(fileName, "%s%d.0", fn, i); - bool file1open = taosCheckNoteIsOpen(fileName); + bool file1open = taosCheckNoteIsOpen(fileName, pNote); sprintf(fileName, "%s%d.1", fn, i); - bool file2open = taosCheckNoteIsOpen(fileName); + bool file2open = taosCheckNoteIsOpen(fileName, pNote); if (!file1open && !file2open) { - sprintf(taosNoteName, "%s%d", fn, i); + sprintf(pNote->taosNoteName, "%s%d", fn, i); return; } } } - strcpy(taosNoteName, fn); + strcpy(pNote->taosNoteName, fn); } -int taosOpenNoteWithMaxLines(char *fn, int maxLines, int maxNoteNum) +int taosOpenNoteWithMaxLines(char *fn, int maxLines, int maxNoteNum, taosNoteInfo * pNote) { char name[NOTE_FILE_NAME_LEN] = "\0"; struct stat notestat0, notestat1; int size; - taosNoteMaxLines = maxLines; - taosNoteFileNum = maxNoteNum; - taosGetNoteName(fn); + pNote->taosNoteMaxLines = maxLines; + pNote->taosNoteFileNum = maxNoteNum; + taosGetNoteName(fn, pNote); strcpy(name, fn); strcat(name, ".0"); // if none of the note files exist, open 0, if both exists, open the old one if (stat(name, ¬estat0) < 0) { - taosNoteFlag = 0; + pNote->taosNoteFlag = 0; } else { strcpy(name, fn); strcat(name, ".1"); if (stat(name, ¬estat1) < 0) { - taosNoteFlag = 1; + pNote->taosNoteFlag = 1; } else { - taosNoteFlag = (notestat0.st_mtime > notestat1.st_mtime) ? 0 : 1; + pNote->taosNoteFlag = (notestat0.st_mtime > notestat1.st_mtime) ? 
0 : 1; } } - sprintf(name, "%s.%d", taosNoteName, taosNoteFlag); - pthread_mutex_init(&taosNoteMutex, NULL); + sprintf(name, "%s.%d", pNote->taosNoteName, pNote->taosNoteFlag); + pthread_mutex_init(&pNote->taosNoteMutex, NULL); umask(0); - taosNoteFd = open(name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + pNote->taosNoteFd = open(name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); - if (taosNoteFd < 0) { + if (pNote->taosNoteFd < 0) { fprintf(stderr, "failed to open note file:%s reason:%s\n", name, strerror(errno)); return -1; } - taosLockNote(taosNoteFd); + taosLockNote(pNote->taosNoteFd, pNote); // only an estimate for number of lines struct stat filestat; - fstat(taosNoteFd, &filestat); + fstat(pNote->taosNoteFd, &filestat); size = (int)filestat.st_size; - taosNoteLines = size / 60; + pNote->taosNoteLines = size / 60; - lseek(taosNoteFd, 0, SEEK_END); + lseek(pNote->taosNoteFd, 0, SEEK_END); return 0; } -void taosNotePrint(const char * const format, ...) +void taosNotePrint(taosNoteInfo * pNote, const char * const format, ...) { va_list argpointer; char buffer[MAX_NOTE_LINE_SIZE]; @@ -238,7 +231,7 @@ void taosNotePrint(const char * const format, ...) gettimeofday(&timeSecs, NULL); curTime = timeSecs.tv_sec; ptm = localtime_r(&curTime, &Tm); - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec); + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); va_start(argpointer, format); len += vsnprintf(buffer + len, MAX_NOTE_LINE_SIZE - len, format, argpointer); @@ -249,26 +242,26 @@ void taosNotePrint(const char * const format, ...) buffer[len++] = '\n'; buffer[len] = 0; - if (taosNoteFd >= 0) { - twrite(taosNoteFd, buffer, (unsigned int)len); + if (pNote->taosNoteFd >= 0) { + twrite(pNote->taosNoteFd, buffer, (unsigned int)len); - if (taosNoteMaxLines > 0) { - taosNoteLines++; - if ((taosNoteLines > taosNoteMaxLines) && (taosNoteOpenInProgress == 0)) - taosOpenNewNote(); + if (pNote->taosNoteMaxLines > 0) { + pNote->taosNoteLines++; + if ((pNote->taosNoteLines > pNote->taosNoteMaxLines) && (pNote->taosNoteOpenInProgress == 0)) + taosOpenNewNote(pNote); } } } -void taosCloseNote() +void taosCloseNote(taosNoteInfo * pNote) { - taosCloseNoteByFd(taosNoteFd); + taosCloseNoteByFd(pNote->taosNoteFd, pNote); } -void taosCloseNoteByFd(int fd) +void taosCloseNoteByFd(int fd, taosNoteInfo * pNote) { if (fd >= 0) { - taosUnLockNote(fd); + taosUnLockNote(fd, pNote); close(fd); } } diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c index ee16b92b658afde6e25fb6b3adb27a3a3dcdc63c..4eefd2ad958cd6424cb92b4199d0c21821b56716 100644 --- a/src/util/src/tsched.c +++ b/src/util/src/tsched.c @@ -13,17 +13,12 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "tlog.h" #include "tsched.h" +#include "ttimer.h" + +#define DUMP_SCHEDULER_TIME_WINDOW 30000 //every 30sec, take a snap shot of task queue. 
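Editor's note: the note module above now keeps its state in a per-instance `taosNoteInfo` selected by label instead of file-scope globals; a sketch of how a caller drives the new entry point (the signature and the label strings come from this patch, the line and file counts are arbitrary example values):

```c
/* Sketch of initializing the two note instances introduced by this patch.
 * "tsc_note" and "http_note" are the labels the new taosInitNote() accepts;
 * 100000 lines per file and 2 rotating files are example values only. */
void taosInitNote(int numOfNoteLines, int maxNotes, char *lable);

static void init_sql_audit_notes(void) {
  taosInitNote(100000, 2, "tsc_note");   /* client-side SQL recording */
  taosInitNote(100000, 2, "http_note");  /* REST/HTTP SQL recording */
}
```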
typedef struct { char label[16]; @@ -36,10 +31,13 @@ typedef struct { int numOfThreads; pthread_t * qthread; SSchedMsg * queue; + + void* pTmrCtrl; + void* pTimer; } SSchedQueue; -void *taosProcessSchedQueue(void *param); -void taosCleanUpScheduler(void *param); +static void *taosProcessSchedQueue(void *param); +static void taosDumpSchedulerStatus(void *qhandle, void *tmrId); void *taosInitScheduler(int queueSize, int numOfThreads, const char *label) { pthread_attr_t attr; @@ -104,6 +102,17 @@ _error: return NULL; } +void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl) { + SSchedQueue* pSched = taosInitScheduler(queueSize, numOfThreads, label); + + if (tmrCtrl != NULL && pSched != NULL) { + pSched->pTmrCtrl = tmrCtrl; + taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer); + } + + return pSched; +} + void *taosProcessSchedQueue(void *param) { SSchedMsg msg; SSchedQueue *pSched = (SSchedQueue *)param; @@ -181,8 +190,27 @@ void taosCleanUpScheduler(void *param) { tsem_destroy(&pSched->emptySem); tsem_destroy(&pSched->fullSem); pthread_mutex_destroy(&pSched->queueMutex); + + if (pSched->pTimer) { + taosTmrStopA(&pSched->pTimer); + } free(pSched->queue); free(pSched->qthread); free(pSched); // fix memory leak } + +// for debug purpose, dump the scheduler status every 1min. +void taosDumpSchedulerStatus(void *qhandle, void *tmrId) { + SSchedQueue *pSched = (SSchedQueue *)qhandle; + if (pSched == NULL || pSched->pTimer == NULL || pSched->pTimer != tmrId) { + return; + } + + int32_t size = ((pSched->emptySlot - pSched->fullSlot) + pSched->queueSize) % pSched->queueSize; + if (size > 0) { + pTrace("scheduler:%s, current tasks in queue:%d, task thread:%d", pSched->label, size, pSched->numOfThreads); + } + + taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer); +} diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index 72469fa75ba52154a9ef21f0a92670fb3ca06b8c..01c91b6c6565e1dc2b0b1cd20eb8c7324eb70ea4 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -12,11 +12,7 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ - -#include -#include -#include -#include +#include "os.h" #include "tlog.h" #include "tsdb.h" @@ -570,7 +566,7 @@ int32_t tSkipListIterateList(tSkipList *pSkipList, tSkipListNode ***pRes, bool ( char* tmp = realloc((*pRes), num * POINTER_BYTES); assert(tmp != NULL); - *pRes = tmp; + *pRes = (tSkipListNode**)tmp; } return num; @@ -688,7 +684,7 @@ void tSkipListPrint(tSkipList *pSkipList, int16_t nlevel) { case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BIGINT: - fprintf(stdout, "%d: %lld \n", id++, p->key.i64Key); + fprintf(stdout, "%d: %" PRId64 " \n", id++, p->key.i64Key); break; case TSDB_DATA_TYPE_BINARY: fprintf(stdout, "%d: %s \n", id++, p->key.pz); diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index be352eb7adf908701224388a1138cade5fd40e4f..0ebee5a8f4379b168e7c6e17a68daecf7cd0b457 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -13,25 +13,12 @@ * along with this program. If not, see . 
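Editor's note: the scheduler gains an optional timer that periodically logs its queue depth (`taosDumpSchedulerStatus` every `DUMP_SCHEDULER_TIME_WINDOW` ms); a usage sketch of the new constructor, with made-up sizing values:

```c
/* Sketch of creating a scheduler with the status-dump timer attached.
 * taosInitSchedulerWithInfo() is from this patch; queue size, thread count
 * and the timer-control handle are illustrative. */
void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl);

/* tmrCtrl is a timer controller obtained from the timer module (e.g. via
 * taosTmrInit); passing NULL simply skips the periodic status dump. */
static void *start_query_scheduler(void *tmrCtrl) {
  return taosInitSchedulerWithInfo(10000 /*queue*/, 4 /*threads*/, "query", tmrCtrl);
}
```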
*/ -#include -#include -#include -#include - -#include -#include -#include -#include -#include - #include "os.h" #include "tglobalcfg.h" #include "tlog.h" #include "tsocket.h" #include "tutil.h" -unsigned int ip2uint(const char *const ip_addr); - /* * Function to get the public ip address of current machine. If get IP * successfully, return 0, else, return -1. The return values is ip. @@ -116,7 +103,7 @@ int taosGetPublicIp(char *const ip) { } // Function converting an IP address string to an unsigned int. -unsigned int ip2uint(const char *const ip_addr) { +uint32_t ip2uint(const char *const ip_addr) { char ip_addr_cpy[20]; char ip[5]; @@ -272,20 +259,19 @@ int taosReadn(int fd, char *ptr, int nbytes) { return (nbytes - nleft); } -int taosOpenUdpSocket(char *ip, short port) { +int taosOpenUdpSocket(char *ip, uint16_t port) { struct sockaddr_in localAddr; int sockFd; int ttl = 128; int reuse, nocheck; int bufSize = 8192000; - pTrace("open udp socket:%s:%d", ip, port); - // if (tsAllowLocalhost) ip = "0.0.0.0"; + pTrace("open udp socket:%s:%hu", ip, port); memset((char *)&localAddr, 0, sizeof(localAddr)); localAddr.sin_family = AF_INET; localAddr.sin_addr.s_addr = inet_addr(ip); - localAddr.sin_port = (uint16_t)htons((uint16_t)port); + localAddr.sin_port = (uint16_t)htons(port); if ((sockFd = (int)socket(AF_INET, SOCK_DGRAM, 0)) < 0) { pError("failed to open udp socket: %d (%s)", errno, strerror(errno)); @@ -331,7 +317,7 @@ int taosOpenUdpSocket(char *ip, short port) { /* bind socket to local address */ if (bind(sockFd, (struct sockaddr *)&localAddr, sizeof(localAddr)) < 0) { - pError("failed to bind udp socket: %d (%s), %s:%d", errno, strerror(errno), ip, port); + pError("failed to bind udp socket: %d (%s), %s:%hu", errno, strerror(errno), ip, port); taosCloseSocket(sockFd); return -1; } @@ -339,13 +325,12 @@ int taosOpenUdpSocket(char *ip, short port) { return sockFd; } -int taosOpenTcpClientSocket(char *destIp, short destPort, char *clientIp) { +int taosOpenTcpClientSocket(char *destIp, uint16_t destPort, char *clientIp) { int sockFd = 0; struct sockaddr_in serverAddr, clientAddr; int ret; pTrace("open tcp client socket:%s:%d", destIp, destPort); - // if (tsAllowLocalhost) destIp = "0.0.0.0"; sockFd = (int)socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); @@ -377,7 +362,7 @@ int taosOpenTcpClientSocket(char *destIp, short destPort, char *clientIp) { ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr)); if (ret != 0) { - pError("failed to connect socket, ip:%s, port:%d, reason: %s", destIp, destPort, strerror(errno)); + pError("failed to connect socket, ip:%s, port:%hu, reason: %s", destIp, destPort, strerror(errno)); taosCloseSocket(sockFd); sockFd = -1; } @@ -435,18 +420,17 @@ int taosKeepTcpAlive(int sockFd) { return 0; } -int taosOpenTcpServerSocket(char *ip, short port) { +int taosOpenTcpServerSocket(char *ip, uint16_t port) { struct sockaddr_in serverAdd; int sockFd; int reuse; - pTrace("open tcp server socket:%s:%d", ip, port); - // if (tsAllowLocalhost) ip = "0.0.0.0"; + pTrace("open tcp server socket:%s:%hu", ip, port); bzero((char *)&serverAdd, sizeof(serverAdd)); serverAdd.sin_family = AF_INET; serverAdd.sin_addr.s_addr = inet_addr(ip); - serverAdd.sin_port = (uint16_t)htons((uint16_t)port); + serverAdd.sin_port = (uint16_t)htons(port); if ((sockFd = (int)socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) { pError("failed to open TCP socket: %d (%s)", errno, strerror(errno)); @@ -463,7 +447,7 @@ int taosOpenTcpServerSocket(char *ip, short port) { /* bind socket to 
server address */ if (bind(sockFd, (struct sockaddr *)&serverAdd, sizeof(serverAdd)) < 0) { - pError("bind tcp server socket failed, %s:%d, reason:%d(%s)", ip, port, errno, strerror(errno)); + pError("bind tcp server socket failed, %s:%hu, reason:%d(%s)", ip, port, errno, strerror(errno)); close(sockFd); return -1; } @@ -471,7 +455,7 @@ int taosOpenTcpServerSocket(char *ip, short port) { if (taosKeepTcpAlive(sockFd) < 0) return -1; if (listen(sockFd, 10) < 0) { - pError("listen tcp server socket failed, %s:%d, reason:%d(%s)", ip, port, errno, strerror(errno)); + pError("listen tcp server socket failed, %s:%hu, reason:%d(%s)", ip, port, errno, strerror(errno)); return -1; } @@ -483,7 +467,6 @@ int taosOpenRawSocket(char *ip) { struct sockaddr_in rawAdd; pTrace("open udp raw socket:%s", ip); - // if (tsAllowLocalhost) ip = "0.0.0.0"; fd = (int)socket(AF_INET, SOCK_RAW, IPPROTO_UDP); if (fd < 0) { diff --git a/src/util/src/tstatus.c b/src/util/src/tstatus.c deleted file mode 100644 index 570ba547eb238d8951b73ab21a1f78e743e8e753..0000000000000000000000000000000000000000 --- a/src/util/src/tstatus.c +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -char* sdbDnodeStatusStr[] = {"offline", "creating", "unsynced", "slave", "master", "ready"}; - -char* sdbDnodeBalanceStateStr[] = {"balanced", "balancing", "offline removing", "shell removing"}; - -char* sdbVnodeSyncStatusStr[] = {"init", "syncing", "sync_cache", "sync_file"}; - -char* sdbVnodeDropStateStr[] = {"ready", "dropping"}; diff --git a/src/util/src/tstoken.c b/src/util/src/tstoken.c deleted file mode 100644 index e88d3bada67b240ec5627ad924c531cfe6a40338..0000000000000000000000000000000000000000 --- a/src/util/src/tstoken.c +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
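Editor's note: the `short` → `uint16_t` port changes in tsocket.c (and in tglobalcfg.c earlier), together with the matching `%hu` format strings, exist because TCP/UDP ports occupy the full 0–65535 range; a tiny sketch of the failure mode (the port number is made up):

```c
/* Sketch of why port variables moved from short to uint16_t: values above
 * 32767 overflow a signed short, so comparisons and log output go wrong.
 * 40000 is just an example port. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  short    legacy  = (short)40000;   /* implementation-defined, typically -25536 */
  uint16_t correct = 40000;          /* full 0..65535 range, prints with "%hu" */
  printf("short=%d uint16_t=%hu\n", legacy, correct);
  return 0;
}
```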
- */ - -#include -#include -#include -#include -#include - -#include "os.h" -#include "shash.h" -#include "tstoken.h" -void shiftStr(char *dst, char *src); - -static char operator[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '$', '%', '&', 0, '(', ')', '*', '+', - 0, '-', 0, '/', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '<', '=', '>', 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, '[', 0, ']', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '|', 0, 0, 0}; - -static char delimiter[] = { - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ',', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ';', 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -}; - -bool isCharInDelimiter(char c, char *delimiter) { - for (int i = 0; i < strlen(delimiter); i++) { - if (delimiter[i] == c) return true; - } - return false; -} - -char *tscGetTokenDelimiter(char *string, char **token, int *tokenLen, char *delimiters) { - while (*string != 0) { - if (isCharInDelimiter(*string, delimiters)) { - ++string; - } else { - break; - } - } - - *token = string; - - char *str = string; - *tokenLen = 0; - while (*str != 0) { - if (!isCharInDelimiter(*str, delimiters)) { - *tokenLen = *tokenLen + 1; - str++; - } else { - break; - } - } - - return string; -} - -char *tscGetToken(char *string, char **token, int *tokenLen) { - char quote = 0; - - while (*string != 0) { - if (delimiter[*string]) { - ++string; - } else { - break; - } - } - - char quotaChar = 0; - if (*string == '\'' || *string == '\"') { - quote = 1; - quotaChar = *string; - string++; - } - - *token = string; - /* not in string, return token */ - if (*string > 0 && operator[*string] && quote == 0) { - string++; - /* handle the case: insert into tabx using stable1 tags(-1)/tags(+1) - * values(....) */ - if (operator[*string] &&(*string != '(' && *string != ')' && *string != '-' && *string != '+')) - *tokenLen = 2; - else - *tokenLen = 1; - return *token + *tokenLen; - } - - while (*string != 0) { - if (quote) { - // handle escape situation: '\"', the " should not be eliminated - if (*string == quotaChar) { - if (*(string - 1) != '\\') { - break; - } else { - shiftStr(string - 1, string); - } - } else { - ++string; - } - } else { - if (delimiter[*string]) break; - - if (operator[*string]) break; - - ++string; - } - } - - *tokenLen = (int)(string - *token); - - if (quotaChar != 0 && *string != 0 && *(string + 1) != 0) { - return string + 1; - } else { - return string; - } -} - -void shiftStr(char *dst, char *src) { - int32_t i = 0; - do { - dst[i] = src[i]; - i++; - } while (delimiter[src[i]] == 0); - - src[i - 1] = ' '; -} diff --git a/src/util/src/tstrbuild.c b/src/util/src/tstrbuild.c index 6fb970bd6e3de13d6119089759fcf030d6c618fc..61a6d67952a73b8efd0c20a45214a1546f1f0258 100644 --- a/src/util/src/tstrbuild.c +++ b/src/util/src/tstrbuild.c @@ -12,11 +12,8 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ - +#include "os.h" #include "tstrbuild.h" -#include -#include -#include void taosStringBuilderEnsureCapacity(SStringBuilder* sb, size_t size) { size += sb->pos; @@ -72,7 +69,7 @@ void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendSt void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) { char buf[64]; - size_t len = sprintf(buf, "%lld", v); + size_t len = sprintf(buf, "%" PRId64, v); taosStringBuilderAppendStringLen(sb, buf, len); } diff --git a/src/util/src/ttimer.c b/src/util/src/ttimer.c index 5c726390e736a0abc5189796e0173c592757413d..ccac1de518af34c30cca8df05c8fca9f781bdd4e 100644 --- a/src/util/src/ttimer.c +++ b/src/util/src/ttimer.c @@ -13,558 +13,549 @@ * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include -#include -#include - #include "os.h" -#include "tidpool.h" #include "tlog.h" #include "tsched.h" +#include "ttime.h" #include "ttimer.h" #include "tutil.h" -// special mempool without mutex -#define mpool_h void * - -typedef struct { - int numOfFree; /* number of free slots */ - int first; /* the first free slot */ - int numOfBlock; /* the number of blocks */ - int blockSize; /* block size in bytes */ - int * freeList; /* the index list */ - char *pool; /* the actual mem block */ -} pool_t; - -mpool_h tmrMemPoolInit(int maxNum, int blockSize); -char *tmrMemPoolMalloc(mpool_h handle); -void tmrMemPoolFree(mpool_h handle, char *p); -void tmrMemPoolCleanUp(mpool_h handle); - -typedef struct _tmr_obj { - void *param1; - void (*fp)(void *, void *); - tmr_h timerId; - short cycle; - struct _tmr_obj * prev; - struct _tmr_obj * next; - int index; - struct _tmr_ctrl_t *pCtrl; -} tmr_obj_t; - -typedef struct { - tmr_obj_t *head; - int count; -} tmr_list_t; - -typedef struct _tmr_ctrl_t { - void * signature; - pthread_mutex_t mutex; /* mutex to protect critical resource */ - int resolution; /* resolution in mseconds */ - int numOfPeriods; /* total number of periods */ - int64_t periodsFromStart; /* count number of periods since start */ - pthread_t thread; /* timer thread ID */ - tmr_list_t * tmrList; - mpool_h poolHandle; - char label[12]; - int maxNumOfTmrs; - int numOfTmrs; - int ticks; - int maxTicks; - int tmrCtrlId; -} tmr_ctrl_t; -uint32_t tmrDebugFlag = DEBUG_ERROR | DEBUG_WARN | DEBUG_FILE; -void taosTmrProcessList(tmr_ctrl_t *); - -tmr_ctrl_t tmrCtrl[MAX_NUM_OF_TMRCTL]; -int numOfTmrCtrl = 0; -void * tmrIdPool = NULL; -void * tmrQhandle; -int taosTmrThreads = 1; - -void taosTimerLoopFunc(int signo) { - tmr_ctrl_t *pCtrl; - int count = 0; - - for (int i = 1; i < MAX_NUM_OF_TMRCTL; ++i) { - pCtrl = tmrCtrl + i; - if (pCtrl->signature) { - count++; - pCtrl->ticks++; - if (pCtrl->ticks >= pCtrl->maxTicks) { - taosTmrProcessList(pCtrl); - pCtrl->ticks = 0; - } - if (count >= numOfTmrCtrl) break; - } - } -} +#define TIMER_STATE_WAITING 0 +#define TIMER_STATE_EXPIRED 1 +#define TIMER_STATE_STOPPED 2 +#define TIMER_STATE_CANCELED 3 -void taosTmrModuleInit(void) { - tmrIdPool = taosInitIdPool(MAX_NUM_OF_TMRCTL); - memset(tmrCtrl, 0, sizeof(tmrCtrl)); +typedef union _tmr_ctrl_t { + char label[16]; + struct { + // pad to ensure 'next' is the end of this union + char padding[16 - sizeof(union _tmr_ctrl_t*)]; + union _tmr_ctrl_t* next; + }; +} tmr_ctrl_t; - taosInitTimer(taosTimerLoopFunc, MSECONDS_PER_TICK); +typedef struct tmr_obj_t { + uintptr_t id; + tmr_ctrl_t* ctrl; + struct tmr_obj_t* mnext; + struct tmr_obj_t* prev; + struct tmr_obj_t* next; + uint16_t slot; + uint8_t wheel; + uint8_t state; + 
uint8_t refCount; + uint8_t reserved1; + uint16_t reserved2; + union { + int64_t expireAt; + int64_t executedBy; + }; + TAOS_TMR_CALLBACK fp; + void* param; +} tmr_obj_t; - tmrQhandle = taosInitScheduler(10000, taosTmrThreads, "tmr"); - tmrTrace("timer module is initialized, thread:%d", taosTmrThreads); +typedef struct timer_list_t { + int64_t lockedBy; + tmr_obj_t* timers; +} timer_list_t; + +typedef struct timer_map_t { + uint32_t size; + uint32_t count; + timer_list_t* slots; +} timer_map_t; + +typedef struct time_wheel_t { + pthread_mutex_t mutex; + int64_t nextScanAt; + uint32_t resolution; + uint16_t size; + uint16_t index; + tmr_obj_t** slots; +} time_wheel_t; + +uint32_t tmrDebugFlag = DEBUG_ERROR | DEBUG_WARN | DEBUG_FILE; +uint32_t taosMaxTmrCtrl = 512; + +static pthread_once_t tmrModuleInit = PTHREAD_ONCE_INIT; +static pthread_mutex_t tmrCtrlMutex; +static tmr_ctrl_t* tmrCtrls; +static tmr_ctrl_t* unusedTmrCtrl = NULL; +static void* tmrQhandle; +static int numOfTmrCtrl = 0; + +int taosTmrThreads = 1; + +static uintptr_t nextTimerId = 0; + +static time_wheel_t wheels[] = { + {.resolution = MSECONDS_PER_TICK, .size = 4096}, + {.resolution = 1000, .size = 1024}, + {.resolution = 60000, .size = 1024}, +}; +static timer_map_t timerMap; + +static uintptr_t getNextTimerId() { + uintptr_t id; + do { + id = atomic_add_fetch_ptr(&nextTimerId, 1); + } while (id == 0); + return id; } -void *taosTmrInit(int maxNumOfTmrs, int resolution, int longest, char *label) { - static pthread_once_t tmrInit = PTHREAD_ONCE_INIT; - tmr_ctrl_t * pCtrl; - - pthread_once(&tmrInit, taosTmrModuleInit); - - int tmrCtrlId = taosAllocateId(tmrIdPool); - - if (tmrCtrlId < 0) { - tmrError("%s bug!!! too many timers!!!", label); - return NULL; - } - - pCtrl = tmrCtrl + tmrCtrlId; - tfree(pCtrl->tmrList); - tmrMemPoolCleanUp(pCtrl->poolHandle); - - memset(pCtrl, 0, sizeof(tmr_ctrl_t)); - - pCtrl->tmrCtrlId = tmrCtrlId; - strcpy(pCtrl->label, label); - pCtrl->maxNumOfTmrs = maxNumOfTmrs; +static void timerAddRef(tmr_obj_t* timer) { atomic_add_fetch_8(&timer->refCount, 1); } - if ((pCtrl->poolHandle = tmrMemPoolInit(maxNumOfTmrs + 10, sizeof(tmr_obj_t))) == NULL) { - tmrError("%s failed to allocate mem pool", label); - tmrMemPoolCleanUp(pCtrl->poolHandle); - return NULL; +static void timerDecRef(tmr_obj_t* timer) { + if (atomic_sub_fetch_8(&timer->refCount, 1) == 0) { + free(timer); } +} - if (resolution < MSECONDS_PER_TICK) resolution = MSECONDS_PER_TICK; - pCtrl->resolution = resolution; - pCtrl->maxTicks = resolution / MSECONDS_PER_TICK; - pCtrl->ticks = rand() / pCtrl->maxTicks; - pCtrl->numOfPeriods = longest / resolution; - if (pCtrl->numOfPeriods < 10) pCtrl->numOfPeriods = 10; - - pCtrl->tmrList = (tmr_list_t *)malloc(sizeof(tmr_list_t) * pCtrl->numOfPeriods); - if (pCtrl->tmrList == NULL) { - tmrError("%s failed to allocate(size:%d) mem for tmrList", label, sizeof(tmr_list_t) * pCtrl->numOfPeriods); - tmrMemPoolCleanUp(pCtrl->poolHandle); - taosTmrCleanUp(pCtrl); - return NULL; - } - - for (int i = 0; i < pCtrl->numOfPeriods; i++) { - pCtrl->tmrList[i].head = NULL; - pCtrl->tmrList[i].count = 0; +static void lockTimerList(timer_list_t* list) { + int64_t tid = taosGetPthreadId(); + int i = 0; + while (atomic_val_compare_exchange_64(&(list->lockedBy), 0, tid) != 0) { + if (++i % 1000 == 0) { + sched_yield(); + } } +} - if (pthread_mutex_init(&pCtrl->mutex, NULL) < 0) { - tmrError("%s failed to create the mutex, reason:%s", label, strerror(errno)); - taosTmrCleanUp(pCtrl); - return NULL; +static void 
unlockTimerList(timer_list_t* list) { + int64_t tid = taosGetPthreadId(); + if (atomic_val_compare_exchange_64(&(list->lockedBy), tid, 0) != tid) { + assert(false); + tmrError("%d trying to unlock a timer list not locked by current thread.", tid); } - - pCtrl->signature = pCtrl; - numOfTmrCtrl++; - tmrTrace("%s timer ctrl is initialized, index:%d", label, tmrCtrlId); - return pCtrl; } -void taosTmrProcessList(tmr_ctrl_t *pCtrl) { - unsigned int index; - tmr_list_t * pList; - tmr_obj_t * pObj, *header; +static void addTimer(tmr_obj_t* timer) { + timerAddRef(timer); + timer->wheel = tListLen(wheels); - pthread_mutex_lock(&pCtrl->mutex); - index = pCtrl->periodsFromStart % pCtrl->numOfPeriods; - pList = &pCtrl->tmrList[index]; + uint32_t idx = (uint32_t)(timer->id % timerMap.size); + timer_list_t* list = timerMap.slots + idx; - while (1) { - header = pList->head; - if (header == NULL) break; + lockTimerList(list); + timer->mnext = list->timers; + list->timers = timer; + unlockTimerList(list); +} - if (header->cycle > 0) { - pObj = header; - while (pObj) { - pObj->cycle--; - pObj = pObj->next; +static tmr_obj_t* findTimer(uintptr_t id) { + tmr_obj_t* timer = NULL; + if (id > 0) { + uint32_t idx = (uint32_t)(id % timerMap.size); + timer_list_t* list = timerMap.slots + idx; + lockTimerList(list); + for (timer = list->timers; timer != NULL; timer = timer->mnext) { + if (timer->id == id) { + timerAddRef(timer); + break; } - break; } - - pCtrl->numOfTmrs--; - tmrTrace("%s %p, timer expired, fp:%p, tmr_h:%p, index:%d, total:%d", pCtrl->label, header->param1, header->fp, - header, index, pCtrl->numOfTmrs); - - pList->head = header->next; - if (header->next) header->next->prev = NULL; - pList->count--; - header->timerId = NULL; - - SSchedMsg schedMsg; - schedMsg.fp = NULL; - schedMsg.tfp = header->fp; - schedMsg.ahandle = header->param1; - schedMsg.thandle = header; - taosScheduleTask(tmrQhandle, &schedMsg); - - tmrMemPoolFree(pCtrl->poolHandle, (char *)header); + unlockTimerList(list); } - - pCtrl->periodsFromStart++; - pthread_mutex_unlock(&pCtrl->mutex); -} - -void taosTmrCleanUp(void *handle) { - tmr_ctrl_t *pCtrl = (tmr_ctrl_t *)handle; - if (pCtrl == NULL || pCtrl->signature != pCtrl) return; - - pCtrl->signature = NULL; - taosFreeId(tmrIdPool, pCtrl->tmrCtrlId); - numOfTmrCtrl--; - tmrTrace("%s is cleaned up, numOfTmrs:%d", pCtrl->label, numOfTmrCtrl); + return timer; } -tmr_h taosTmrStart(void (*fp)(void *, void *), int mseconds, void *param1, void *handle) { - tmr_obj_t * pObj, *cNode, *pNode; - tmr_list_t *pList; - int index, period; - tmr_ctrl_t *pCtrl = (tmr_ctrl_t *)handle; - - if (handle == NULL) return NULL; - - period = mseconds / pCtrl->resolution; - if (pthread_mutex_lock(&pCtrl->mutex) != 0) - tmrError("%s mutex lock failed, reason:%s", pCtrl->label, strerror(errno)); - - pObj = (tmr_obj_t *)tmrMemPoolMalloc(pCtrl->poolHandle); - if (pObj == NULL) { - tmrError("%s reach max number of timers:%d", pCtrl->label, pCtrl->maxNumOfTmrs); - pthread_mutex_unlock(&pCtrl->mutex); - return NULL; +static void removeTimer(uintptr_t id) { + tmr_obj_t* prev = NULL; + uint32_t idx = (uint32_t)(id % timerMap.size); + timer_list_t* list = timerMap.slots + idx; + lockTimerList(list); + for (tmr_obj_t* p = list->timers; p != NULL; p = p->mnext) { + if (p->id == id) { + if (prev == NULL) { + list->timers = p->mnext; + } else { + prev->mnext = p->mnext; + } + timerDecRef(p); + break; + } + prev = p; } + unlockTimerList(list); +} - pObj->cycle = period / pCtrl->numOfPeriods; - pObj->param1 = param1; - 
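Editor's note: `lockTimerList`/`unlockTimerList` above build a tiny spin lock out of a compare-and-swap on the owning thread id, yielding periodically under contention; a standalone sketch of the same pattern using GCC builtins (the project's `atomic_val_compare_exchange_64` wrapper presumably maps to something similar, but that is an assumption):

```c
/* Standalone sketch of the CAS spin lock used for the timer hash lists:
 * 0 means unlocked, otherwise the field holds the owning thread id, which
 * makes "unlocked by the wrong thread" detectable. GCC builtins stand in
 * for the project's atomic wrappers. */
#include <stdint.h>
#include <sched.h>

typedef struct { volatile int64_t lockedBy; } demo_list_t;

static void demo_lock(demo_list_t *list, int64_t tid) {
  int i = 0;
  while (!__sync_bool_compare_and_swap(&list->lockedBy, 0, tid)) {
    if (++i % 1000 == 0) sched_yield();   /* back off under contention */
  }
}

static void demo_unlock(demo_list_t *list, int64_t tid) {
  /* only succeeds if this thread really holds the lock */
  __sync_bool_compare_and_swap(&list->lockedBy, tid, 0);
}
```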
pObj->fp = fp; - pObj->timerId = pObj; - pObj->pCtrl = pCtrl; - - index = (period + pCtrl->periodsFromStart) % pCtrl->numOfPeriods; - int cindex = (pCtrl->periodsFromStart) % pCtrl->numOfPeriods; - pList = &(pCtrl->tmrList[index]); - - pObj->index = index; - cNode = pList->head; - pNode = NULL; - - while (cNode != NULL) { - if (cNode->cycle < pObj->cycle) { - pNode = cNode; - cNode = cNode->next; - } else { +static void addToWheel(tmr_obj_t* timer, uint32_t delay) { + timerAddRef(timer); + // select a wheel for the timer, we are not an accurate timer, + // but the inaccuracy should not be too large. + timer->wheel = tListLen(wheels) - 1; + for (uint8_t i = 0; i < tListLen(wheels); i++) { + time_wheel_t* wheel = wheels + i; + if (delay < wheel->resolution * wheel->size) { + timer->wheel = i; break; } } - pObj->next = cNode; - pObj->prev = pNode; + time_wheel_t* wheel = wheels + timer->wheel; + timer->prev = NULL; + timer->expireAt = taosGetTimestampMs() + delay; - if (cNode != NULL) { - cNode->prev = pObj; - } + pthread_mutex_lock(&wheel->mutex); - if (pNode != NULL) { - pNode->next = pObj; - } else { - pList->head = pObj; + uint32_t idx = 0; + if (timer->expireAt > wheel->nextScanAt) { + // adjust delay according to next scan time of this wheel + // so that the timer is not fired earlier than desired. + delay = (uint32_t)(timer->expireAt - wheel->nextScanAt); + idx = (delay + wheel->resolution - 1) / wheel->resolution; } - pList->count++; - pCtrl->numOfTmrs++; - - if (pthread_mutex_unlock(&pCtrl->mutex) != 0) - tmrError("%s mutex unlock failed, reason:%s", pCtrl->label, strerror(errno)); - - tmrTrace("%s %p, timer started, fp:%p, tmr_h:%p, index:%d, total:%d cindex:%d", pCtrl->label, param1, fp, pObj, index, - pCtrl->numOfTmrs, cindex); + timer->slot = (uint16_t)((wheel->index + idx + 1) % wheel->size); + tmr_obj_t* p = wheel->slots[timer->slot]; + wheel->slots[timer->slot] = timer; + timer->next = p; + if (p != NULL) { + p->prev = timer; + } - return (tmr_h)pObj; + pthread_mutex_unlock(&wheel->mutex); } -void taosTmrStop(tmr_h timerId) { - tmr_obj_t * pObj; - tmr_list_t *pList; - tmr_ctrl_t *pCtrl; - - pObj = (tmr_obj_t *)timerId; - if (pObj == NULL) return; - - pCtrl = pObj->pCtrl; - if (pCtrl == NULL) return; - - if (pthread_mutex_lock(&pCtrl->mutex) != 0) - tmrError("%s mutex lock failed, reason:%s", pCtrl->label, strerror(errno)); - - if (pObj->timerId == timerId) { - pList = &(pCtrl->tmrList[pObj->index]); - if (pObj->prev) { - pObj->prev->next = pObj->next; - } else { - pList->head = pObj->next; +static bool removeFromWheel(tmr_obj_t* timer) { + if (timer->wheel >= tListLen(wheels)) { + return false; + } + time_wheel_t* wheel = wheels + timer->wheel; + + bool removed = false; + pthread_mutex_lock(&wheel->mutex); + // other thread may modify timer->wheel, check again. 
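+  // between the unlocked check above and acquiring the mutex, the timer loop
+  // may have already taken this timer off its wheel and set timer->wheel to
+  // tListLen(wheels), so re-check under the lock before unlinking the node.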
+ if (timer->wheel < tListLen(wheels)) { + if (timer->prev != NULL) { + timer->prev->next = timer->next; } - - if (pObj->next) { - pObj->next->prev = pObj->prev; + if (timer->next != NULL) { + timer->next->prev = timer->prev; } - - pList->count--; - pObj->timerId = NULL; - pCtrl->numOfTmrs--; - - tmrTrace("%s %p, timer stopped, fp:%p, tmr_h:%p, total:%d", pCtrl->label, pObj->param1, pObj->fp, pObj, - pCtrl->numOfTmrs); - tmrMemPoolFree(pCtrl->poolHandle, (char *)(pObj)); + if (timer == wheel->slots[timer->slot]) { + wheel->slots[timer->slot] = timer->next; + } + timer->wheel = tListLen(wheels); + timer->next = NULL; + timer->prev = NULL; + timerDecRef(timer); + removed = true; } + pthread_mutex_unlock(&wheel->mutex); - pthread_mutex_unlock(&pCtrl->mutex); + return removed; } -void taosTmrStopA(tmr_h *timerId) { - tmr_obj_t * pObj; - tmr_list_t *pList; - tmr_ctrl_t *pCtrl; +static void processExpiredTimer(void* handle, void* arg) { + tmr_obj_t* timer = (tmr_obj_t*)handle; + timer->executedBy = taosGetPthreadId(); + uint8_t state = atomic_val_compare_exchange_8(&timer->state, TIMER_STATE_WAITING, TIMER_STATE_EXPIRED); + if (state == TIMER_STATE_WAITING) { + const char* fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] execution start."; + tmrTrace(fmt, timer->ctrl->label, timer->id, timer->fp, timer->param); - pObj = *(tmr_obj_t **)timerId; - if (pObj == NULL) return; + (*timer->fp)(timer->param, (tmr_h)timer->id); + atomic_store_8(&timer->state, TIMER_STATE_STOPPED); - pCtrl = pObj->pCtrl; - if (pCtrl == NULL) return; - - if (pthread_mutex_lock(&pCtrl->mutex) != 0) - tmrError("%s mutex lock failed, reason:%s", pCtrl->label, strerror(errno)); + fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] execution end."; + tmrTrace(fmt, timer->ctrl->label, timer->id, timer->fp, timer->param); + } + removeTimer(timer->id); + timerDecRef(timer); +} - if (pObj->timerId == pObj) { - pList = &(pCtrl->tmrList[pObj->index]); - if (pObj->prev) { - pObj->prev->next = pObj->next; - } else { - pList->head = pObj->next; - } +static void addToExpired(tmr_obj_t* head) { + const char* fmt = "%s adding expired timer[id=%" PRIuPTR ", fp=%p, param=%p] to queue."; - if (pObj->next) { - pObj->next->prev = pObj->prev; - } + while (head != NULL) { + uintptr_t id = head->id; + tmr_obj_t* next = head->next; + tmrTrace(fmt, head->ctrl->label, id, head->fp, head->param); - pList->count--; - pObj->timerId = NULL; - pCtrl->numOfTmrs--; + SSchedMsg schedMsg; + schedMsg.fp = NULL; + schedMsg.tfp = processExpiredTimer; + schedMsg.ahandle = head; + schedMsg.thandle = NULL; + taosScheduleTask(tmrQhandle, &schedMsg); - tmrTrace("%s %p, timer stopped atomiclly, fp:%p, tmr_h:%p, total:%d", pCtrl->label, pObj->param1, pObj->fp, pObj, - pCtrl->numOfTmrs); - tmrMemPoolFree(pCtrl->poolHandle, (char *)(pObj)); + tmrTrace("timer[id=%" PRIuPTR "] has been added to queue.", id); + head = next; + } +} - *(tmr_obj_t **)timerId = NULL; +static uintptr_t doStartTimer(tmr_obj_t* timer, TAOS_TMR_CALLBACK fp, int mseconds, void* param, tmr_ctrl_t* ctrl) { + uintptr_t id = getNextTimerId(); + timer->id = id; + timer->state = TIMER_STATE_WAITING; + timer->fp = fp; + timer->param = param; + timer->ctrl = ctrl; + addTimer(timer); + + const char* fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] started"; + tmrTrace(fmt, ctrl->label, timer->id, timer->fp, timer->param); + + if (mseconds == 0) { + timer->wheel = tListLen(wheels); + timerAddRef(timer); + addToExpired(timer); } else { - tmrTrace("%s %p, timer stopped atomiclly, fp:%p, tmr_h:%p, total:%d", 
pCtrl->label, pObj->param1, pObj->fp, pObj, - pCtrl->numOfTmrs); + addToWheel(timer, mseconds); } - pthread_mutex_unlock(&pCtrl->mutex); + // note: use `timer->id` here is unsafe as `timer` may already be freed + return id; } -void taosTmrReset(void (*fp)(void *, void *), int mseconds, void *param1, void *handle, tmr_h *pTmrId) { - tmr_obj_t * pObj, *cNode, *pNode; - tmr_list_t *pList; - int index, period; - tmr_ctrl_t *pCtrl = (tmr_ctrl_t *)handle; - - if (handle == NULL) return; - if (pTmrId == NULL) return; - - period = mseconds / pCtrl->resolution; - if (pthread_mutex_lock(&pCtrl->mutex) != 0) - tmrError("%s mutex lock failed, reason:%s", pCtrl->label, strerror(errno)); +tmr_h taosTmrStart(TAOS_TMR_CALLBACK fp, int mseconds, void* param, void* handle) { + tmr_ctrl_t* ctrl = (tmr_ctrl_t*)handle; + if (ctrl == NULL || ctrl->label[0] == 0) { + return NULL; + } - pObj = (tmr_obj_t *)(*pTmrId); + tmr_obj_t* timer = (tmr_obj_t*)calloc(1, sizeof(tmr_obj_t)); + if (timer == NULL) { + tmrError("%s failed to allocated memory for new timer object.", ctrl->label); + return NULL; + } - if (pObj && pObj->timerId == *pTmrId) { - // exist, stop it first - pList = &(pCtrl->tmrList[pObj->index]); - if (pObj->prev) { - pObj->prev->next = pObj->next; - } else { - pList->head = pObj->next; - } + return (tmr_h)doStartTimer(timer, fp, mseconds, param, ctrl); +} - if (pObj->next) { - pObj->next->prev = pObj->prev; +static void taosTimerLoopFunc(int signo) { + int64_t now = taosGetTimestampMs(); + + for (int i = 0; i < tListLen(wheels); i++) { + // `expried` is a temporary expire list. + // expired timers are first add to this list, then move + // to expired queue as a batch to improve performance. + // note this list is used as a stack in this function. + tmr_obj_t* expired = NULL; + + time_wheel_t* wheel = wheels + i; + while (now >= wheel->nextScanAt) { + pthread_mutex_lock(&wheel->mutex); + wheel->index = (wheel->index + 1) % wheel->size; + tmr_obj_t* timer = wheel->slots[wheel->index]; + while (timer != NULL) { + tmr_obj_t* next = timer->next; + if (now < timer->expireAt) { + timer = next; + continue; + } + + // remove from the wheel + if (timer->prev == NULL) { + wheel->slots[wheel->index] = next; + if (next != NULL) { + next->prev = NULL; + } + } else { + timer->prev->next = next; + if (next != NULL) { + next->prev = timer->prev; + } + } + timer->wheel = tListLen(wheels); + + // add to temporary expire list + timer->next = expired; + timer->prev = NULL; + if (expired != NULL) { + expired->prev = timer; + } + expired = timer; + + timer = next; + } + pthread_mutex_unlock(&wheel->mutex); + wheel->nextScanAt += wheel->resolution; } - pList->count--; - pObj->timerId = NULL; - pCtrl->numOfTmrs--; - } else { - // timer not there, or already expired - pObj = (tmr_obj_t *)tmrMemPoolMalloc(pCtrl->poolHandle); - *pTmrId = pObj; + addToExpired(expired); + } +} - if (pObj == NULL) { - tmrError("%s failed to allocate timer, max:%d allocated:%d", pCtrl->label, pCtrl->maxNumOfTmrs, pCtrl->numOfTmrs); - pthread_mutex_unlock(&pCtrl->mutex); - return; +static bool doStopTimer(tmr_obj_t* timer, uint8_t state) { + if (state == TIMER_STATE_WAITING) { + bool reusable = false; + if (removeFromWheel(timer)) { + removeTimer(timer->id); + // only safe to reuse the timer when timer is removed from the wheel. + // we cannot guarantee the thread safety of the timr in all other cases. 
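+      // if removeFromWheel() fails, the timer loop has already detached this
+      // timer and queued it for expiration; processExpiredTimer() still holds
+      // a reference and may free the object, so it must not be reused.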
+ reusable = true; } + const char* fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] is cancelled."; + tmrTrace(fmt, timer->ctrl->label, timer->id, timer->fp, timer->param); + return reusable; + } + + if (state != TIMER_STATE_EXPIRED) { + // timer already stopped or cancelled, has nothing to do in this case + return false; } + + if (timer->executedBy == taosGetPthreadId()) { + // taosTmrReset is called in the timer callback, should do nothing in this + // case to avoid dead lock. note taosTmrReset must be the last statement + // of the callback funtion, will be a bug otherwise. + return false; + } + + // timer callback is executing in another thread, we SHOULD wait it to stop, + // BUT this may result in dead lock if current thread are holding a lock which + // the timer callback need to acquire. so, we HAVE TO return directly. + const char* fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] is executing and cannot be stopped."; + tmrTrace(fmt, timer->ctrl->label, timer->id, timer->fp, timer->param); + return false; +} - pObj->cycle = period / pCtrl->numOfPeriods; - pObj->param1 = param1; - pObj->fp = fp; - pObj->timerId = pObj; - pObj->pCtrl = pCtrl; +bool taosTmrStop(tmr_h timerId) { + uintptr_t id = (uintptr_t)timerId; - index = (period + pCtrl->periodsFromStart) % pCtrl->numOfPeriods; - pList = &(pCtrl->tmrList[index]); + tmr_obj_t* timer = findTimer(id); + if (timer == NULL) { + tmrTrace("timer[id=%" PRIuPTR "] does not exist", id); + return false; + } - pObj->index = index; - cNode = pList->head; - pNode = NULL; + uint8_t state = atomic_val_compare_exchange_8(&timer->state, TIMER_STATE_WAITING, TIMER_STATE_CANCELED); + doStopTimer(timer, state); + timerDecRef(timer); - while (cNode != NULL) { - if (cNode->cycle < pObj->cycle) { - pNode = cNode; - cNode = cNode->next; - } else { - break; - } - } + return state == TIMER_STATE_WAITING; +} - pObj->next = cNode; - pObj->prev = pNode; +bool taosTmrStopA(tmr_h* timerId) { + bool ret = taosTmrStop(*timerId); + *timerId = NULL; + return ret; +} - if (cNode != NULL) { - cNode->prev = pObj; +bool taosTmrReset(TAOS_TMR_CALLBACK fp, int mseconds, void* param, void* handle, tmr_h* pTmrId) { + tmr_ctrl_t* ctrl = (tmr_ctrl_t*)handle; + if (ctrl == NULL || ctrl->label[0] == 0) { + return NULL; } - if (pNode != NULL) { - pNode->next = pObj; + uintptr_t id = (uintptr_t)*pTmrId; + bool stopped = false; + tmr_obj_t* timer = findTimer(id); + if (timer == NULL) { + tmrTrace("%s timer[id=%" PRIuPTR "] does not exist", ctrl->label, id); } else { - pList->head = pObj; + uint8_t state = atomic_val_compare_exchange_8(&timer->state, TIMER_STATE_WAITING, TIMER_STATE_CANCELED); + if (!doStopTimer(timer, state)) { + timerDecRef(timer); + timer = NULL; + } + stopped = state == TIMER_STATE_WAITING; } - pList->count++; - pCtrl->numOfTmrs++; - - if (pthread_mutex_unlock(&pCtrl->mutex) != 0) - tmrError("%s mutex unlock failed, reason:%s", pCtrl->label, strerror(errno)); - - tmrTrace("%s %p, timer is reset, fp:%p, tmr_h:%p, index:%d, total:%d numOfFree:%d", pCtrl->label, param1, fp, pObj, - index, pCtrl->numOfTmrs, ((pool_t *)pCtrl->poolHandle)->numOfFree); + if (timer == NULL) { + *pTmrId = taosTmrStart(fp, mseconds, param, handle); + return stopped; + } - return; -} + tmrTrace("%s timer[id=%" PRIuPTR "] is reused", ctrl->label, timer->id); -void taosTmrList(void *handle) { - int i; - tmr_list_t *pList; - tmr_obj_t * pObj; - tmr_ctrl_t *pCtrl = (tmr_ctrl_t *)handle; - - for (i = 0; i < pCtrl->numOfPeriods; ++i) { - pList = &(pCtrl->tmrList[i]); - pObj = pList->head; - if 
(!pObj) continue; - printf("\nindex=%d count:%d\n", i, pList->count); - while (pObj) { - pObj = pObj->next; + // wait until there's no other reference to this timer, + // so that we can reuse this timer safely. + for (int i = 1; atomic_load_8(&timer->refCount) > 1; ++i) { + if (i % 1000 == 0) { + sched_yield(); } } -} -mpool_h tmrMemPoolInit(int numOfBlock, int blockSize) { - int i; - pool_t *pool_p; + assert(timer->refCount == 1); + memset(timer, 0, sizeof(*timer)); + *pTmrId = (tmr_h)doStartTimer(timer, fp, mseconds, param, ctrl); - if (numOfBlock <= 1 || blockSize <= 1) { - tmrError("invalid parameter in memPoolInit\n"); - return NULL; + return stopped; +} + +static void taosTmrModuleInit(void) { + tmrCtrls = malloc(sizeof(tmr_ctrl_t) * taosMaxTmrCtrl); + if (tmrCtrls == NULL) { + tmrError("failed to allocate memory for timer controllers."); + return; } - pool_p = (pool_t *)malloc(sizeof(pool_t)); - if (pool_p == NULL) { - tmrError("mempool malloc failed\n"); - return NULL; - } else { - memset(pool_p, 0, sizeof(pool_t)); + for (int i = 0; i < taosMaxTmrCtrl - 1; ++i) { + tmr_ctrl_t* ctrl = tmrCtrls + i; + ctrl->next = ctrl + 1; } + unusedTmrCtrl = tmrCtrls; - pool_p->blockSize = blockSize; - pool_p->numOfBlock = numOfBlock; - pool_p->pool = (char *)malloc(blockSize * numOfBlock); - pool_p->freeList = (int *)malloc(sizeof(int) * numOfBlock); + pthread_mutex_init(&tmrCtrlMutex, NULL); - if (pool_p->pool == NULL || pool_p->freeList == NULL) { - tmrError("failed to allocate memory\n"); - tfree(pool_p->freeList); - tfree(pool_p->pool); - free(pool_p); - return NULL; + int64_t now = taosGetTimestampMs(); + for (int i = 0; i < tListLen(wheels); i++) { + time_wheel_t* wheel = wheels + i; + if (pthread_mutex_init(&wheel->mutex, NULL) != 0) { + tmrError("failed to create the mutex for wheel, reason:%s", strerror(errno)); + return; + } + wheel->nextScanAt = now + wheel->resolution; + wheel->index = 0; + wheel->slots = (tmr_obj_t**)calloc(wheel->size, sizeof(tmr_obj_t*)); + if (wheel->slots == NULL) { + tmrError("failed to allocate wheel slots"); + return; + } + timerMap.size += wheel->size; } - memset(pool_p->pool, 0, blockSize * numOfBlock); - for (i = 0; i < pool_p->numOfBlock; ++i) pool_p->freeList[i] = i; + timerMap.count = 0; + timerMap.slots = (timer_list_t*)calloc(timerMap.size, sizeof(timer_list_t)); + if (timerMap.slots == NULL) { + tmrError("failed to allocate hash map"); + return; + } - pool_p->first = 0; - pool_p->numOfFree = pool_p->numOfBlock; + tmrQhandle = taosInitScheduler(10000, taosTmrThreads, "tmr"); + taosInitTimer(taosTimerLoopFunc, MSECONDS_PER_TICK); - return (mpool_h)pool_p; + tmrTrace("timer module is initialized, number of threads: %d", taosTmrThreads); } -char *tmrMemPoolMalloc(mpool_h handle) { - char * pos = NULL; - pool_t *pool_p = (pool_t *)handle; +void* taosTmrInit(int maxNumOfTmrs, int resolution, int longest, const char* label) { + pthread_once(&tmrModuleInit, taosTmrModuleInit); - if (pool_p->numOfFree <= 0 || pool_p->numOfFree > pool_p->numOfBlock) { - tmrError("mempool: out of memory, numOfFree:%d, numOfBlock:%d", pool_p->numOfFree, pool_p->numOfBlock); - } else { - pos = pool_p->pool + pool_p->blockSize * (pool_p->freeList[pool_p->first]); - pool_p->first++; - pool_p->first = pool_p->first % pool_p->numOfBlock; - pool_p->numOfFree--; + pthread_mutex_lock(&tmrCtrlMutex); + tmr_ctrl_t* ctrl = unusedTmrCtrl; + if (ctrl != NULL) { + unusedTmrCtrl = ctrl->next; + numOfTmrCtrl++; } + pthread_mutex_unlock(&tmrCtrlMutex); - return pos; -} - -void 
tmrMemPoolFree(mpool_h handle, char *pMem) { - int index; - pool_t *pool_p = (pool_t *)handle; - - if (pMem == NULL) return; + if (ctrl == NULL) { + tmrError("%s too many timer controllers, failed to create timer controller.", label); + return NULL; + } - index = (int)(pMem - pool_p->pool) / pool_p->blockSize; + strncpy(ctrl->label, label, sizeof(ctrl->label)); + ctrl->label[sizeof(ctrl->label) - 1] = 0; + tmrTrace("%s timer controller is initialized, number of timer controllers: %d.", label, numOfTmrCtrl); + return ctrl; +} - if (index < 0 || index >= pool_p->numOfBlock) { - tmrError("tmr mempool: error, invalid address:%p\n", pMem); - } else { - memset(pMem, 0, pool_p->blockSize); - pool_p->freeList[(pool_p->first + pool_p->numOfFree) % pool_p->numOfBlock] = index; - pool_p->numOfFree++; +void taosTmrCleanUp(void* handle) { + tmr_ctrl_t* ctrl = (tmr_ctrl_t*)handle; + if (ctrl == NULL || ctrl->label[0] == 0) { + return; } -} -void tmrMemPoolCleanUp(mpool_h handle) { - pool_t *pool_p = (pool_t *)handle; - if (pool_p == NULL) return; + tmrTrace("%s timer controller is cleaned up.", ctrl->label); + ctrl->label[0] = 0; - if (pool_p->pool) free(pool_p->pool); - if (pool_p->freeList) free(pool_p->freeList); - memset(&pool_p, 0, sizeof(pool_p)); - free(pool_p); + pthread_mutex_lock(&tmrCtrlMutex); + ctrl->next = unusedTmrCtrl; + numOfTmrCtrl--; + unusedTmrCtrl = ctrl; + pthread_mutex_unlock(&tmrCtrlMutex); } diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index 99f0f5bb91c3f76acccca9d45687ce26ef816b7b..0fbc4dc93503db57dd1fd98e79516cfbd89a5515 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -13,14 +13,12 @@ * along with this program. If not, see . */ -#include -#include -#include - #include "os.h" #include "shash.h" -#include "tsql.h" #include "tutil.h" +#include "tsqldef.h" +#include "tstoken.h" +#include "ttypes.h" // All the keywords of the SQL language are stored in a hash table typedef struct SKeyword { @@ -96,7 +94,6 @@ static SKeyword keywordTable[] = { {"TABLE", TK_TABLE}, {"DATABASE", TK_DATABASE}, {"DNODE", TK_DNODE}, - {"IP", TK_IP}, {"USER", TK_USER}, {"ACCOUNT", TK_ACCOUNT}, {"USE", TK_USE}, @@ -227,6 +224,7 @@ static SKeyword keywordTable[] = { {"METRICS", TK_METRICS}, {"STABLE", TK_STABLE}, {"FILE", TK_FILE}, + {"VNODES", TK_VNODES}, }; /* This is the hash table */ @@ -420,7 +418,12 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { int delim = z[0]; bool strEnd = false; for (i = 1; z[i]; i++) { - if (z[i] == delim) { + if (z[i] == '\\') { + i++; + continue; + } + + if (z[i] == delim ) { if (z[i + 1] == delim) { i++; } else { @@ -429,6 +432,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { } } } + if (z[i]) i++; if (strEnd) { @@ -506,7 +510,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { if ((z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' || z[i] == 'y' || z[i] == 'w' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' || z[i] == 'Y' || z[i] == 'W') && - (isIdChar[z[i + 1]] == 0)) { + (isIdChar[(uint8_t)z[i + 1]] == 0)) { *tokenType = TK_VARIABLE; i += 1; return i; @@ -523,7 +527,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { } if (seg == 4) { // ip address - *tokenType = TK_IP; + *tokenType = TK_IPTOKEN; return i; } @@ -547,7 +551,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { case 't': case 'F': case 'f': { - for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[z[i]]; i++) { + for (i = 1; ((z[i] & 0x80) == 0) && 
isIdChar[(uint8_t) z[i]]; i++) { } if ((i == 4 && strncasecmp(z, "true", 4) == 0) || (i == 5 && strncasecmp(z, "false", 5) == 0)) { @@ -556,10 +560,10 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { } } default: { - if (((*z & 0x80) != 0) || !isIdChar[*z]) { + if (((*z & 0x80) != 0) || !isIdChar[(uint8_t) *z]) { break; } - for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[z[i]]; i++) { + for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[(uint8_t) z[i]]; i++) { } *tokenType = tSQLKeywordCode(z, i); return i; diff --git a/src/util/src/ttypes.c b/src/util/src/ttypes.c index 3b36b1b31c94f6e9681eced3f721f0d3e2a15056..b048748d95926a3d566b7dc82aecdd6dcc936eae 100644 --- a/src/util/src/ttypes.c +++ b/src/util/src/ttypes.c @@ -12,20 +12,9 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ - -#include -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "taos.h" #include "tsdb.h" -#include "tsql.h" #include "tsqldef.h" #include "ttypes.h" #include "tutil.h" @@ -76,6 +65,8 @@ bool isValidDataType(int32_t type, int32_t length) { void tVariantCreate(tVariant *pVar, SSQLToken *token) { tVariantCreateFromString(pVar, token->z, token->n, token->type); } void tVariantCreateFromString(tVariant *pVar, char *pz, uint32_t len, uint32_t type) { + memset(pVar, 0, sizeof(tVariant)); + switch (type) { case TSDB_DATA_TYPE_BOOL: { int32_t k = strncasecmp(pz, "true", 4); @@ -148,7 +139,7 @@ void tVariantCreateFromBinary(tVariant *pVar, char *pz, uint32_t len, uint32_t t } case TSDB_DATA_TYPE_NCHAR: { // here we get the nchar length from raw binary bits length pVar->nLen = len / TSDB_NCHAR_SIZE; - pVar->wpz = malloc((pVar->nLen + 1) * TSDB_NCHAR_SIZE); + pVar->wpz = calloc(1, (pVar->nLen + 1) * TSDB_NCHAR_SIZE); wcsncpy(pVar->wpz, (wchar_t *)pz, pVar->nLen); pVar->wpz[pVar->nLen] = 0; @@ -221,7 +212,7 @@ int32_t tVariantToString(tVariant *pVar, char *dst) { return sprintf(dst, "%d", (int32_t)pVar->i64Key); case TSDB_DATA_TYPE_BIGINT: - return sprintf(dst, "%lld", pVar->i64Key); + return sprintf(dst, "%" PRId64, pVar->i64Key); case TSDB_DATA_TYPE_FLOAT: case TSDB_DATA_TYPE_DOUBLE: @@ -232,6 +223,7 @@ int32_t tVariantToString(tVariant *pVar, char *dst) { } } +#if 0 static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type, bool releaseVariantPtr) { if (pVariant->nType == TSDB_DATA_TYPE_NULL) { setNull(pDest, type, tDataTypeDesc[type].nSize); @@ -345,7 +337,7 @@ static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type, return 0; } - +#endif static FORCE_INLINE int32_t convertToBoolImpl(char *pStr, int32_t len) { if ((strncasecmp(pStr, "true", len) == 0) && (len == 4)) { return TSDB_TRUE; @@ -394,7 +386,7 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) { } else { if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { - sprintf(pBuf == NULL ? *pDest : pBuf, "%lld", pVariant->i64Key); + sprintf(pBuf == NULL ? *pDest : pBuf, "%" PRId64, pVariant->i64Key); } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { sprintf(pBuf == NULL ? 
*pDest : pBuf, "%lf", pVariant->dKey); } else if (pVariant->nType == TSDB_DATA_TYPE_BOOL) { @@ -419,7 +411,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) { int32_t nLen = 0; if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { - nLen = sprintf(pDst, "%lld", pVariant->i64Key); + nLen = sprintf(pDst, "%" PRId64, pVariant->i64Key); } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { nLen = sprintf(pDst, "%lf", pVariant->dKey); } else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) { @@ -445,7 +437,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) { char* tmp = realloc(pVariant->wpz, (*pDestSize + 1)*TSDB_NCHAR_SIZE); assert(tmp != NULL); - pVariant->wpz = tmp; + pVariant->wpz = (wchar_t *)tmp; } else { taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE); } @@ -978,7 +970,7 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { } } -void assignVal(char *val, char *src, int32_t len, int32_t type) { +void assignVal(char *val, const char *src, int32_t len, int32_t type) { switch (type) { case TSDB_DATA_TYPE_INT: { *((int32_t *)val) = GET_INT32_VAL(src); @@ -1006,6 +998,14 @@ void assignVal(char *val, char *src, int32_t len, int32_t type) { *((int8_t *)val) = GET_INT8_VAL(src); break; }; + case TSDB_DATA_TYPE_BINARY: { + strncpy(val, src, len); + break; + }; + case TSDB_DATA_TYPE_NCHAR: { + wcsncpy((wchar_t*)val, (wchar_t*)src, len / TSDB_NCHAR_SIZE); + break; + }; default: { memcpy(val, src, len); break; diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c index 805f27a3813fccbe36a26aa47358a01ee22f9c33..cdd017fb564d1f11703b4608cfb85c0865f060c6 100644 --- a/src/util/src/tutil.c +++ b/src/util/src/tutil.c @@ -13,14 +13,6 @@ * along with this program. If not, see . 
*/ -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #ifdef USE_LIBICONV @@ -32,6 +24,8 @@ #include "ttime.h" #include "ttypes.h" #include "tutil.h" +#include "tlog.h" +#include "taoserror.h" int32_t strdequote(char *z) { if (z == NULL) { @@ -114,6 +108,7 @@ char **strsplit(char *z, const char *delim, int32_t *num) { if ((*num) >= size) { size = (size << 1); split = realloc(split, POINTER_BYTES * size); + assert(NULL != split); } } @@ -406,13 +401,6 @@ int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstP return rename(fullPath, *dstPath); } -bool taosCheckDbName(char *db, char *monitordb) { - char *pos = strchr(db, '.'); - if (pos == NULL) return false; - - return strncasecmp(pos + 1, monitordb, strlen(monitordb)) == 0; -} - bool taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs) { #ifdef USE_LIBICONV iconv_t cd = iconv_open(tsCharset, DEFAULT_UNICODE_ENCODEC); @@ -458,17 +446,15 @@ bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len) #endif } -bool taosValidateEncodec(char *encodec) { +bool taosValidateEncodec(const char *encodec) { #ifdef USE_LIBICONV iconv_t cd = iconv_open(encodec, DEFAULT_UNICODE_ENCODEC); if (cd == (iconv_t)(-1)) { return false; } iconv_close(cd); - return true; -#else - return true; #endif + return true; } bool taosGetVersionNumber(char *versionStr, int *versionNubmer) { @@ -500,12 +486,41 @@ bool taosGetVersionNumber(char *versionStr, int *versionNubmer) { return true; } +int taosCheckVersion(char *input_client_version, char *input_server_version, int comparedSegments) { + char client_version[64] = {0}; + char server_version[64] = {0}; + int clientVersionNumber[4] = {0}; + int serverVersionNumber[4] = {0}; + + strcpy(client_version, input_client_version); + strcpy(server_version, input_server_version); + + if (!taosGetVersionNumber(client_version, clientVersionNumber)) { + pError("invalid client version:%s", client_version); + return TSDB_CODE_INVALID_CLIENT_VERSION; + } + + if (!taosGetVersionNumber(server_version, serverVersionNumber)) { + pError("invalid server version:%s", server_version); + return TSDB_CODE_INVALID_CLIENT_VERSION; + } + + for(int32_t i = 0; i < comparedSegments; ++i) { + if (clientVersionNumber[i] != serverVersionNumber[i]) { + tscError("the %d-th number of server version:%s not matched with client version:%s", i, server_version, version); + return TSDB_CODE_INVALID_CLIENT_VERSION; + } + } + + return 0; +} + char *taosIpStr(uint32_t ipInt) { static char ipStrArray[3][30]; static int ipStrIndex = 0; char *ipStr = ipStrArray[(ipStrIndex++) % 3]; - sprintf(ipStr, "0x%x:%d.%d.%d.%d", ipInt, ipInt & 0xFF, (ipInt >> 8) & 0xFF, (ipInt >> 16) & 0xFF, ipInt >> 24); + sprintf(ipStr, "0x%x:%u.%u.%u.%u", ipInt, ipInt & 0xFF, (ipInt >> 8) & 0xFF, (ipInt >> 16) & 0xFF, (uint8_t)(ipInt >> 24)); return ipStr; } @@ -524,3 +539,22 @@ FORCE_INLINE double taos_align_get_double(char* pBuf) { *(int64_t*)(&dv) = *(int64_t*)pBuf; return dv; } + +typedef struct CharsetPair { + char *oldCharset; + char *newCharset; +} CharsetPair; + +char *taosCharsetReplace(char *charsetstr) { + CharsetPair charsetRep[] = { + { "utf8", "UTF-8" }, { "936", "CP936" }, + }; + + for (int32_t i = 0; i < tListLen(charsetRep); ++i) { + if (strcasecmp(charsetRep[i].oldCharset, charsetstr) == 0) { + return strdup(charsetRep[i].newCharset); + } + } + + return strdup(charsetstr); +} diff --git a/src/util/src/version.c b/src/util/src/version.c index 
dc2c3c5480e871961cb350d06acf2da51d6006cc..c85289fb8ad2f661ec4786c20b3e8ec6207b561d 100644 --- a/src/util/src/version.c +++ b/src/util/src/version.c @@ -1,4 +1,5 @@ -char version[64] = "1.6.4.0"; +char version[64] = "1.6.4.4"; char compatible_version[64] = "1.6.1.0"; -char gitinfo[128] = "869171d2331eb25ba0901e88d33ae627bf5a9d91"; -char buildinfo[512] = "Built by ubuntu at 2019-11-07 22:31"; +char gitinfo[128] = "d62c5c30231d04a736d437cf428af6e12599bd9f"; +char gitinfoOfInternal[128] = "8094a32d78dc519bd883d01ac2ba6ec49ac57a80"; +char buildinfo[512] = "Built by ubuntu at 2019-12-16 21:40"; diff --git a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml index 9271d478a64f01d940af9875e8cb291bdfa53459..6f5fa6e132ea6d7a7cc6b0322dcd85f9183291f5 100644 --- a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml +++ b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml @@ -118,7 +118,7 @@ com.fasterxml.jackson.core jackson-databind - 2.9.10 + 2.9.10.1 diff --git a/tests/examples/JDBC/readme.md b/tests/examples/JDBC/JDBCDemo/readme.md similarity index 89% rename from tests/examples/JDBC/readme.md rename to tests/examples/JDBC/JDBCDemo/readme.md index 54a2e5c6ac96b2480d9b1f438d8e11312b09d798..a91624a9e47a3015d88e2e9aa9f62cf8dd0672cc 100644 --- a/tests/examples/JDBC/readme.md +++ b/tests/examples/JDBC/JDBCDemo/readme.md @@ -9,5 +9,5 @@ TDengine's JDBC driver jar is not yet published to maven center repo, so we need ## Compile the Demo Code and Run It To compile the demo project, go to the source directory ``TDengine/tests/examples/JDBC/JDBCDemo`` and execute
      mvn clean assembly:single package
      -The ``pom.xml`` is configured to package all the dependencies into one executable jar file. To run it, go to ``TDengine/tests/examples/JDBC/JDBCDemo/target`` and execute +The ``pom.xml`` is configured to package all the dependencies into one executable jar file. To run it, go to ``examples/JDBC/JDBCDemo/target`` and execute
      java -jar jdbcdemo-1.0-SNAPSHOT-jar-with-dependencies.jar
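The demo exercises the same TDengine JDBC driver that the Spring examples below configure. A minimal sketch of that kind of usage is shown here; the driver class, URL, and credentials are taken from those examples and may differ from the actual JDBCDemo sources, and the database and table names are only illustrative:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class JdbcSketch {
    public static void main(String[] args) throws Exception {
        // register the TDengine JDBC driver (class name as used in the Spring examples)
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        try (Connection conn = DriverManager.getConnection(
                     "jdbc:TAOS://127.0.0.1:6030/log", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            // create an illustrative database and table, insert one row, read it back
            stmt.executeUpdate("create database if not exists demo");
            stmt.executeUpdate("create table if not exists demo.weather (ts timestamp, temperature int, humidity float)");
            stmt.executeUpdate("insert into demo.weather values (now, 23, 10.3)");
            try (ResultSet rs = stmt.executeQuery("select * from demo.weather")) {
                while (rs.next()) {
                    System.out.printf("%s %d %.1f%n",
                            rs.getTimestamp("ts"), rs.getInt("temperature"), rs.getFloat("humidity"));
                }
            }
        }
    }
}
```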
      diff --git a/tests/examples/JDBC/SpringJdbcTemplate/.gitignore b/tests/examples/JDBC/SpringJdbcTemplate/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..175de5c653d2f49b2ad1227764e60f741110592d --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/.gitignore @@ -0,0 +1,31 @@ +HELP.md +target/ +.mvn/ +!**/src/main/** +!**/src/test/** + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ + +### VS Code ### +.vscode/ diff --git a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml new file mode 100644 index 0000000000000000000000000000000000000000..45abc5354abf623082f6f317c19c9adedf17a097 --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml @@ -0,0 +1,85 @@ + + + + 4.0.0 + + com.taosdata.jdbc + SpringJdbcTemplate + 1.0-SNAPSHOT + + SpringJdbcTemplate + http://www.taosdata.com + + + UTF-8 + 1.8 + 1.8 + + + + + + org.springframework + spring-context + 4.3.2.RELEASE + + + + org.springframework + spring-jdbc + 4.3.2.RELEASE + + + + junit + junit + 4.11 + test + + + + com.taosdata.jdbc + taos-jdbcdriver + 1.0.3 + + + + + + + + maven-compiler-plugin + 3.8.0 + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.1.0 + + + + com.taosdata.jdbc.App + + + + jar-with-dependencies + + + + + make-assembly + package + + single + + + + + + + diff --git a/tests/examples/JDBC/SpringJdbcTemplate/readme.md b/tests/examples/JDBC/SpringJdbcTemplate/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..1fe8809b506c248f226edd0f3200c6e352c0a73b --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/readme.md @@ -0,0 +1,34 @@ + +## TDengine Spring JDBC Template Demo + +`Spring JDBC Template` 简化了原生 JDBC Connection 获取释放等操作,使得操作数据库更加方便。 + +### 配置 + +修改 `src/main/resources/applicationContext.xml` 文件中 TDengine 的配置信息: + +```xml + + + + + + + + + + + + +``` + +### 打包运行 + +进入 `TDengine/tests/examples/JDBC/SpringJdbcTemplate` 目录下,执行以下命令可以生成可执行 jar 包。 +```shell +mvn clean package +``` +打包成功之后,进入 `target/` 目录下,执行以下命令就可运行测试: +```shell +java -jar SpringJdbcTemplate-1.0-SNAPSHOT-jar-with-dependencies.jar +``` \ No newline at end of file diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/App.java b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/App.java new file mode 100644 index 0000000000000000000000000000000000000000..3230af46a8016fee3d58c89ea3b2c1ddcf39cea5 --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/App.java @@ -0,0 +1,44 @@ +package com.taosdata.jdbc; + + +import org.springframework.context.ApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.util.CollectionUtils; + +import java.util.List; +import java.util.Map; + +public class App { + + public static void main( String[] args ) { + + ApplicationContext ctx = new ClassPathXmlApplicationContext("applicationContext.xml"); + + JdbcTemplate jdbcTemplate = (JdbcTemplate) ctx.getBean("jdbcTemplate"); + + // create database + jdbcTemplate.execute("create database if not exists db "); + + // create table + jdbcTemplate.execute("create table if not exists db.tb (ts timestamp, temperature int, humidity float)"); + + String 
insertSql = "insert into db.tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"; + + // insert rows + int affectedRows = jdbcTemplate.update(insertSql); + + System.out.println("insert success " + affectedRows + " rows."); + + // query for list + List> resultList = jdbcTemplate.queryForList("select * from db.tb"); + + if(!CollectionUtils.isEmpty(resultList)){ + for (Map row : resultList){ + System.out.printf("%s, %d, %s\n", row.get("ts"), row.get("temperature"), row.get("humidity")); + } + } + + } + +} diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/resources/applicationContext.xml b/tests/examples/JDBC/SpringJdbcTemplate/src/main/resources/applicationContext.xml new file mode 100644 index 0000000000000000000000000000000000000000..41128148ec3fb69f342c634cc8e9dd9fbd3c0037 --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/main/resources/applicationContext.xml @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/AppTest.java b/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/AppTest.java new file mode 100644 index 0000000000000000000000000000000000000000..d6a699598e73470663af4eb04e03a9a6b083bc4c --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/AppTest.java @@ -0,0 +1,20 @@ +package com.taosdata.jdbc; + +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +/** + * Unit test for simple App. + */ +public class AppTest +{ + /** + * Rigorous Test :-) + */ + @Test + public void shouldAnswerWithTrue() + { + assertTrue( true ); + } +} diff --git a/tests/examples/JDBC/springbootdemo/.gitignore b/tests/examples/JDBC/springbootdemo/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b8a47adccb623c653c547481ff9d3221210f31ef --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/.gitignore @@ -0,0 +1,30 @@ +.mvn/ +target/ +!**/src/main/** +!**/src/test/** + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ + +### VS Code ### +.vscode/ diff --git a/tests/examples/JDBC/springbootdemo/.mvn/wrapper/MavenWrapperDownloader.java b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100644 index 0000000000000000000000000000000000000000..74f4de40122aca522184d5b1aac4f0ac29888b1a --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,118 @@ +/* + * Copyright 2012-2019 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + private static final String WRAPPER_VERSION = "0.5.5"; + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" + + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. + */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. + File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if (mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if (mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... 
+ } + } + } + System.out.println("- Downloading from: " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if (!outputFile.getParentFile().exists()) { + if (!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { + String username = System.getenv("MVNW_USERNAME"); + char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); + Authenticator.setDefault(new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(username, password); + } + }); + } + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.jar b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..0d5e649888a4843c1520054d9672f80c62ebbb48 Binary files /dev/null and b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.jar differ diff --git a/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.properties b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000000000000000000000000000000000..7d59a01f2594defa27705a493da0e4d57465aa2d --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,2 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.2/apache-maven-3.6.2-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar diff --git a/tests/examples/JDBC/springbootdemo/mvnw b/tests/examples/JDBC/springbootdemo/mvnw new file mode 100755 index 0000000000000000000000000000000000000000..21d3ee84568ff68c4712677da7c3b06f61ab5543 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/mvnw @@ -0,0 +1,310 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! 
`expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + fi + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` + fi + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... 
using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=`cygpath --path --windows "$javaClass"` + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/tests/examples/JDBC/springbootdemo/mvnw.cmd b/tests/examples/JDBC/springbootdemo/mvnw.cmd new file mode 100644 index 0000000000000000000000000000000000000000..84d60abc339b13f80f3300b00387f2d4cc4eb328 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM https://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. 
See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! 
%%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml new file mode 100644 index 0000000000000000000000000000000000000000..74522979c068120ac175f324dced6e8cd66ca1d8 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/pom.xml @@ -0,0 +1,87 @@ + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 2.2.1.RELEASE + + + com.taosdata.jdbc + springbootdemo + 0.0.1-SNAPSHOT + springbootdemo + Demo project for using tdengine with Spring Boot + + + 1.8 + + + + + org.springframework.boot + spring-boot-starter-data-jdbc + + + org.springframework.boot + spring-boot-starter-thymeleaf + + + org.springframework.boot + spring-boot-starter-web + + + org.mybatis.spring.boot + mybatis-spring-boot-starter + 2.1.1 + + + + org.springframework.boot + spring-boot-devtools + runtime + true + + + org.springframework.boot + spring-boot-configuration-processor + true + + + org.springframework.boot + spring-boot-starter-test + test + + + org.junit.vintage + junit-vintage-engine + + + + + + com.taosdata.jdbc + taos-jdbcdriver + 1.0.3 + + + + com.alibaba + druid-spring-boot-starter + 1.1.17 + + + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + + diff --git a/tests/examples/JDBC/springbootdemo/readme.md b/tests/examples/JDBC/springbootdemo/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..64aabedcdce5d16a610f4f2b084b8d62d54ff133 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/readme.md @@ -0,0 +1,96 @@ +## TDengine SpringBoot + Mybatis Demo + +### Configure application.properties +```properties +# datasource config +spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver +spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/log +spring.datasource.username=root +spring.datasource.password=taosdata + +spring.datasource.druid.initial-size=5 +spring.datasource.druid.min-idle=5 +spring.datasource.druid.max-active=5 +# max wait time for get connection, ms +spring.datasource.druid.max-wait=60000 + +spring.datasource.druid.validation-query=describe log.dn +spring.datasource.druid.validation-query-timeout=5000 +spring.datasource.druid.test-on-borrow=false +spring.datasource.druid.test-on-return=false +spring.datasource.druid.test-while-idle=true +spring.datasource.druid.time-between-eviction-runs-millis=60000 +spring.datasource.druid.min-evictable-idle-time-millis=600000 +spring.datasource.druid.max-evictable-idle-time-millis=900000 + +# mybatis +mybatis.mapper-locations=classpath:mapper/*.xml + +# log +logging.level.com.taosdata.jdbc.springbootdemo.dao=debug +``` + +### Main features + +* Create the database and table +```xml + + + create database if not exists test; + + + + create table if not exists test.weather(ts timestamp, temperature int, humidity float); + +``` +
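The statements behind these features live in a MyBatis mapper XML (see `WeatherMapper.xml` further down). A minimal sketch of such a mapper is shown below; the statement ids follow the `WeatherMapper` interface, while the `parameterType`/`resultType` choices and the exact paging SQL are assumptions rather than the demo's actual file:

```xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- Sketch only: ids match WeatherMapper.java; attribute choices and the paging SQL are assumptions -->
<mapper namespace="com.taosdata.jdbc.springbootdemo.dao.WeatherMapper">

    <update id="createDB">
        create database if not exists test;
    </update>

    <update id="createTable">
        create table if not exists test.weather(ts timestamp, temperature int, humidity float);
    </update>

    <insert id="insert" parameterType="com.taosdata.jdbc.springbootdemo.domain.Weather">
        insert into test.weather (ts, temperature, humidity)
        values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT})
    </insert>

    <insert id="batchInsert" parameterType="java.util.List">
        insert into test.weather (ts, temperature, humidity) values
        <foreach collection="list" item="weather" index="index" separator=" ">
            (now + #{index}a, #{weather.temperature}, #{weather.humidity})
        </foreach>
    </insert>

    <select id="select" resultType="com.taosdata.jdbc.springbootdemo.domain.Weather">
        select ts, temperature, humidity from test.weather order by ts desc limit #{limit} offset #{offset}
    </select>

</mapper>
```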
+* Insert a single record +```xml + + + insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT}) + +``` +* Insert multiple records +```xml + + + insert into test.weather (ts, temperature, humidity) values + + (now + #{index}a, #{weather.temperature}, #{weather.humidity}) + + +``` +* Paginated query +```xml + + + + + + + + + + + + + + ts, temperature, humidity + + + + +``` + diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplication.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplication.java new file mode 100644 index 0000000000000000000000000000000000000000..69cd3e0ced2888575d890ffea36407455c4bea7a --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplication.java @@ -0,0 +1,15 @@ +package com.taosdata.jdbc.springbootdemo; + +import org.mybatis.spring.annotation.MapperScan; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@MapperScan(basePackages = {"com.taosdata.jdbc.springbootdemo.dao"}) +@SpringBootApplication +public class SpringbootdemoApplication { + + public static void main(String[] args) { + SpringApplication.run(SpringbootdemoApplication.class, args); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java new file mode 100644 index 0000000000000000000000000000000000000000..9123abd97b82fe1d4267c7341f3ea87bd5127caa --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java @@ -0,0 +1,60 @@ +package com.taosdata.jdbc.springbootdemo.controller; + +import com.taosdata.jdbc.springbootdemo.domain.Weather; +import com.taosdata.jdbc.springbootdemo.service.WeatherService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.*; + +import java.util.List; + +@RequestMapping("/weather") +@RestController +public class WeatherController { + + @Autowired + private WeatherService weatherService; + + /** + * create database and table + * @return + */ + @GetMapping("/init") + public boolean init(){ + return weatherService.init(); + } + + /** + * Pagination Query + * @param limit + * @param offset + * @return + */ + @GetMapping("/{limit}/{offset}") + public List<Weather> queryWeather(@PathVariable Long limit, @PathVariable Long offset){ + return weatherService.query(limit, offset); + } + + /** + * upload a single weather record + * @param temperature + * @param humidity + * @return + */ + @PostMapping("/{temperature}/{humidity}") + public int saveWeather(@PathVariable int temperature, @PathVariable float humidity){ + + return weatherService.save(temperature, humidity); + } + + /** + * upload multiple weather records + * @param weatherList + * @return + */ + @PostMapping("/batch") + public int batchSaveWeather(@RequestBody List<Weather> weatherList){ + + return weatherService.save(weatherList); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/WeatherMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/WeatherMapper.java new file mode 100644 index 
0000000000000000000000000000000000000000..1e3db1f49106606c412851c0a74ad382adea68fb --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/WeatherMapper.java @@ -0,0 +1,19 @@ +package com.taosdata.jdbc.springbootdemo.dao; + +import com.taosdata.jdbc.springbootdemo.domain.Weather; +import org.apache.ibatis.annotations.Param; + +import java.util.List; + +public interface WeatherMapper { + + int insert(Weather weather); + + int batchInsert(List<Weather> weatherList); + + List<Weather> select(@Param("limit") Long limit, @Param("offset") Long offset); + + void createDB(); + + void createTable(); +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java new file mode 100644 index 0000000000000000000000000000000000000000..9547a8a89bf4aaff73696091e54f5bb460dcb796 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java @@ -0,0 +1,36 @@ +package com.taosdata.jdbc.springbootdemo.domain; + +import java.sql.Timestamp; + +public class Weather { + + private Timestamp ts; + + private int temperature; + + private float humidity; + + public Timestamp getTs() { + return ts; + } + + public void setTs(Timestamp ts) { + this.ts = ts; + } + + public int getTemperature() { + return temperature; + } + + public void setTemperature(int temperature) { + this.temperature = temperature; + } + + public float getHumidity() { + return humidity; + } + + public void setHumidity(float humidity) { + this.humidity = humidity; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java new file mode 100644 index 0000000000000000000000000000000000000000..396d70bf9246bfd7e293cccec5b00f2d4aac4963 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java @@ -0,0 +1,40 @@ +package com.taosdata.jdbc.springbootdemo.service; + +import com.taosdata.jdbc.springbootdemo.dao.WeatherMapper; +import com.taosdata.jdbc.springbootdemo.domain.Weather; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.List; + +@Service +public class WeatherService { + + @Autowired + private WeatherMapper weatherMapper; + + public boolean init() { + + weatherMapper.createDB(); + weatherMapper.createTable(); + + return true; + } + + public List<Weather> query(Long limit, Long offset) { + return weatherMapper.select(limit, offset); + } + + public int save(int temperature, float humidity) { + Weather weather = new Weather(); + weather.setTemperature(temperature); + weather.setHumidity(humidity); + + return weatherMapper.insert(weather); + } + + public int save(List<Weather> weatherList) { + return weatherMapper.batchInsert(weatherList); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties new file mode 100644 index 0000000000000000000000000000000000000000..dc77e144f0ffbe131f7eda69b7bb66fd7870c05e --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties @@ -0,0 +1,26 @@ +# datasource config 
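+# NOTE: com.taosdata.jdbc.TSDBDriver is a JNI driver, so the TDengine client library must be installed on the host that runs this demo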
+spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver +spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/log +spring.datasource.username=root +spring.datasource.password=taosdata + +spring.datasource.druid.initial-size=5 +spring.datasource.druid.min-idle=5 +spring.datasource.druid.max-active=5 +# max wait time for get connection, ms +spring.datasource.druid.max-wait=60000 + +spring.datasource.druid.validation-query=describe log.dn +spring.datasource.druid.validation-query-timeout=5000 +spring.datasource.druid.test-on-borrow=false +spring.datasource.druid.test-on-return=false +spring.datasource.druid.test-while-idle=true +spring.datasource.druid.time-between-eviction-runs-millis=60000 +spring.datasource.druid.min-evictable-idle-time-millis=600000 +spring.datasource.druid.max-evictable-idle-time-millis=900000 + + +#mybatis +mybatis.mapper-locations=classpath:mapper/*.xml + +logging.level.com.taosdata.jdbc.springbootdemo.dao=debug \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/mapper/WeatherMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/resources/mapper/WeatherMapper.xml new file mode 100644 index 0000000000000000000000000000000000000000..e894f9a6583d271d8ce526e9afe79528f0fd5490 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/resources/mapper/WeatherMapper.xml @@ -0,0 +1,49 @@ + + + + + + + + + + + + + create database if not exists test; + + + + create table if not exists test.weather(ts timestamp, temperature int, humidity float); + + + + ts, temperature, humidity + + + + + + insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT}) + + + + insert into test.weather (ts, temperature, humidity) values + + (now + #{index}a, #{weather.temperature}, #{weather.humidity}) + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/test/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplicationTests.java b/tests/examples/JDBC/springbootdemo/src/test/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplicationTests.java new file mode 100644 index 0000000000000000000000000000000000000000..23a7420dab24a15d9d24341839ba58caa9acb4b9 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/test/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplicationTests.java @@ -0,0 +1,13 @@ +package com.taosdata.jdbc.springbootdemo; + +import org.junit.jupiter.api.Test; +import org.springframework.boot.test.context.SpringBootTest; + +@SpringBootTest +class SpringbootdemoApplicationTests { + + @Test + void contextLoads() { + } + +} diff --git a/tests/examples/lua/build.sh b/tests/examples/lua/build.sh old mode 100644 new mode 100755 diff --git a/tests/examples/lua/lua_connector.c b/tests/examples/lua/lua_connector.c index f37657e822cf819a9a5877858a1c216f11ea4296..f4065bb27452fba0e4cccbd8d7024e68579f5637 100644 --- a/tests/examples/lua/lua_connector.c +++ b/tests/examples/lua/lua_connector.c @@ -7,8 +7,15 @@ #include #include -static int l_connect(lua_State *L) -{ +struct cb_param{ + lua_State* state; + int callback; + void * stream; +}; + + + +static int l_connect(lua_State *L){ TAOS * taos; char *host = lua_tostring(L, 1); char *user = lua_tostring(L, 2); @@ -29,6 +36,7 @@ static int l_connect(lua_State *L) lua_pushstring(L, taos_errstr(taos)); lua_setfield(L, table_index, "error"); lua_pushlightuserdata(L,NULL); + lua_setfield(L, table_index, "conn"); }else{ printf("success to connect server\n"); lua_pushnumber(L, 0); @@ 
-49,7 +57,7 @@ static int l_query(lua_State *L){ lua_newtable(L); int table_index = lua_gettop(L); - printf("receive command:%s\r\n",s); + // printf("receive command:%s\r\n",s); if(taos_query(taos, s)!=0){ printf("failed, reason:%s\n", taos_errstr(taos)); lua_pushnumber(L, -1); @@ -78,8 +86,12 @@ static int l_query(lua_State *L){ TAOS_FIELD *fields = taos_fetch_fields(result); char temp[256]; + int affectRows = taos_affected_rows(taos); + // printf(" affect rows:%d\r\n", affectRows); lua_pushnumber(L, 0); lua_setfield(L, table_index, "code"); + lua_pushinteger(L, affectRows); + lua_setfield(L, table_index, "affected"); lua_newtable(L); while ((row = taos_fetch_row(result))) { @@ -95,7 +107,7 @@ static int l_query(lua_State *L){ } lua_pushstring(L,fields[i].name); - //printf("field name:%s,type:%d\n",fields[i].name,fields[i].type); + switch (fields[i].type) { case TSDB_DATA_TYPE_TINYINT: lua_pushinteger(L,*((char *)row[i])); @@ -142,6 +154,115 @@ static int l_query(lua_State *L){ return 1; } +void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){ + + struct cb_param* p = (struct cb_param*) param; + TAOS_FIELD *fields = taos_fetch_fields(result); + int numFields = taos_num_fields(result); + + printf("\n\r-----------------------------------------------------------------------------------\n"); + + // printf("r:%d, L:%d\n",p->callback, p->state); + + lua_State *L = p->state; + lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback); + + lua_newtable(L); + + for (int i = 0; i < numFields; ++i) { + if (row[i] == NULL) { + continue; + } + + lua_pushstring(L,fields[i].name); + + switch (fields[i].type) { + case TSDB_DATA_TYPE_TINYINT: + lua_pushinteger(L,*((char *)row[i])); + break; + case TSDB_DATA_TYPE_SMALLINT: + lua_pushinteger(L,*((short *)row[i])); + break; + case TSDB_DATA_TYPE_INT: + lua_pushinteger(L,*((int *)row[i])); + break; + case TSDB_DATA_TYPE_BIGINT: + lua_pushinteger(L,*((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: + lua_pushnumber(L,*((float *)row[i])); + break; + case TSDB_DATA_TYPE_DOUBLE: + lua_pushnumber(L,*((double *)row[i])); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + lua_pushstring(L,(char *)row[i]); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + lua_pushinteger(L,*((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_BOOL: + lua_pushinteger(L,*((char *)row[i])); + break; + default: + lua_pushnil(L); + break; + } + + lua_settable(L, -3); + } + + lua_call(L, 1, 0); + + printf("-----------------------------------------------------------------------------------\n\r"); +} + +static int l_open_stream(lua_State *L){ + int r = luaL_ref(L, LUA_REGISTRYINDEX); + TAOS * taos = lua_topointer(L,1); + char * sqlstr = lua_tostring(L,2); + int stime = luaL_checknumber(L,3); + + lua_newtable(L); + int table_index = lua_gettop(L); + + struct cb_param *p = malloc(sizeof(struct cb_param)); + p->state = L; + p->callback=r; + // printf("r:%d, L:%d\n",r,L); + void * s = taos_open_stream(taos,sqlstr,stream_cb,stime,p,NULL); + if (s == NULL) { + printf("failed to open stream, reason:%s\n", taos_errstr(taos)); + free(p); + lua_pushnumber(L, -1); + lua_setfield(L, table_index, "code"); + lua_pushstring(L, taos_errstr(taos)); + lua_setfield(L, table_index, "error"); + lua_pushlightuserdata(L,NULL); + lua_setfield(L, table_index, "stream"); + }else{ + // printf("success to open stream\n"); + lua_pushnumber(L, 0); + lua_setfield(L, table_index, "code"); + lua_pushstring(L, taos_errstr(taos)); + lua_setfield(L, table_index, "error"); + p->stream = s; + 
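/* hand cb_param back to Lua as a light userdata so that l_close_stream can later stop the stream and free it */ +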
lua_pushlightuserdata(L,p); + lua_setfield(L, table_index, "stream"); // the stream field holds different content on the Lua side and the C side. + } + + return 1; +} + +static int l_close_stream(lua_State *L){ + //TODO:get stream and free cb_param + struct cb_param *p = lua_touserdata(L,1); + taos_close_stream(p->stream); + free(p); + return 0; +} + static int l_close(lua_State *L){ TAOS * taos= lua_topointer(L,1); lua_newtable(L); @@ -166,6 +287,8 @@ static const struct luaL_Reg lib[] = { {"connect", l_connect}, {"query", l_query}, {"close", l_close}, + {"open_stream", l_open_stream}, + {"close_stream", l_close_stream}, {NULL, NULL} }; diff --git a/tests/examples/lua/test.lua b/tests/examples/lua/test.lua index f644b82dd409ea2408be9be7c194f965f96cc82a..38ae1c82f2b1bbcb473257200241fb20ab44878b 100644 --- a/tests/examples/lua/test.lua +++ b/tests/examples/lua/test.lua @@ -35,10 +35,12 @@ if res.code ~=0 then return end -res = driver.query(conn,"insert into m1 values (1592222222222,0,'robotspace'), (1592222222223,1,'Hilink'),(1592222222224,2,'Harmony')") +res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001',0,'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')") if res.code ~=0 then print(res.error) return +else + print("inserted successfully, affected:"..res.affected) end res = driver.query(conn,"select * from m1") @@ -55,4 +57,69 @@ else end end +res = driver.query(conn,"CREATE TABLE thermometer (ts timestamp, degree double) TAGS(location binary(20), type int)") +if res.code ~=0 then + print(res.error) + return +end +res = driver.query(conn,"CREATE TABLE therm1 USING thermometer TAGS ('beijing', 1)") +if res.code ~=0 then + print(res.error) + return +end +res = driver.query(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.001', 20),('2019-09-01 00:00:00.002', 21)") + +if res.code ~=0 then + print(res.error) + return +else + print("inserted successfully, affected:"..res.affected) +end + +res = driver.query(conn,"SELECT COUNT(*) count, AVG(degree) AS av, MAX(degree), MIN(degree) FROM thermometer WHERE location='beijing' or location='tianjin' GROUP BY location, type") +if res.code ~=0 then + print("select error:"..res.error) + return +else + print("in lua, result:") + for i = 1, #(res.item) do + print("res:"..res.item[i].count) + end +end + +function callback(t) + print("continuous query result:") + for key, value in pairs(t) do + print("key:"..key..", value:"..value) + end +end + +local stream +res = driver.open_stream(conn,"SELECT COUNT(*) as count, AVG(degree) as avg, MAX(degree) as max, MIN(degree) as min FROM thermometer interval(2s) sliding(2s);",0,callback) +if res.code ~=0 then + print("open stream error:"..res.error) + return +else + print("openstream ok") + stream = res.stream +end + +-- From now on we run the continuous query in a definite (or infinite, if you like) loop. +local loop_index = 0 +while loop_index < 20 do + local t = os.time()*1000 + local v = loop_index + res = driver.query(conn,string.format("INSERT INTO therm1 VALUES (%d, %d)",t,v)) + + if res.code ~=0 then + print(res.error) + return + else + print("inserted successfully, affected:"..res.affected) + end + os.execute("sleep " .. 1) + loop_index = loop_index + 1 +end + +driver.close_stream(stream) driver.close(conn)