diff --git a/CMakeLists.txt b/CMakeLists.txt index a1b2c16f4637f6500eab1a255fd45f1364483377..553da9245bc5d805b9a95cc2120d6b6783da2b30 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,10 +26,17 @@ SET(CMAKE_VERBOSE_MAKEFILE ON) # open the file named TDengine.sln # +SET(TD_GODLL FALSE) +IF (${DLLTYPE} MATCHES "go") + ADD_DEFINITIONS(-D_TD_GO_DLL_) + MESSAGE(STATUS "input dll type: " ${DLLTYPE}) + SET(TD_GODLL TRUE) +ENDIF () + IF (NOT DEFINED TD_CLUSTER) MESSAGE(STATUS "Build the Lite Version") SET(TD_CLUSTER FALSE) - SET(TD_LITE TRUE) + SET(TD_EDGE TRUE) SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR}) MESSAGE(STATUS "Community directory: " ${TD_COMMUNITY_DIR}) @@ -41,34 +48,49 @@ IF (NOT DEFINED TD_CLUSTER) SET(TD_ARM FALSE) SET(TD_ARM_64 FALSE) SET(TD_ARM_32 FALSE) + SET(TD_MIPS FALSE) SET(TD_MIPS_64 FALSE) + SET(TD_MIPS_32 FALSE) SET(TD_DARWIN_64 FALSE) SET(TD_WINDOWS_64 FALSE) + SET(TD_PAGMODE_LITE FALSE) + + IF (${PAGMODE} MATCHES "lite") + SET(TD_PAGMODE_LITE TRUE) + ENDIF () # if generate ARM version: - # cmake -DARMVER=arm32 .. or cmake -DARMVER=arm64 - IF (${ARMVER} MATCHES "arm32") + # cmake -DCPUTYPE=aarch32 .. 
or cmake -DCPUTYPE=aarch64 + IF (${CPUTYPE} MATCHES "aarch32") SET(TD_ARM TRUE) SET(TD_ARM_32 TRUE) + SET(TD_PAGMODE_LITE TRUE) ADD_DEFINITIONS(-D_TD_ARM_) ADD_DEFINITIONS(-D_TD_ARM_32_) - ELSEIF (${ARMVER} MATCHES "arm64") + ELSEIF (${CPUTYPE} MATCHES "aarch64") SET(TD_ARM TRUE) SET(TD_ARM_64 TRUE) ADD_DEFINITIONS(-D_TD_ARM_) ADD_DEFINITIONS(-D_TD_ARM_64_) + ELSEIF (${CPUTYPE} MATCHES "mips64") + SET(TD_MIPS TRUE) + SET(TD_MIPS_64 TRUE) + ADD_DEFINITIONS(-D_TD_MIPS_) + ADD_DEFINITIONS(-D_TD_MIPS_64_) + ELSEIF (${CPUTYPE} MATCHES "x64") + MESSAGE(STATUS "input cpuType: " ${CPUTYPE}) + ELSEIF (${CPUTYPE} MATCHES "x86") + MESSAGE(STATUS "input cpuType: " ${CPUTYPE}) + ELSE () + MESSAGE(STATUS "input cpuType: " ${CPUTYPE}) ENDIF () - IF (TD_ARM) - ADD_DEFINITIONS(-D_TD_ARM_) - IF (TD_ARM_32) - ADD_DEFINITIONS(-D_TD_ARM_32_) - ELSEIF (TD_ARM_64) - ADD_DEFINITIONS(-D_TD_ARM_64_) - ELSE () - EXIT () - ENDIF () - ENDIF () + # + # Get OS information and store in variable TD_OS_INFO. + # + execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh) + execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO) + MESSAGE(STATUS "The current os is " ${TD_OS_INFO}) IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux") IF (${CMAKE_SIZEOF_VOID_P} MATCHES 8) @@ -76,17 +98,17 @@ IF (NOT DEFINED TD_CLUSTER) SET(TD_OS_DIR ${TD_COMMUNITY_DIR}/src/os/linux) ADD_DEFINITIONS(-D_M_X64) MESSAGE(STATUS "The current platform is Linux 64-bit") - ELSEIF (${CMAKE_SIZEOF_VOID_P} MATCHES 4) - IF (TD_ARM) - SET(TD_LINUX_32 TRUE) - SET(TD_OS_DIR ${TD_COMMUNITY_DIR}/src/os/linux) - #ADD_DEFINITIONS(-D_M_IX86) - MESSAGE(STATUS "The current platform is Linux 32-bit") + ELSEIF (${CMAKE_SIZEOF_VOID_P} MATCHES 4) + IF (TD_ARM) + SET(TD_LINUX_32 TRUE) + SET(TD_OS_DIR ${TD_COMMUNITY_DIR}/src/os/linux) + #ADD_DEFINITIONS(-D_M_IX86) + MESSAGE(STATUS "The current platform is Linux 32-bit") + ELSE () + MESSAGE(FATAL_ERROR "The current platform is Linux 32-bit, 
but no ARM not supported yet") + EXIT () + ENDIF () ELSE () - MESSAGE(FATAL_ERROR "The current platform is Linux 32-bit, but no ARM not supported yet") - EXIT () - ENDIF () - ELSE () MESSAGE(FATAL_ERROR "The current platform is Linux neither 32-bit nor 64-bit, not supported yet") EXIT () ENDIF () @@ -141,37 +163,51 @@ IF (NOT DEFINED TD_CLUSTER) SET(RELEASE_FLAGS "-O0") IF (NOT TD_ARM) IF (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ELSE () - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () ELSE () - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -Wno-char-subscripts -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () ADD_DEFINITIONS(-DLINUX) ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT) + IF (${TD_OS_INFO} MATCHES "Alpine") + MESSAGE(STATUS "The current OS is Alpine, append extra flags") + SET(COMMON_FLAGS "${COMMON_FLAGS} -largp") + link_libraries(/usr/lib/libargp.a) + ADD_DEFINITIONS(-D_ALPINE) + ENDIF () ELSEIF (TD_LINUX_32) IF (NOT TD_ARM) EXIT () ENDIF () SET(DEBUG_FLAGS "-O0 -DDEBUG") SET(RELEASE_FLAGS "-O0") - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -Wno-char-subscripts -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ADD_DEFINITIONS(-DLINUX) 
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT) ADD_DEFINITIONS(-DUSE_LIBICONV) + IF (${TD_OS_INFO} MATCHES "Alpine") + MESSAGE(STATUS "The current OS is Alpine, add extra flags") + SET(COMMON_FLAGS "${COMMON_FLAGS} -largp") + link_libraries(/usr/lib/libargp.a) + ADD_DEFINITIONS(-D_ALPINE) + ENDIF () ELSEIF (TD_WINDOWS_64) SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE) - SET(COMMON_FLAGS "/nologo /WX- /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-") - SET(DEBUG_FLAGS "/Zi /W3 /GL") - SET(RELEASE_FLAGS "/W0 /GL") + IF (NOT TD_GODLL) + SET(COMMON_FLAGS "/nologo /WX- /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-") + SET(DEBUG_FLAGS "/Zi /W3 /GL") + SET(RELEASE_FLAGS "/W0 /GL") + ENDIF () ADD_DEFINITIONS(-DWINDOWS) ADD_DEFINITIONS(-D__CLEANUP_C) ADD_DEFINITIONS(-DPTW32_STATIC_LIB) ADD_DEFINITIONS(-DPTW32_BUILD) ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE) ELSEIF (TD_DARWIN_64) - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-unused-variable -Wno-bitfield-constant-conversion") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") SET(DEBUG_FLAGS "-O0 -DDEBUG") SET(RELEASE_FLAGS "-O0") ADD_DEFINITIONS(-DDARWIN) @@ -230,21 +266,31 @@ IF (NOT DEFINED TD_CLUSTER) INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})") ELSEIF (TD_WINDOWS_64) SET(CMAKE_INSTALL_PREFIX C:/TDengine) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) 
- INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) - INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) - #INSTALL(TARGETS taos RUNTIME DESTINATION driver) - #INSTALL(TARGETS shell RUNTIME DESTINATION .) - IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-1.0.2-dist.jar DESTINATION connector/jdbc) + IF (NOT TD_GODLL) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) + INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) + #INSTALL(TARGETS taos RUNTIME DESTINATION driver) + #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
+ IF (TD_MVN_INSTALLED) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-1.0.2-dist.jar DESTINATION connector/jdbc) + ENDIF () + ELSE () + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver) ENDIF () + ELSEIF (TD_DARWIN_64) + SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") + INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") + INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})") + INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin)") ENDIF () ENDIF () diff --git a/README.md b/README.md index c82cda6f3cafe148c317b132edcb7041137670e0..8d60a892e0d89ae1a3a39644cd1633b19598d66e 100644 --- a/README.md +++ b/README.md @@ -39,12 +39,24 @@ sudo apt-get install maven ``` Build TDengine: -```cmd +``` mkdir build && cd build cmake .. && cmake --build . ``` +To compile on an ARM processor (aarch64 or aarch32), please add option CPUTYPE as below: + +aarch64: +```cmd +cmake .. -DCPUTYPE=aarch64 && cmake --build . +``` + +aarch32: +```cmd +cmake .. -DCPUTYPE=aarch32 && cmake --build . +``` + # Quick Run To quickly start a TDengine server after building, run the command below in terminal: ```cmd @@ -118,3 +130,8 @@ The TDengine community has also kindly built some of their own connectors! Follo # Contribute to TDengine Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project. + +# Join TDengine WeChat Group + +Add WeChat “tdengine” to join the group,you can communicate with other users. 
+ diff --git a/deps/iconv/iconv.c b/deps/iconv/iconv.c index b84a09fd0ae9d0920152fcd2c08ac4fa7c2ad268..391e35d4e78efda0cb881b247cbccf22ab66c67b 100644 --- a/deps/iconv/iconv.c +++ b/deps/iconv/iconv.c @@ -175,7 +175,10 @@ static const struct alias sysdep_aliases[] = { #ifdef __GNUC__ __inline #endif -const struct alias * +// gcc -o0 bug fix +// see http://git.savannah.gnu.org/gitweb/?p=libiconv.git;a=blobdiff;f=lib/iconv.c;h=31853a7f1c47871221189dbf597473a16d8a8da7;hp=5a1a32597fa3efc5f69624d37a2eb96f308cd241;hb=b29089d8b43abc8fba073da7e6dccaeba56b2b70;hpb=0a04404c90d6a725b8b6bbcd65e10c5fcf5993e9 + +static const struct alias * aliases2_lookup (register const char *str) { const struct alias * ptr; diff --git a/documentation/tdenginedocs-cn/connector/index.html b/documentation/tdenginedocs-cn/connector/index.html index fca343e977fa48fc9b1a6fc4bb55e23c0056fe7b..3167c1521f099f8acd7ae237cc37bd5867ee209a 100644 --- a/documentation/tdenginedocs-cn/connector/index.html +++ b/documentation/tdenginedocs-cn/connector/index.html @@ -114,23 +114,84 @@ public Connection getConn() throws Exception{

对于TDengine操作的报错信息,用户可使用JDBCDriver包里提供的枚举类TSDBError.java来获取error message和error code的列表。对于更多的具体操作的相关代码,请参考TDengine提供的使用示范项目JDBCDemo

Python Connector

-

Python客户端安装

-

用户可以在源代码的src/connector/python文件夹下找到python2和python3的安装包。用户可以通过pip命令安装:

-

pip install src/connector/python/python2/

-

-

pip install src/connector/python/python3/

-

如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。

-

Python客户端接口

-

在使用TDengine的python接口时,需导入TDengine客户端模块:

-
import taos 
-

用户可通过python的帮助信息直接查看模块的使用信息,或者参考code/examples/python中的示例程序。以下为部分常用类和方法:

+

安装准备

+
  • 已安装TDengine, 如果客户端在Windows上,需要安装Windows 版本的TDengine客户端
  • +
  • 已安装python 2.7 or >= 3.4
  • +
  • 已安装pip
  • +

    安装

    +

    Linux

    +

    用户可以在源代码的src/connector/python文件夹下找到python2和python3的安装包, 然后通过pip命令安装

    +
    pip install src/connector/python/linux/python2/
    +

    或者

    +
    pip install src/connector/python/linux/python3/
    +

    Windows

    +

    在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\windows\system32" 目录下, 然后进入Windows cmd 命令行界面

    +
    cd C:\TDengine\connector\python\windows
    +
    pip install python2\
    +

    或者

    +
    cd C:\TDengine\connector\python\windows
    +
    pip install python3\
    +

    * 如果机器上没有pip命令,用户可将src/connector/python/windows/python3或src/connector/python/windows/python2下的taos文件夹拷贝到应用程序的目录使用。

    +

    使用

    +

    代码示例

    +
  • 导入TDengine客户端模块:
  • +
    import taos 
    +
  • 获取连接
  • +
    
    +conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
    +c1 = conn.cursor()
    +
    +

    * host 是TDengine 服务端所在IP, config 为客户端配置文件所在目录

    +
  • 写入数据
  • +
    
    +import datetime
    + 
    +# 创建数据库
    +c1.execute('create database db')
    +c1.execute('use db')
    +# 建表
    +c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
    +# 插入数据
    +start_time = datetime.datetime(2019, 11, 1)
    +affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
    +# 批量插入数据
    +time_interval = datetime.timedelta(seconds=60)
    +sqlcmd = ['insert into tb values']
    +for irow in range(1,11):
    +  start_time += time_interval
    +  sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
    +affected_rows = c1.execute(' '.join(sqlcmd))
    +
    +
  • 查询数据
  • +
    +c1.execute('select * from tb')
    +# 拉取查询结果
    +data = c1.fetchall()
    +# 返回的结果是一个列表,每一行构成列表的一个元素
    +numOfRows = c1.rowcount
    +numOfCols = c1.descriptions
    +for irow in range(numOfRows):
    +  print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
    +  
    +# 直接使用cursor 循环拉取查询结果
    +c1.execute('select * from tb')
    +for data in c1:
    +  print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
    +
    +
  • 关闭连接
  • +
    +c1.close()
    +conn.close()
    +
    +

    帮助信息

    +

    用户可通过python的帮助信息直接查看模块的使用信息,或者参考code/examples/python中的示例程序。以下为部分常用类和方法:

    RESTful Connector

    为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。

    diff --git a/documentation/tdenginedocs-cn/getting-started/index.html b/documentation/tdenginedocs-cn/getting-started/index.html index 65ef667d001ecabeaf5f85bddb1ca86d8f75acb8..d7d5d8540c6c46bbf5210677339c2ee202a7ec86 100644 --- a/documentation/tdenginedocs-cn/getting-started/index.html +++ b/documentation/tdenginedocs-cn/getting-started/index.html @@ -28,7 +28,7 @@

    在TDengine终端中,用户可以通过SQL命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的SQL语句需要以分号结束来运行。示例:

    create database db;
     use db;
    -create table t (ts timestamp, cdata int);
    +create table t (ts timestamp, speed int);
     insert into t values ('2019-07-15 00:00:00', 10);
     insert into t values ('2019-07-15 01:00:00', 20);
     select * from t;
    @@ -85,4 +85,4 @@ Query OK, 2 row(s) in set (0.001700s)

    TDengine是专为物联网、车联网、工业互联网、运维监测等场景优化设计的时序数据处理引擎。与其他方案相比,它的插入查询速度都快10倍以上。单核一秒钟就能插入100万数据点,读出1000万数据点。由于采用列式存储和优化的压缩算法,存储空间不及普通数据库的1/10.

    深入了解TDengine

    -

    请继续阅读文档来深入了解TDengine。

    回去 \ No newline at end of file +

    请继续阅读文档来深入了解TDengine。

    回去 diff --git a/documentation/tdenginedocs-cn/super-table/index.html b/documentation/tdenginedocs-cn/super-table/index.html index 828a69bb0ceaaefcc526e95042319d565e841d2d..42d54ce7e260a7955f746002091919043dc318ff 100644 --- a/documentation/tdenginedocs-cn/super-table/index.html +++ b/documentation/tdenginedocs-cn/super-table/index.html @@ -32,7 +32,7 @@ tags (location binary(20), type int)

    查看数据库内全部STable,及其相关信息,包括STable的名称、创建时间、列数量、标签(TAG)数量、通过该STable建表的数量。

  • 删除超级表

    DROP TABLE <stable_name>
    -

    Note: 删除STable不会级联删除通过STable创建的表;相反删除STable时要求通过该STable创建的表都已经被删除。

  • +

    Note: 删除STable时,所有通过该STable创建的表都将被删除。

  • 查看属于某STable并满足查询条件的表

    SELECT TBNAME,[TAG_NAME,…] FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] …)

    查看属于某STable并满足查询条件的表。说明:TBNAME为关键词,显示通过STable建立的子表表名,查询过程中可以使用针对标签的条件。

    diff --git a/documentation/tdenginedocs-en/connector/index.html b/documentation/tdenginedocs-en/connector/index.html index ce32c062ffa6b92c257be97831e4fb8c292ec64d..0f9e6b4717c1c32716046a38bef18268cc2e0408 100644 --- a/documentation/tdenginedocs-en/connector/index.html +++ b/documentation/tdenginedocs-en/connector/index.html @@ -122,15 +122,76 @@ public Connection getConn() throws Exception{

    All the error codes and error messages can be found in TSDBError.java . For a more detailed coding example, please refer to the demo project JDBCDemo in TDengine's code examples.

    Python Connector

    -

    Install TDengine Python client

    -

    Users can find python client packages in our source code directory src/connector/python. There are two directories corresponding two python versions. Please choose the correct package to install. Users can use pip command to install:

    -
    pip install src/connector/python/python2/
    +

    Pre-requirement

    +
  • TDengine installed, TDengine-client installed if on Windows
  • +
  • python 2.7 or >= 3.4
  • +
  • pip installed
  • +

    Installation

    +

    Linux

    +

    Users can find python client packages in our source code directory src/connector/python. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use pip command to install:

    +
    pip install src/connector/python/linux/python2/

    or

    -
    pip install src/connector/python/python3/
    -

    If pip command is not installed on the system, users can choose to install pip or just copy the taos directory in the python client directory to the application directory to use.

    -

    Python client interfaces

    -

    To use TDengine Python client, import TDengine module at first:

    +
    pip install src/connector/python/linux/python3/
    +

    Windows

    +

    Assuming the Windows TDengine client has been installed, copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then enter the Windows cmd command interface

    +
    cd C:\TDengine\connector\python\windows
    +
    pip install python2\
    +

    or

    +
    cd C:\TDengine\connector\python\windows
    +
    pip install python3\
    +

    * If pip command is not installed on the system, users can choose to install pip or just copy the taos directory in the python client directory to the application directory to use.

    +

    Usage

    +

    Examples

    +
  • import TDengine module at first:
  • import taos 
    +
  • get the connection
  • +
    
    +conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
    +c1 = conn.cursor()
    +
    +

    * host is the IP of the TDengine server, and config is the directory where the TDengine client configuration file is located

    +
  • insert records into the database
  • +
    
    +import datetime
    + 
    +# create a database
    +c1.execute('create database db')
    +c1.execute('use db')
    +# create a table
    +c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
    +# insert a record
    +start_time = datetime.datetime(2019, 11, 1)
    +affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
    +# insert multiple records in a batch
    +time_interval = datetime.timedelta(seconds=60)
    +sqlcmd = ['insert into tb values']
    +for irow in range(1,11):
    +  start_time += time_interval
    +  sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
    +affected_rows = c1.execute(' '.join(sqlcmd))
    +
    +
  • query the database
  • +
    +c1.execute('select * from tb')
    +# fetch all returned results
    +data = c1.fetchall()
    +# data is a list of returned rows with each row being a tuple
    +numOfRows = c1.rowcount
    +numOfCols = c1.descriptions
    +for irow in range(numOfRows):
    +  print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
    +  
    +# use the cursor as an iterator to retrieve all returned results
    +c1.execute('select * from tb')
    +for data in c1:
    +  print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
    +
    +
  • close the connection
  • +
    +c1.close()
    +conn.close()
    +
    +

    Help information

    Users can get module information from Python help interface or refer to our [python code example](). We list the main classes and methods below:

    TDengine is specially designed and optimized for time-series data processing in IoT, connected cars, Industrial IoT, IT infrastructure and application monitoring, and other scenarios. Compared with other solutions, it is 10x faster on insert/query speed. With a single-core machine, over 20K requests can be processed, millions of data points can be ingested, and over 10 million data points can be retrieved in a second. Via column-based storage and tuned compression algorithm for different data types, less than 1/10 storage space is required.

    Explore More on TDengine

    -

    Please read through the whole documentation to learn more about TDengine.

    Back \ No newline at end of file +

    Please read through the whole documentation to learn more about TDengine.

    Back diff --git a/documentation/tdenginedocs-en/super-table/index.html b/documentation/tdenginedocs-en/super-table/index.html index 21e7669a19118fd0bfd9b15693ae38bc66edbb0b..0e47a7bb9b4a170c70f9b484096f3af625865961 100644 --- a/documentation/tdenginedocs-en/super-table/index.html +++ b/documentation/tdenginedocs-en/super-table/index.html @@ -73,7 +73,7 @@ INTERVAL(10M)

    It lists the STable's schema and tags

    Drop a STable

    DROP TABLE <stable_name>
    -

    To delete a STable, all the tables created via this STable shall be deleted first, otherwise, it will fail.

    +

    To delete a STable, all the tables created via this STable will be deleted.

    List the Associated Tables of a STable

    SELECT TBNAME,[TAG_NAME, ...] FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] ...)

    It will list all the tables which satisfy the tag filter conditions. The tables are all created from this specific STable. TBNAME is a new keyword introduced, it is the table name associated with the STable.

    diff --git a/documentation/webdocs/assets/add_datasource1.jpg b/documentation/webdocs/assets/add_datasource1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f0f5110f312c57f3ec1788bbc02f04fac6ac142 Binary files /dev/null and b/documentation/webdocs/assets/add_datasource1.jpg differ diff --git a/documentation/webdocs/assets/add_datasource2.jpg b/documentation/webdocs/assets/add_datasource2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa7a83e00e96fae649910dff4edf5f5bdadd7850 Binary files /dev/null and b/documentation/webdocs/assets/add_datasource2.jpg differ diff --git a/documentation/webdocs/assets/add_datasource3.jpg b/documentation/webdocs/assets/add_datasource3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d8a4a5d7644e9fb6dfbd86ed897cdaf2093148f Binary files /dev/null and b/documentation/webdocs/assets/add_datasource3.jpg differ diff --git a/documentation/webdocs/assets/add_datasource4.jpg b/documentation/webdocs/assets/add_datasource4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..befb4243414554f24d165688f5117f1e70f8f21d Binary files /dev/null and b/documentation/webdocs/assets/add_datasource4.jpg differ diff --git a/documentation/webdocs/assets/clip_image001-2474914.png b/documentation/webdocs/assets/clip_image001-2474914.png deleted file mode 100644 index eb369b1567c860b772e1bfdad64ff17aaac2534d..0000000000000000000000000000000000000000 Binary files a/documentation/webdocs/assets/clip_image001-2474914.png and /dev/null differ diff --git a/documentation/webdocs/assets/clip_image001-2474939.png b/documentation/webdocs/assets/clip_image001-2474939.png deleted file mode 100644 index 53f00deea3a484986a5681ec9d00d8ae02e88fec..0000000000000000000000000000000000000000 Binary files a/documentation/webdocs/assets/clip_image001-2474939.png and /dev/null differ diff --git a/documentation/webdocs/assets/clip_image001-2474961.png 
b/documentation/webdocs/assets/clip_image001-2474961.png deleted file mode 100644 index 20ae8d6f7724a4bddcf8c7eb3809d468aa4223ed..0000000000000000000000000000000000000000 Binary files a/documentation/webdocs/assets/clip_image001-2474961.png and /dev/null differ diff --git a/documentation/webdocs/assets/clip_image001-2474987.png b/documentation/webdocs/assets/clip_image001-2474987.png deleted file mode 100644 index 3d09f7fc28e7a1fb7e3bb2b9b2bc7c20895e8bb4..0000000000000000000000000000000000000000 Binary files a/documentation/webdocs/assets/clip_image001-2474987.png and /dev/null differ diff --git a/documentation/webdocs/assets/clip_image001.png b/documentation/webdocs/assets/clip_image001.png deleted file mode 100644 index 78b6d06a9562b802e80f0ed5fdb8963b5e525589..0000000000000000000000000000000000000000 Binary files a/documentation/webdocs/assets/clip_image001.png and /dev/null differ diff --git a/documentation/webdocs/assets/create_dashboard1.jpg b/documentation/webdocs/assets/create_dashboard1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b83c3a1714e9e7540e0b06239ef7c1c4f63fe2c Binary files /dev/null and b/documentation/webdocs/assets/create_dashboard1.jpg differ diff --git a/documentation/webdocs/assets/create_dashboard2.jpg b/documentation/webdocs/assets/create_dashboard2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe5d768ac55254251e0290bf257178f5ff28f5a5 Binary files /dev/null and b/documentation/webdocs/assets/create_dashboard2.jpg differ diff --git a/documentation/webdocs/assets/import_dashboard1.jpg b/documentation/webdocs/assets/import_dashboard1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d2ce7ed65eb0c2c729de50283b30491793493dc Binary files /dev/null and b/documentation/webdocs/assets/import_dashboard1.jpg differ diff --git a/documentation/webdocs/assets/import_dashboard2.jpg b/documentation/webdocs/assets/import_dashboard2.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..94b09f0ee39552bb84f7ba1f65815ce2c9548b2d Binary files /dev/null and b/documentation/webdocs/assets/import_dashboard2.jpg differ diff --git a/documentation/webdocs/markdowndocs/Connections with other Tools-ch.md b/documentation/webdocs/markdowndocs/Connections with other Tools-ch.md index d7fd075097281bddddbe8c4b0e27736d5d718330..f7da0c654d493bb932b2b0edceccd16fbac2091b 100644 --- a/documentation/webdocs/markdowndocs/Connections with other Tools-ch.md +++ b/documentation/webdocs/markdowndocs/Connections with other Tools-ch.md @@ -34,7 +34,7 @@ TDengine能够与开源数据可视化系统[Grafana](https://www.grafana.com/) ### 安装Grafana -目前TDengine支持Grafana 5.2.4以上的版本。用户可以根据当前的操作系统,到Grafana官网下载安装包,并执行安装。下载地址如下:https://grafana.com/grafana/download +目前TDengine支持Grafana 5.2.4以上的版本。用户可以根据当前的操作系统,到Grafana官网下载安装包,并执行安装。下载地址如下:https://grafana.com/grafana/download。 ### 配置Grafana @@ -42,43 +42,60 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafana目录 以CentOS 7.2操作系统为例,将tdengine目录拷贝到/var/lib/grafana/plugins目录下,重新启动grafana即可。 -### 使用Grafana +### 使用 Grafana -用户可以直接通过localhost:3000的网址,登录Grafana服务器(用户名/密码:admin/admin),配置TDengine数据源,如下图所示,此时可以在下拉列表中看到TDengine数据源。 +#### 配置数据源 -![img](../assets/clip_image001.png) +用户可以直接通过 localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示: -TDengine数据源中的HTTP配置里面的Host地址要设置为TDengine集群的中任意一台服务器的IP地址与TDengine RESTful接口的端口号(6020)。假设TDengine数据库与Grafana部署在同一机器,那么应输入:http://localhost:6020。 +![img](../assets/add_datasource1.jpg) -此外,还需配置登录TDengine的用户名与密码,然后点击下图中的Save&Test按钮保存。 +点击 `Add data source` 可进入新增数据源页面,在查询框中输入 TDengine 可选择添加,如下图所示: -![img](../assets/clip_image001-2474914.png) +![img](../assets/add_datasource2.jpg) - +进入数据源配置页面,按照默认提示修改相应配置即可: -然后,就可以在Grafana的数据源列表中看到刚创建好的TDengine的数据源: +![img](../assets/add_datasource3.jpg) -![img](../assets/clip_image001-2474939.png) +* Host: TDengine 集群的中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6020),默认 http://localhost:6020。 +* 
User:TDengine 用户名。 +* Password:TDengine 用户密码。 - +点击 `Save & Test` 进行测试,成功会有如下提示: -基于上面的步骤,就可以在创建Dashboard的时候使用TDengine数据源,如下图所示: +![img](../assets/add_datasource4.jpg) -![img](../assets/clip_image001-2474961.png) +#### 创建 Dashboard - +回到主界面创建 Dashboard,点击 Add Query 进入面板查询页面: -然后,可以点击Add Query按钮增加一个新查询。 +![img](../assets/create_dashboard1.jpg) -在INPUT SQL输入框中输入查询SQL语句,该SQL语句的结果集应为两行多列的曲线数据,例如SELECT count(*) FROM sys.cpu WHERE ts>=from and ts<​to interval(interval)。其中,from、to和interval为TDengine插件的内置变量,表示从Grafana插件面板获取的查询范围和时间间隔。 +如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 sql 进行查询,具体说明如下: -ALIAS BY输入框为查询的别名,点击GENERATE SQL 按钮可以获取发送给TDengine的SQL语句。如下图所示: +* INPUT SQL:输入要查询的语句(该 SQL 语句的结果集应为两列多行),例如:`select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)` ,其中,from、to 和 interval 为 TDengine插件的内置变量,表示从Grafana插件面板获取的查询范围和时间间隔。除了内置变量外,`也支持可以使用自定义模板变量`。 +* ALIAS BY:可设置当前查询别名。 +* GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。 + +按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下: -![img](../assets/clip_image001-2474987.png) +![img](../assets/create_dashboard2.jpg) - +> 关于如何使用Grafana创建相应的监测界面以及更多有关使用Grafana的信息,请参考Grafana官方的[文档](https://grafana.com/docs/)。 + +#### 导入 Dashboard + +在 Grafana 插件目录 /usr/local/taos/connector/grafana/tdengine/dashboard/ 下提供了一个 `tdengine-grafana.json` 可导入的 dashboard。 + +点击左侧 `Import` 按钮,并上传 `tdengine-grafana.json` 文件: + +![img](../assets/import_dashboard1.jpg) + +导入完成之后可看到如下效果: + +![img](../assets/import_dashboard2.jpg) -关于如何使用Grafana创建相应的监测界面以及更多有关使用Grafana的信息,请参考Grafana官方的[文档](https://grafana.com/docs/)。 ## Matlab diff --git a/documentation/webdocs/markdowndocs/Connector.md b/documentation/webdocs/markdowndocs/Connector.md index a8cea06cc82767d3b43b38a3637b3274c724f6f0..a0433d1f09d7c5f2ec1205f89d2efe638703dc7d 100644 --- a/documentation/webdocs/markdowndocs/Connector.md +++ b/documentation/webdocs/markdowndocs/Connector.md @@ -175,79 +175,135 @@ TDengine provides APIs for continuous query driven by time, which run queries pe ### 
C/C++ subscription API -For the time being, TDengine supports subscription on one table. It is implemented through periodic pulling from a TDengine server. +For the time being, TDengine supports subscription on one or multiple tables. It is implemented through periodic pulling from a TDengine server. -- `TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds)` - The API is used to start a subscription session by given a handle. The parameters required are _host_ (IP address of a TDenginer server), _user_ (username), _pass_ (password), _db_ (database to use), _table_ (table name to subscribe), _time_ (start time to subscribe, 0 for now), _mseconds_ (pulling period). If failed to open a subscription session, a _NULL_ pointer is returned. +* `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)` + The API is used to start a subscription session, it returns the subscription object on success and `NULL` in case of failure, the parameters are: + * **taos**: The database connnection, which must be established already. + * **restart**: `Zero` to continue a subscription if it already exits, other value to start from the beginning. + * **topic**: The unique identifier of a subscription. + * **sql**: A sql statement for data query, it can only be a `select` statement, can only query for raw data, and can only query data in ascending order of the timestamp field. + * **fp**: A callback function to receive query result, only used in asynchronization mode and should be `NULL` in synchronization mode, please refer below for its prototype. + * **param**: User provided additional parameter for the callback function. + * **interval**: Pulling interval in millisecond. Under asynchronization mode, API will call the callback function `fp` in this interval, system performance will be impacted if this interval is too short. 
Under synchronization mode, if the duration between two call to `taos_consume` is less than this interval, the second call blocks until the duration exceed this interval. -- `TAOS_ROW taos_consume(TAOS_SUB *tsub)` - The API used to get the new data from a TDengine server. It should be put in an infinite loop. The parameter _tsub_ is the handle returned by _taos_subscribe_. If new data are updated, the API will return a row of the result. Otherwise, the API is blocked until new data arrives. If _NULL_ pointer is returned, it means an error occurs. +* `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` + Prototype of the callback function, the parameters are: + * tsub: The subscription object. + * res: The query result. + * param: User provided additional parameter when calling `taos_subscribe`. + * code: Error code in case of failures. -- `void taos_unsubscribe(TAOS_SUB *tsub)` - Stop a subscription session by the handle returned by _taos_subscribe_. +* `TAOS_RES *taos_consume(TAOS_SUB *tsub)` + The API used to get the new data from a TDengine server. It should be put in an loop. The parameter `tsub` is the handle returned by `taos_subscribe`. This API should only be called in synchronization mode. If the duration between two call to `taos_consume` is less than pulling interval, the second call blocks until the duration exceed the interval. The API returns the new rows if new data arrives, or empty rowset otherwise, and if there's an error, it returns `NULL`. + +* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)` + + Stop a subscription session by the handle returned by `taos_subscribe`. If `keepProgress` is **not** zero, the subscription progress information is kept and can be reused in later call to `taos_subscribe`, the information is removed otherwise. -- `int taos_num_subfields(TAOS_SUB *tsub)` - The API used to get the number of fields in a row. 
+## Java Connector +To Java developers, TDengine provides `taos-jdbcdriver` according to the JDBC(3.0) API. Users can find and download it through [Sonatype Repository][1]. -- `TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub)` - The API used to get the description of each column. +Since the native language of TDengine is C, the necessary TDengine library should be checked before using the taos-jdbcdriver: -## Java Connector +* libtaos.so (Linux) + After TDengine is installed successfully, the library `libtaos.so` will be automatically copied to the `/usr/lib/`, which is the system's default search path. + +* taos.dll (Windows) + After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path. + +> Note: Please make sure that [TDengine Windows client][14] has been installed if developing on Windows. Now although TDengine client would be installed by default together with TDengine server, it can also be installed [alone][15]. + +Since TDengine is a time-series database, there are still some differences compared with traditional databases in using TDengine JDBC driver: +* TDengine doesn't allow to delete/modify a single record, and thus JDBC driver also has no such method. +* No support for transaction +* No support for union between tables +* No support for nested query,`There is at most one open ResultSet for each Connection. Thus, TSDB JDBC Driver will close current ResultSet if it is not closed and a new query begins`. + +## Version list of TAOS-JDBCDriver and required TDengine and JDK + +| taos-jdbcdriver | TDengine | JDK | +| --- | --- | --- | +| 1.0.3 | 1.6.1.x or higher | 1.8.x | +| 1.0.2 | 1.6.1.x or higher | 1.8.x | +| 1.0.1 | 1.6.1.x or higher | 1.8.x | -### JDBC Interface +## DataType in TDengine and Java -TDengine provides a JDBC driver `taos-jdbcdriver-x.x.x.jar` for Enterprise Java developers. 
TDengine's JDBC Driver is implemented as a subset of the standard JDBC 3.0 Specification and supports the most common Java development frameworks. The driver have been published to dependency repositories such as Sonatype Maven Repository, and users could refer to the following `pom.xml` configuration file. +The datatypes in TDengine include timestamp, number, string and boolean, which are converted as follows in Java: + +| TDengine | Java | +| --- | --- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT, TINYINT |java.lang.Short | +| BOOL | java.lang.Boolean | +| BINARY, NCHAR | java.lang.String | + +## How to get TAOS-JDBC Driver + +### maven repository + +taos-jdbcdriver has been published to [Sonatype Repository][1]: +* [sonatype][8] +* [mvnrepository][9] +* [maven.aliyun][10] + +Using the following pom.xml for maven projects ```xml - - - oss-sonatype - oss-sonatype - https://oss.sonatype.org/content/groups/public - - - com.taosdata.jdbc taos-jdbcdriver - 1.0.1 + 1.0.3 ``` -Please note the JDBC driver itself relies on a native library written in C. On a Linux OS, the driver relies on a `libtaos.so` native library, where .so stands for "Shared Object". After the successful installation of TDengine on Linux, `libtaos.so` should be automatically copied to `/usr/local/lib/taos` and added to the system's default search path. On a Windows OS, the driver relies on a `taos.dll` native library, where .dll stands for "Dynamic Link Library". After the successful installation of the TDengine client on Windows, the `taos-jdbcdriver.jar` file can be found in `C:/TDengine/driver/JDBC`; the `taos.dll` file can be found in `C:/TDengine/driver/C` and should have been automatically copied to the system's searching path `C:/Windows/System32`. 
+### JAR file from the source code -Developers can refer to the Oracle's official JDBC API documentation for detailed usage on classes and methods. However, there are some differences of connection configurations and supported methods in the driver implementation between TDengine and traditional relational databases. +After downloading the [TDengine][3] source code, execute `mvn clean package` in the directory `src/connector/jdbc` and then the corresponding jar file is generated. -For database connections, TDengine's JDBC driver has the following configurable parameters in the JDBC URL. The standard format of a TDengine JDBC URL is: +## Usage -`jdbc:TSDB://{host_ip}:{port}/{database_name}?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +### get the connection -where `{}` marks the required parameters and `[]` marks the optional. The usage of each parameter is pretty straightforward: +```java +Class.forName("com.taosdata.jdbc.TSDBDriver"); +String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` +> `6030` is the default port and `log` is the default database for system monitor. 
-* user - login user name for TDengine; by default, it's `root` -* password - login password; by default, it's `taosdata` -* charset - the client-side charset; by default, it's the operation system's charset -* cfgdir - the directory of TDengine client configuration file; by default it's `/etc/taos` on Linux and `C:\TDengine/cfg` on Windows -* locale - the language environment of TDengine client; by default, it's the operation system's locale -* timezone - the timezone of the TDengine client; by default, it's the operation system's timezone +A normal JDBC URL looks as follows: +`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` -All parameters can be configured at the time when creating a connection using the java.sql.DriverManager class, for example: +values in `{}` are necessary while values in `[]` are optional。Each option in the above URL denotes: -```java -import java.sql.Connection; -import java.sql.DriverManager; -import java.util.Properties; -import com.taosdata.jdbc.TSDBDriver; +* user:user name for login, defaultly root。 +* password:password for login,defaultly taosdata。 +* charset:charset for client,defaultly system charset +* cfgdir:log directory for client, defaultly _/etc/taos/_ on Linux and _C:/TDengine/cfg_ on Windows。 +* locale:language for client,defaultly system locale。 +* timezone:timezone for client,defaultly system timezone。 + +The options above can be configures (`ordered by priority`): +1. JDBC URL + As explained above. +2. 
java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps) +```java public Connection getConn() throws Exception{ - Class.forName("com.taosdata.jdbc.TSDBDriver"); - String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata"; + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata"; Properties connProps = new Properties(); connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); @@ -260,42 +316,316 @@ public Connection getConn() throws Exception{ } ``` -Except `cfgdir`, all the parameters listed above can also be configured in the configuration file. The properties specified when calling DriverManager.getConnection() has the highest priority among all configuration methods. The JDBC URL has the second-highest priority, and the configuration file has the lowest priority. The explicitly configured parameters in a method with higher priorities always overwrite that same parameter configured in methods with lower priorities. For example, if `charset` is explicitly configured as "UTF-8" in the JDBC URL and "GKB" in the `taos.cfg` file, then "UTF-8" will be used. +3. 
Configuration file (taos.cfg) + + Default configuration file is _/var/lib/taos/taos.cfg_ On Linux and _C:\TDengine\cfg\taos.cfg_ on Windows +```properties +# client default username +# defaultUser root + +# client default password +# defaultPass taosdata + +# default system charset +# charset UTF-8 + +# system locale +# locale en_US.UTF-8 +``` +> More options can refer to [client configuration][13] + +### Create databases and tables + +```java +Statement stmt = conn.createStatement(); + +// create database +stmt.executeUpdate("create database if not exists db"); + +// use database +stmt.executeUpdate("use db"); + +// create table +stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); +``` +> Note: if no step like `use db`, the name of database must be added as prefix like _db.tb_ when operating on tables + +### Insert data + +```java +// insert data +int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); + +System.out.println("insert " + affectedRows + " rows."); +``` +> _now_ is the server time. +> _now+1s_ is 1 second later than current server time. The time unit includes: _a_(millisecond), _s_(second), _m_(minute), _h_(hour), _d_(day), _w_(week), _n_(month), _y_(year). + +### Query database + +```java +// query data +ResultSet resultSet = stmt.executeQuery("select * from tb"); + +Timestamp ts = null; +int temperature = 0; +float humidity = 0; +while(resultSet.next()){ + + ts = resultSet.getTimestamp(1); + temperature = resultSet.getInt(2); + humidity = resultSet.getFloat("humidity"); + + System.out.printf("%s, %d, %s\n", ts, temperature, humidity); +} +``` +> query is consistent with relational database. The subscript start with 1 when retrieving return results. It is recommended to use the column name to retrieve results. 
+ +### Close all + +```java +resultSet.close(); +stmt.close(); +conn.close(); +``` +> `please make sure the connection is closed to avoid the error like connection leakage` + +## Using connection pool + +**HikariCP** + +* dependence in pom.xml: +```xml + + com.zaxxer + HikariCP + 3.4.1 + +``` + +* Examples: +```java + public static void main(String[] args) throws SQLException { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + config.setPassword("taosdata"); + + config.setMinimumIdle(3); //minimum number of idle connection + config.setMaximumPoolSize(10); //maximum number of connection in the pool + config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool + config.setIdleTimeout(60000); // max idle time for recycle idle connection + config.setConnectionTestQuery("describe log.dn"); //validation query + config.setValidationTimeout(3000); //validation query timeout + + HikariDataSource ds = new HikariDataSource(config); //create datasource + + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... + + connection.close(); // put back to conneciton pool +} +``` +> The close() method will not close the connection from HikariDataSource.getConnection(). Instead, the connection is put back to the connection pool. 
+> More instructions can refer to [User Guide][5] + +**Druid** + +* dependency in pom.xml: + +```xml + + com.alibaba + druid + 1.1.20 + +``` + +* Examples: +```java +public static void main(String[] args) throws Exception { + Properties properties = new Properties(); + properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver"); + properties.put("url","jdbc:TAOS://127.0.0.1:6030/log"); + properties.put("username","root"); + properties.put("password","taosdata"); + + properties.put("maxActive","10"); //maximum number of connection in the pool + properties.put("initialSize","3");//initial number of connection + properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool + properties.put("minIdle","3");//minimum number of connection in the pool + + properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection + + properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle + properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle + + properties.put("validationQuery","describe log.dn"); //validation query + properties.put("testWhileIdle","true"); // test connection while idle + properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true + properties.put("testOnReturn","false"); // don't need while testWhileIdle is true + + //create druid datasource + DataSource ds = DruidDataSourceFactory.createDataSource(properties); + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... + + connection.close(); // put back to conneciton pool +} +``` +> More instructions can refer to [User Guide][6] + +**Notice** +* TDengine `v1.6.4.1` provides a function `select server_status()` to check heartbeat. It is highly recommended to use this function for `Validation Query`. 
+ +As follows,`1` will be returned if `select server_status()` is successfully executed。 +```shell +taos> select server_status(); +server_status()| +================ +1 | +Query OK, 1 row(s) in set (0.000141s) +``` + +## Integrated with framework -Although the JDBC driver is implemented following the JDBC standard as much as possible, there are major differences between TDengine and traditional databases in terms of data models that lead to the differences in the driver implementation. Here is a list of head-ups for developers who have plenty of experience on traditional databases but little on TDengine: +* Please refer to [SpringJdbcTemplate][11] if using taos-jdbcdriver in Spring JdbcTemplate +* Please refer to [springbootdemo][12] if using taos-jdbcdriver in Spring Boot -* TDengine does NOT support updating or deleting a specific record, which leads to some unsupported methods in the JDBC driver -* TDengine currently does not support `join` or `union` operations, and thus, is lack of support for associated methods in the JDBC driver -* TDengine supports batch insertions which are controlled at the level of SQL statement writing instead of API calls -* TDengine doesn't support nested queries and neither does the JDBC driver. Thus for each established connection to TDengine, there should be only one open result set associated with it +## FAQ + +* java.lang.UnsatisfiedLinkError: no taos in java.library.path + + **Cause**:The application program cannot find Library function _taos_ + + **Answer**:Copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\` on Windows and make a soft link through ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` on Linux. + +* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform + + **Cause**:Currently TDengine only supports 64bit JDK + + **Answer**:re-install 64bit JDK. 
+ +* For other questions, please refer to [Issues][7] -All the error codes and error messages can be found in `TSDBError.java` . For a more detailed coding example, please refer to the demo project `JDBCDemo` in TDengine's code examples. ## Python Connector -### Install TDengine Python client +### Pre-requirement +* TDengine installed, TDengine-client installed if on Windows [(Windows TDengine client installation)](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口) +* python 2.7 or >= 3.4 +* pip installed + +### Installation +#### Linux -Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding two python versions. Please choose the correct package to install. Users can use _pip_ command to install: +Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use _pip_ command to install: ```cmd -pip install src/connector/python/[linux|Windows]/python2/ +pip install src/connector/python/linux/python3/ ``` or ``` -pip install src/connector/python/[linux|Windows]/python3/ +pip install src/connector/python/linux/python2/ +``` +#### Windows +Assumed the Windows TDengine client has been installed , copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then enter the _cmd_ Windows command interface ``` +cd C:\TDengine\connector\python\windows +pip install python3\ +``` +or +``` +cd C:\TDengine\connector\python\windows +pip install python2\ +``` +*If _pip_ command is not installed on the system, users can choose to install pip or just copy the _taos_ directory in the python client directory to the application directory to use. 
-If _pip_ command is not installed on the system, users can choose to install pip or just copy the _taos_ directory in the python client directory to the application directory to use. +### Usage +#### Examples +* import TDengine module -### Python client interfaces +```python +import taos +``` +* get the connection +```python +conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos") +c1 = conn.cursor() +``` +*host is the IP of TDengine server, and config is the directory where the TDengine client configuration file exists +* insert records into the database +```python +import datetime + +# create a database +c1.execute('create database db') +c1.execute('use db') +# create a table +c1.execute('create table tb (ts timestamp, temperature int, humidity float)') +# insert a record +start_time = datetime.datetime(2019, 11, 1) +affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time) +# insert multiple records in a batch +time_interval = datetime.timedelta(seconds=60) +sqlcmd = ['insert into tb values'] +for irow in range(1,11): + start_time += time_interval + sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2)) +affected_rows = c1.execute(' '.join(sqlcmd)) +``` +* query the database +```python +c1.execute('select * from tb') +# fetch all returned results +data = c1.fetchall() +# data is a list of returned rows with each row being a tuple +numOfRows = c1.rowcount +numOfCols = len(c1.description) +for irow in range(numOfRows): + print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2])) + +# use the cursor as an iterator to retrieve all returned results +c1.execute('select * from tb') +for data in c1: + print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2])) +``` -To use TDengine Python client, import TDengine module at first: +* create a subscription +```python +# Create a subscription with topic 'test' and consumption interval 1000ms. 
+# The first argument is True means to restart the subscription; +# if the subscription with topic 'test' has already been created, then pass +# False to this argument means to continue the existing subscription. +sub = conn.subscribe(True, "test", "select * from meters;", 1000) +``` +* consume a subscription ```python -import taos +data = sub.consume() +for d in data: + print(d) +``` + +* close the subscription +```python +sub.close() +``` + +* close the connection +```python +c1.close() +conn.close() ``` +#### Help information Users can get module information from Python help interface or refer to our [python code example](). We list the main classes and methods below: @@ -569,3 +899,18 @@ An example of using the NodeJS connector to create a table with weather data and An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) +[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[3]: https://github.com/taosdata/TDengine +[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/ +[5]: https://github.com/brettwooldridge/HikariCP +[6]: https://github.com/alibaba/druid +[7]: https://github.com/taosdata/TDengine/issues +[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[10]: https://maven.aliyun.com/mvn/search +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo +[13]: 
https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE +[14]: https://www.taosdata.com/cn/documentation/connector/#Windows%E5%AE%A2%E6%88%B7%E7%AB%AF%E5%8F%8A%E7%A8%8B%E5%BA%8F%E6%8E%A5%E5%8F%A3 +[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B diff --git a/documentation/webdocs/markdowndocs/Super Table-ch.md b/documentation/webdocs/markdowndocs/Super Table-ch.md index e75a8d46c38e3501b2c17b4a05b68c0e8fa4a707..38e6f8c17f900ad9597366f5131b3f1eebeb8863 100644 --- a/documentation/webdocs/markdowndocs/Super Table-ch.md +++ b/documentation/webdocs/markdowndocs/Super Table-ch.md @@ -1,6 +1,6 @@ # 超级表STable:多表聚合 -TDengine要求每个数据采集点单独建表,这样能极大提高数据的插入/查询性能,但是导致系统中表的数量猛增,让应用对表的维护以及聚合、统计操作难度加大。为降低应用的开发难度,TDengine引入了超级表STable (Super Table)的概念。 +TDengine要求每个数据采集点单独建表。独立建表的模式能够避免写入过程中的同步加锁,因此能够极大地提升数据的插入/查询性能。但是独立建表意味着系统中表的数量与采集点的数量在同一个量级。如果采集点众多,将导致系统中表的数量也非常庞大,让应用对表的维护以及聚合、统计操作难度加大。为降低应用的开发难度,TDengine引入了超级表(Super Table, 简称为STable)的概念。 ## 什么是超级表 @@ -9,14 +9,14 @@ STable是同一类型数据采集点的抽象,是同类型采集实例的集 TDengine扩展标准SQL语法用于定义STable,使用关键词tags指定标签信息。语法如下: ```mysql -CREATE TABLE ( TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …) +CREATE TABLE ( TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …) ``` -其中tag_name是标签名,tag_type是标签的数据类型。标签可以使用时间戳之外的其他TDengine支持的数据类型,标签的个数最多为6个,名字不能与系统关键词相同,也不能与其他列名相同。如: +其中tag_name是标签名,tag_type是标签的数据类型。标签可以使用时间戳之外的其他TDengine支持的数据类型,标签的个数最多为32个,名字不能与系统关键词相同,也不能与其他列名相同。如: ```mysql -create table thermometer (ts timestamp, degree float) -tags (location binary(20), type int) +CREATE TABLE thermometer (ts timestamp, degree float) +TAGS (location binary(20), type int) ``` 上述SQL创建了一个名为thermometer的STable,带有标签location和标签type。 @@ -30,7 +30,7 @@ CREATE TABLE USING TAGS (tag_value1,...) 
沿用上面温度计的例子,使用超级表thermometer建立单个温度计数据表的语句如下: ```mysql -create table t1 using thermometer tags ('beijing', 10) +CREATE TABLE t1 USING thermometer TAGS ('beijing', 10) ``` 上述SQL以thermometer为模板,创建了名为t1的表,这张表的Schema就是thermometer的Schema,但标签location值为'beijing',标签type值为10。 @@ -72,7 +72,7 @@ STable从属于库,一个STable只属于一个库,但一个库可以有一 DROP TABLE ``` - Note: 删除STable不会级联删除通过STable创建的表;相反删除STable时要求通过该STable创建的表都已经被删除。 + Note: 删除STable时,所有通过该STable创建的表都将被删除。 - 查看属于某STable并满足查询条件的表 diff --git a/documentation/webdocs/markdowndocs/Super Table.md b/documentation/webdocs/markdowndocs/Super Table.md index 609dd11bd278da3398330fa33f857fac65ffb3d5..efc95c5f79216b2a887f26a565b4e5e123768c6b 100644 --- a/documentation/webdocs/markdowndocs/Super Table.md +++ b/documentation/webdocs/markdowndocs/Super Table.md @@ -142,7 +142,7 @@ It lists the STable's schema and tags DROP TABLE ``` -To delete a STable, all the tables created via this STable shall be deleted first, otherwise, it will fail. +To delete a STable, all the tables created via this STable will be deleted first. 
### List the Associated Tables of a STable diff --git a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md index 6a8549bbd237a3413abfb28f26e0064140fe47d9..cd184cbc71b36ee8cac3738c4f28772547c9d8c0 100644 --- a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md @@ -1,6 +1,8 @@ # TAOS SQL -TDengine提供类似SQL语法,用户可以在TDengine Shell中使用SQL语句操纵数据库,也可以通过C/C++, Java(JDBC), Python, Go等各种程序来执行SQL语句。 +本文档说明TAOS SQL支持的语法规则、主要查询功能、支持的SQL查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的SQL语言的基础。 + +TAOS SQL是用户对TDengine进行数据写入和查询的主要工具。TAOS SQL为了便于用户快速上手,在一定程度上提供类似于标准SQL类似的风格和模式。严格意义上,TAOS SQL并不是也不试图提供SQL标准的语法。此外,由于TDengine针对的时序性结构化数据不提供修改和更新功能,因此在TAO SQL中不提供数据更新和数据删除的相关功能。 本章节SQL语法遵循如下约定: @@ -9,15 +11,46 @@ TDengine提供类似SQL语法,用户可以在TDengine Shell中使用SQL语句 - | 表示多选一,选择其中一个即可,但不能输入|本身 - … 表示前面的项可重复多个 +为更好地说明SQL语法的规则及其特点,本文假设存在一个数据集。该数据集是针对两种类型的设备温度(湿度)传感器、气压(海拔)传感器建立的数据模型。 +针对温度传感器,具有超级表(super table) temp_stable。其数据模型如下: +``` +taos> describe temp_stable; +Field | Type | Length | Note | +======================================================================================================= +ts |TIMESTAMP | 8 | | +temperature |FLOAT | 4 | | +humidity |TINYINT | 1 | | +status |TINYINT | 1 | | +deviceid |BIGINT | 12 |tag | +location |BINARY | 20 |tag | +``` +数据集包含2个温度传感器的数据,按照TDengine的建模规则,对应2个子表,其名称分别是 temp_tb_1,temp_tb_2 。 +针对压力(海拔)传感器,具有超级表(super table) pressure_stable。其数据模型如下: +数据集包含2个压力传感器数据,对应2个子表,分别是 press_tb_1,press_tb_2。 + +```text +taos> describe pressure_stable; +Field | Type | Length | Note | +======================================================================================================= +ts |TIMESTAMP | 8 | | +height |FLOAT | 4 | | +pressure |FLOAT | 4 | | +devstat |TINYINT | 1 | | +id |BIGINT | 8 |tag | +city |NCHAR | 20 |tag | +longitude |FLOAT | 4 |tag | +latitude |FLOAT | 4 |tag | +``` ## 支持的数据类型 使用TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则: -- 时间格式为YYYY-MM-DD HH:mm:ss.MS, 
默认时间分辨率为毫秒。比如:2017-08-12 18:25:58.128 +- 时间格式为```YYYY-MM-DD HH:mm:ss.MS```, 默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128``` - 内部函数now是服务器的当前时间 - 插入记录时,如果时间戳为0,插入数据时使用服务器当前时间 - Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数 - 时间可以加减,比如 now-2h,表明查询时刻向前推2个小时(最近2小时)。数字后面的时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。比如select * from t1 where ts > now-2w and ts <= now-1w, 表示查询两周前整整一周的数据 +- TDengine暂不支持时间窗口按照自然年和自然月切分。Where条件中的时间窗口单位的换算关系如下:interval(1y) 等效于 interval(365d), interval(1n) 等效于 interval(30d), interval(1w) 等效于 interval(7d) TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMicrosecond就可支持微秒。 @@ -26,13 +59,13 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic | | 类型 | Bytes | 说明 | | ---- | :-------: | ------ | ------------------------------------------------------------ | | 1 | TIMESTAMP | 8 | 时间戳。最小精度毫秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 | -| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31被用作Null值 | -| 3 | BIGINT | 8 | 长整型,范围 [-2^59, 2^59] | +| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31用作Null | +| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63用于NULL | | 4 | FLOAT | 4 | 浮点型,有效位数6-7,范围 [-3.4E38, 3.4E38] | | 5 | DOUBLE | 8 | 双精度浮点型,有效位数15-16,范围 [-1.7E308, 1.7E308] | | 6 | BINARY | 自定义 | 用于记录字符串,最长不能超过504 bytes。binary仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如binary(20)定义了最长为20个字符的字符串,每个字符占1byte的存储空间。如果用户字符串超出20字节,将被自动截断。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示, 即 **\’**。 | -| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767] | -| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127] | +| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768用于NULL | +| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128用于NULL | | 9 | BOOL | 1 | 布尔型,{true, false} | | 10 | NCHAR | 自定义 | 用于记录非ASCII字符串,如中文字符。每个nchar字符占用4bytes的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 **\’**。nchar使用时须指定字符串大小,类型为nchar(10)的列表示此列的字符串最多存储10个nchar字符,会固定占用40bytes的空间。如用户字符串长度超出声明长度,则将被自动截断。 | @@ -158,25 +191,179 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ``` 同时向表tb1_name和tb2_name中按列分别插入多条记录 
-注意:对同一张表,插入的新记录的时间戳必须递增,否则会跳过插入该条记录。如果时间戳为0,系统将自动使用服务器当前时间作为该记录的时间戳。 +注意:1、对同一张表,插入的新记录的时间戳必须递增,否则会跳过插入该条记录。如果时间戳为0,系统将自动使用服务器当前时间作为该记录的时间戳。 + 2、允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。 -**IMPORT**:如果需要将时间戳小于最后一条记录时间的记录写入到数据库中,可使用IMPORT替代INSERT命令,IMPORT的语法与INSERT完全一样。如果同时IMPORT多条记录,需要保证一批记录是按时间戳排序好的。 +**IMPORT**:如果需要将时间戳小于最后一条记录时间的记录写入到数据库中,可使用IMPORT替代INSERT命令,IMPORT的语法与INSERT完全一样。 ## 数据查询 -###查询语法是: +### 查询语法: ```mysql -SELECT {* | expr_list} FROM tb_name - [WHERE where_condition] - [ORDER BY _c0 { DESC | ASC }] - [LIMIT limit [, OFFSET offset]] - [>> export_file] - -SELECT function_list FROM tb_name - [WHERE where_condition] - [LIMIT limit [, OFFSET offset]] - [>> export_file] +SELECT [DISTINCT] select_expr [, select_expr ...] +FROM {tb_name_list} +[WHERE where_condition] +[INTERVAL [interval_offset,] interval_val] +[FILL fill_val] +[SLIDING fill_val] +[GROUP BY col_list] +[ORDER BY col_list { DESC | ASC }] +[HAVING expr_list] +[SLIMIT limit_val [, SOFFSET offset_val]] +[LIMIT limit_val [, OFFSET offset_val]] +[>> export_file] +``` +#### SELECT子句 +一个选择子句可以是联合查询(UNION)和另一个查询的子查询(SUBQUERY)。 + +##### 通配符 +通配符 * 可以用于代指全部列。对于普通表,结果中只有普通列。 +``` +taos> select * from temp_tb_1; +ts | temperature |humidity|status| +============================================================ +19-04-28 14:22:07.000| 20.00000 | 34 | 1 | +19-04-28 14:22:08.000| 21.50000 | 38 | 1 | +19-04-28 14:22:09.000| 21.30000 | 38 | 1 | +19-04-28 14:22:10.000| 21.20000 | 38 | 1 | +19-04-28 14:22:11.000| 21.30000 | 35 | 0 | +19-04-28 14:22:12.000| 22.00000 | 34 | 0 | +``` +在针对超级表,通配符包含 _标签列_ 。 +``` +taos> select * from temp_stable; +ts | temperature |humidity|status| deviceid | location | +============================================================================================== +19-04-28 14:22:07.000| 21.00000 | 37 | 1 |54197 |beijing | +19-04-28 14:22:07.000| 20.00000 | 34 | 1 |91234 |beijing | 
+19-04-28 14:22:08.000| 21.50000 | 38 | 1 |91234 |beijing | +19-04-28 14:22:09.000| 21.30000 | 38 | 1 |91234 |beijing | +19-04-28 14:22:10.000| 21.20000 | 38 | 1 |91234 |beijing | +19-04-28 14:22:11.000| 21.30000 | 35 | 0 |91234 |beijing | +19-04-28 14:22:12.000| 22.00000 | 34 | 0 |91234 |beijing | +``` +通配符支持表名前缀,以下两个SQL语句均为返回全部的列: +``` +select * from temp_tb_1; +select temp_tb_1.* from temp_tb_1; +``` +在Join查询中,带前缀的\*和不带前缀\*返回的结果有差别, \*返回全部表的所有列数据(不包含标签),带前缀的通配符,则只返回该表的列数据。 +``` +taos> select * from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts; +ts | temperature |humidity|status| ts | temperature |humidity|status| +======================================================================================================================== +19-04-28 14:22:07.000| 20.00000 | 34 | 1 | 19-04-28 14:22:07.000| 21.00000 | 37 | 1 | +``` + +``` +taos> select temp_tb_1.* from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts; +ts | temperature |humidity|status| +============================================================ +19-04-28 14:22:07.000| 20.00000 | 34 | 1 | +``` + +在使用SQL函数来进行查询过程中,部分SQL函数支持通配符操作。其中的区别在于: +```count(\*)```函数只返回一列。```first```、```last```、```last_row```函数则是返回全部列。 + +``` +taos> select count(*) from temp_tb_1; +count(*) | +====================== +1 | +``` + +``` +taos> select first(*) from temp_tb_1; +first(ts) | first(temperature) |first(humidity)|first(status)| +========================================================================== +19-04-28 14:22:07.000| 20.00000 | 34 | 1 | +``` + +#### 结果集列名 + +```SELECT```子句中,如果不指定返回结果集合的列名,结果集列名称默认使用```SELECT```子句中的表达式名称作为列名称。此外,用户可使用```AS```来重命名返回结果集合中列的名称。例如: +``` +taos> select ts, ts as primary_key_ts from temp_tb_1; +ts | primary_key_ts | +============================================== +19-04-28 14:22:07.000| 19-04-28 14:22:07.000| +``` +但是针对```first(*)```、```last(*)```、```last_row(*)```不支持针对单列的重命名。 + +#### DISTINCT修饰符* +只能用于修饰标签列(TAGS)的结果,不能用于修饰普通列来获得去重后的结果。并且应用```DISTINCT```以后,只能进行单列的标签输出。 
+```count(distinct column_name)```用以返回近似的不重复结果的数量,该结果是近似值。 + +#### 隐式结果列 +```Select_exprs```可以是表所属列的列名,也可以是基于列的函数表达式或计算式,数量的上限256个。当用户使用了```interval```或```group by tags```的子句以后,在最后返回结果中会强制返回时间戳列(第一列)和group by子句中的标签列。后续的版本中可以支持关闭group by子句中隐式列的输出,列输出完全由select子句控制。 + +#### 表(超级表)列表 + +FROM关键字后面可以是若干个表(超级表)列表,也可以是子查询的结果。 +如果没有指定用户的当前数据库,可以在表名称之前使用数据库的名称来指定表所属的数据库。例如:```sample.temp_tb_1``` 方式来跨库使用表。 +``` +SELECT * FROM sample.temp_tb_1; +------------------------------ +use sample; +SELECT * FROM temp_tb_1; +``` +From子句中列表可以使用别名来让SQL整体更加简单。 +``` +SELECT t.ts FROM temp_tb_1 t ; +``` +> 暂不支持FROM子句的表别名 + +#### 特殊功能 +部分特殊的查询功能可以不使用FROM子句执行。获取当前所在的数据库 database() +``` +taos> SELECT database(); +database() | +================================= +sample | +``` +如果登录的时候没有指定默认数据库,且没有使用```use``命令切换数据,则返回NULL。 +``` +taos> select database(); +database() | +================================= +NULL | +``` +获取服务器和客户端版本号: +``` +SELECT client_version() +SELECT server_version() +``` +服务器状态检测语句。如果服务器正常,返回一个数字(例如 1)。如果服务器异常,返回error code。该SQL语法能兼容连接池对于TDengine状态的检查及第三方工具对于数据库服务器状态的检查。并可以避免出现使用了错误的心跳检测SQL语句导致的连接池连接丢失的问题。 +``` +SELECT server_status() +SELECT server_status() AS result +``` +#### TAOS SQL中特殊关键词 + + > TBNAME: 在超级表查询中可视为一个特殊的标签,代表查询涉及的子表名
    + \_c0: 表示表(超级表)的第一列 + +#### 小技巧 +获取一个超级表所有的子表名及相关的标签信息: +``` +SELECT TBNAME, location FROM temp_stable +``` +统计超级表下辖子表数量: +``` +SELECT COUNT(TBNAME) FROM temp_stable +``` +以上两个查询均只支持在Where条件子句中添加针对标签(TAGS)的过滤条件。例如: +``` +taos> select count(tbname) from temp_stable; +count(tbname) | +====================== +2 | + +taos> select count(tbname) from temp_stable where deviceid > 60000; +count(tbname) | +====================== +1 | ``` - 可以使用* 返回所有列,或指定列名。可以对数字列进行四则运算,可以给输出的列取列名 @@ -237,7 +424,7 @@ SELECT function_list FROM tb_name ###聚合函数 -TDengine支持针对数据的聚合查询。提供支持的聚合和提取函数如下表: +TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数如下: - **COUNT** ```mysql @@ -260,13 +447,14 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和提取函数 适用于:表、超级表。 -- **WAVG** +- **TWA** ```mysql - SELECT WAVG(field_name) FROM tb_name WHERE clause + SELECT TWA(field_name) FROM tb_name WHERE clause ``` - 功能说明:统计表/超级表中某列在一段时间内的时间加权平均。 + 功能说明:时间加权平均函数。统计表/超级表中某列在一段时间内的时间加权平均。 返回结果数据类型:双精度浮点数Double。 - 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 说明:时间加权平均(time weighted average, TWA)查询需要指定查询时间段的 _开始时间_ 和 _结束时间_ 。 适用于:表、超级表。 @@ -370,7 +558,15 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和提取函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 说明:*k*值取值范围0≤*k*≤100,为0的时候等同于MIN,为100的时候等同于MAX。 - +- **APERCENTILE** + ```mysql + SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause] + ``` + 功能说明:统计表中某列的值百分比分位数,与PERCENTILE函数相似,但是返回近似结果。 + 返回结果数据类型: 双精度浮点数Double。 + 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 说明:*k*值取值范围0≤*k*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数 + - **LAST_ROW** ```mysql SELECT LAST_ROW(field_name) FROM { tb_name | stb_name } diff --git a/documentation/webdocs/markdowndocs/TAOS SQL.md b/documentation/webdocs/markdowndocs/TAOS SQL.md index 870529417fbb4dd9dd1e73bb253962e9293e94f4..99aa73b4354c60fb3c50ff8b3d2454eb6691c340 100644 --- a/documentation/webdocs/markdowndocs/TAOS SQL.md +++ b/documentation/webdocs/markdowndocs/TAOS SQL.md @@ -181,9 
+181,10 @@ All the keywords in a SQL statement are case-insensitive, but strings values are tb2_name (tb2_field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...) ``` -Note: For a table, the new record must have a timestamp bigger than the last data record, otherwise, it will be discarded and not inserted. If the timestamp is 0, the time stamp will be set to the system time on the server. - -**IMPORT**: If you do want to insert a historical data record into a table, use IMPORT command instead of INSERT. IMPORT has the same syntax as INSERT. If you want to import a batch of historical records, the records must be ordered by the timestamp, otherwise, TDengine won't handle it in the right way. +Note: 1. For a table, the new record must have a timestamp bigger than the last data record, otherwise, it will be discarded and not inserted. If the timestamp is 0, the time stamp will be set to the system time on the server. + 2.The timestamp of the oldest record allowed to be inserted is relative to the current server time, minus the configured keep value (the number of days the data is retained), and the timestamp of the latest record allowed to be inserted is relative to the current server time, plus the configured days value (the time span in which the data file stores data, in days). Both keep and days can be specified when creating the database. The default values are 3650 days and 10 days, respectively. + +**IMPORT**: If you do want to insert a historical data record into a table, use IMPORT command instead of INSERT. IMPORT has the same syntax as INSERT. 
## Data Query diff --git a/documentation/webdocs/markdowndocs/administrator-ch.md b/documentation/webdocs/markdowndocs/administrator-ch.md index ed822fb8c95b752b9844217915e0af34a3817643..35beb610f2789ea9d521eaa0314160a4ddff4025 100644 --- a/documentation/webdocs/markdowndocs/administrator-ch.md +++ b/documentation/webdocs/markdowndocs/administrator-ch.md @@ -2,15 +2,15 @@ ## 文件目录结构 -安装TDengine后,默认会在操作系统中生成下列目录或文件: +安装TDengine的过程中,安装程序将在操作系统中创建以下目录或文件: | 目录/文件 | 说明 | | ---------------------- | :------------------------------------------------| -| /etc/taos/taos.cfg | TDengine默认[配置文件] | -| /usr/local/taos/driver | TDengine动态链接库目录 | -| /var/lib/taos | TDengine默认数据文件目录,可通过[配置文件]修改位置. | -| /var/log/taos | TDengine默认日志文件目录,可通过[配置文件]修改位置 | -| /usr/local/taos/bin | TDengine可执行文件目录 | +| /etc/taos/taos.cfg | 默认[配置文件] | +| /usr/local/taos/driver | 动态链接库目录 | +| /var/lib/taos | 默认数据文件目录,可通过[配置文件]修改位置. | +| /var/log/taos | 默认日志文件目录,可通过[配置文件]修改位置 | +| /usr/local/taos/bin | 可执行文件目录 | ### 可执行文件 @@ -19,33 +19,126 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 - _taosd_:TDengine服务端可执行文件 - _taos_: TDengine Shell可执行文件 - _taosdump_:数据导出工具 -- *rmtaos*: 一个卸载TDengine的脚本, 请谨慎执行 +- *rmtaos*: 卸载TDengine的脚本, 该脚本会删除全部的程序和数据文件。请务必谨慎执行,如非必须不建议使用。 您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录 ## 服务端配置 -TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修改配置参数,以满足不同场景的需求。配置文件的缺省位置在/etc/taos目录,可以通过taosd命令行执行参数-c指定配置文件目录。比如taosd -c /home/user来指定配置文件位于/home/user这个目录。 +TDengine系统后台服务程序是`taosd`,其启动时候读取的配置文件缺省目录是`/etc/taos`。可以通过命令行执行参数-c指定配置文件目录,比如 +``` +taosd -c /home/user +``` +指定`taosd`启动的时候读取`/home/user`目录下的配置文件taos.cfg。 下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节。**注意:配置修改后,需要重启*taosd*服务才能生效。** -- internalIp: 对外提供服务的IP地址,默认取第一个IP地址 -- mgmtShellPort:管理节点与客户端通信使用的TCP/UDP端口号(默认值是6030)。此端口号在内向后连续的5个端口都会被UDP通信占用,即UDP占用[6030-6034],同时TCP通信也会使用端口[6030]。 -- vnodeShellPort:数据节点与客户端通信使用的TCP/UDP端口号(默认值是6035)。此端口号在内向后连续的5个端口都会被UDP通信占用,即UDP占用[6035-6039],同时TCP通信也会使用端口[6035] -- httpPort:数据节点对外提供RESTful服务使用TCP,端口号[6020] -- 
dataDir: 数据文件目录,缺省是/var/lib/taos -- maxUsers:用户的最大数量 -- maxDbs:数据库的最大数量 -- maxTables:数据表的最大数量 -- enableMonitor: 系统监测标志位,0:关闭,1:打开 -- logDir: 日志文件目录,缺省是/var/log/taos -- numOfLogLines:日志文件的最大行数 -- debugFlag: 系统debug日志开关,131:仅错误和报警信息,135:所有 +**privateIp** +- 默认值:物理节点IP地址列表中的第一个IP地址 + +对外提供服务的IP地址。 + +**publicIp** +- 默认值:与privateIp相同 + +对于阿里等云平台,此为公网IP地址,publicIp在内部映射为对应的privateIP地址,仅对企业版有效。 + +**masterIp** +- 默认值:与privateIp相同 + +集群内第一个物理节点的privateIp地址,仅对企业版有效。 + +**secondIp** +- 默认值:与privateIp相同 + +集群内第二个物理节点的privateIp地址,仅对企业版有效。 + +**mgmtShellPort** +- 默认值: _6030_ + +数据库服务中管理节点与客户端通信使用的TCP/UDP端口号。 +> 端口范围 _6030_ - _6034_ 均用于UDP通讯。此外,还使用端口 _6030_ 用于TCP通讯。 + +**vnodeShellPort** +- 默认值: _6035_ + +数据节点与客户端通信使用的TCP/UDP端口号。 +> 端口范围 _6035_ - _6039_ 的5个端口用于UDP通信。此外,还使用端口 _6035_ 用于TCP通讯。 + +**mgmtVnodePort** +- 默认值: _6040_ + +管理节点与数据节点通信使用的TCP/UDP端口号,仅对企业版有效。 +> 端口范围 _6040_ - _6044_ 的5个端口用于UDP通信。此外,还使用端口 _6040_ 用于TCP通讯。 + +**vnodeVnodePort** +- 默认值: _6045_ + +数据节点与数据节点通信使用的TCP/UDP端口号,仅对企业版有效。 +> 端口范围 _6045_ - _6049_ 的5个端口用于UDP通信。此外,还使用端口 _6045_ 用于TCP通讯。 + +**mgmtMgmtPort** +- 默认值: _6050_ + +管理节点与管理节点通信使用的UDP端口号,仅对企业版有效。 + +**mgmtSyncPort** +- 默认值: _6050_ + +管理节点与管理节点同步使用的TCP端口号,仅对企业版有效。 + +**httpPort** +- 默认值: _6020_ + +RESTful服务使用的端口号,所有的HTTP请求(TCP)都需要向该接口发起查询/写入请求。 + +**dataDir** +- 默认值:/var/lib/taos + +数据文件目录,所有的数据文件都将写入该目录。 + +**logDir** +- 默认值:/var/log/taos + +日志文件目录,客户端和服务器的运行日志将写入该目录。 + +**maxUsers** +- 默认值:10,000 + +系统允许创建用户数量的上限 + +**maxDbs** +- 默认值:1,000 + +系统允许的创建数据库的上限 + +**maxTables** +- 默认值:650,000 + +系统允许创建数据表的上限。 +>系统能够创建的表受到多种因素的限制,单纯地增大该参数并不能直接增加系统能够创建的表数量。例如,由于每个表创建均需要消耗一定量的缓存空间,系统可用内存一定的情况下,创建表的总数的上限是一个固定的值。 + +**monitor** +- 默认值:1(激活状态) + +服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括CPU、内存、硬盘、网络带宽、HTTP请求量的监控记录,记录信息存储在`LOG`库中。0表示关闭监控服务,1表示激活监控服务。 + +**numOfLogLines** +- 默认值:10,000,000 + +单个日志文件允许的最大行数(10,000,000行)。 + +**debugFlag** +- 默认值:131(仅输出错误和警告信息) + +系统(服务端和客户端)运行日志开关: +- 131 仅输出错误和警告信息 +- 135 输入错误(ERROR)、警告(WARN)、信息(Info) 
不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数: -- days:一个数据文件覆盖的时间长度,单位为天 -- keep:数据库中数据保留的天数 +- days:数据文件存储数据的时间跨度,单位为天 +- keep:数据保留的天数 - rows: 文件块中记录条数 - comp: 文件压缩标志位,0:关闭,1:一阶段压缩,2:两阶段压缩 - ctime:数据从写入内存到写入硬盘的最长时间间隔,单位为秒 @@ -66,19 +159,139 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修 ## 客户端配置 -TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。 +TDengine系统的前台交互客户端应用程序为taos(Windows平台上为taos.exe)。与服务端程序一样,也可以通过设置taos.cfg来配置`taos`启动和运行的配置项。启动的时候如果不指定taos加载配置文件路径,默认读取`/etc/taos/`路径下的`taos.cfg`文件。指定配置文件来启动`taos`的命令如下: + +``` +taos -c /home/cfg/ +``` +**注意:启动设置的是配置文件所在目录,而不是配置文件本身** + +如果`/home/cfg/`目录下没有配置文件,程序会继续启动并打印如下告警信息: +```plaintext +Welcome to the TDengine shell from linux, client version:1.6.4.0 +option file:/home/cfg/taos.cfg not found, all options are set to system default +``` +更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。 + +客户端配置参数说明 + +**masterIP** +- 默认值:127.0.0.1 + +客户端连接的TDengine服务器IP地址,如果不设置默认连接127.0.0.1的节点。以下两个命令等效: +``` +taos +taos -h 127.0.0.1 +``` +其中的IP地址是从配置文件中读取的masterIP的值。 + +**locale** +- 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + +TDengine为存储中文、日文、韩文等非ASCII编码的宽字符,提供一种专门的字段类型`nchar`。写入`nchar`字段的数据将统一采用`UCS4-LE`格式进行编码并发送到服务器。需要注意的是,**编码正确性**是客户端来保证。因此,如果用户想要正常使用`nchar`字段来存储诸如中文、日文、韩文等非ASCII字符,需要正确设置客户端的编码格式。 + +客户端的输入的字符均采用操作系统当前默认的编码格式,在Linux系统上多为`UTF-8`,部分中文系统编码则可能是`GB18030`或`GBK`等。在docker环境中默认的编码是`POSIX`。在中文版Windows系统中,编码则是`CP936`。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证`nchar`中的数据正确转换为`UCS4-LE`编码格式。 + +在 Linux 中 locale 的命名规则为: +`<语言>_<地区>.<字符集编码>` +如:`zh_CN.UTF-8`,zh代表中文,CN代表大陆地区,UTF-8表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux系统与Mac OSX系统可以通过设置locale来确定系统的字符编码,由于Windows使用的locale中不是POSIX标准的locale格式,因此在Windows下需要采用另一个配置参数`charset`来指定字符编码。在Linux系统中也可以使用charset来指定字符编码。 + 
+**charset** +- 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + +如果配置文件中不设置`charset`,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,**则中断启动过程**。 + +在Linux系统中,locale信息包含了字符编码信息,因此正确设置了Linux系统locale以后可以不用再单独设置charset。例如: +``` +locale zh_CN.UTF-8 +``` +在Windows系统中,无法从locale获取系统当前编码。如果无法从配置文件中读取字符串编码信息,`taos`默认设置为字符编码为`CP936`。其等效在配置文件中添加如下配置: +``` +charset CP936 +``` +如果需要调整字符编码,请查阅当前操作系统使用的编码,并在配置文件中正确设置。 + +在Linux系统中,如果用户同时设置了locale和字符集编码charset,并且locale和charset的不一致,后设置的值将覆盖前面设置的值。 +``` +locale zh_CN.UTF-8 +charset GBK +``` +则`charset`的有效值是`GBK`。 +``` +charset GBK +locale zh_CN.UTF-8 +``` +`charset`的有效值是`UTF-8`。 -客户端配置参数列表及解释 +**sockettype** +- 默认值:UDP -- masterIP:客户端默认发起请求的服务器的IP地址 -- charset:指明客户端所使用的字符集,默认值为UTF-8。TDengine存储nchar类型数据时使用的是unicode存储,因此客户端需要告知服务自己所使用的字符集,也即客户端所在系统的字符集。 -- locale:设置系统语言环境。Linux上客户端与服务端共享 -- defaultUser:默认登录用户,默认值root -- defaultPass:默认登录密码,默认值taosdata +客户端连接服务端的套接字的方式,可以使用`UDP`和`TCP`两种配置。 +在客户端和服务端之间的通讯需要经过恶劣的网络环境下(如公共网络、互联网)、客户端与数据库服务端连接不稳定(由于MTU的问题导致UDP丢包)的情况下,可以将连接的套接字类型调整为`TCP` -TCP/UDP端口,以及日志的配置参数,与server的配置参数完全一样。 +>注意:客户端套接字的类型需要和服务端的套接字类型相同,否则无法连接数据库。 -启动taos时,你也可以从命令行指定IP地址、端口号,用户名和密码,否则就从taos.cfg读取。 +**compressMsgSize** +- 默认值:-1(不压缩) + +客户端与服务器之间进行消息通讯过程中,对通讯的消息进行压缩的阈值,默认值为-1(不压缩)。如果要压缩消息,建议设置为64330字节,即大于64330字节的消息体才进行压缩。在配置文件中增加如下配置项即可: +``` +compressMsgSize 64330 +``` +如果配置项设置为0,`compressMsgSize 0`表示对所有的消息均进行压缩。 + +**timezone** +- 默认值:从系统中动态获取当前的时区设置 + +客户端运行系统所在的时区。为应对多时区的数据写入和查询问题,TDengine采用Unix时间戳([Unix Timestamp](https://en.wikipedia.org/wiki/Unix_time))来记录和存储时间戳。Unix时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的Unix时间戳,需要设置正确的时区。 + +在Linux系统中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如: +``` +timezone UTC-8 +timezone GMT-8 +timezone Asia/Shanghai +``` +均是合法的设置东八区时区的格式。 + + +时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词`now`的解析)产生影响。例如: +``` +SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08'; +``` +在东八区,SQL语句等效于 +``` 
+SELECT count(*) FROM table_name WHERE TS<1554955268000; +``` +在UTC时区,SQL语句等效于 +``` +SELECT count(*) FROM table_name WHERE TS<1554984068000; +``` +为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,`2013-04-12T15:52:01.123+08:00`或者ISO-8601格式时间戳字符串`2013-04-12T15:52:01.123+0800`。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。 + +**defaultUser** +- 默认值:root + +登录用户名,客户端登录的时候,如果不指定用户名,则自动使用该用户名登录。默认情况下,以下的两个命令等效 +``` +taos +taos -u root +``` +用户名为从配置中读取的`defaultUser`配置项。如果更改`defaultUser abc`,则以下两个命令等效: +``` +taos +taos -u abc +``` + +**defaultPass** +- 默认值:taosdata + +登录用户名,客户端登录的时候,如果不指定密码,则自动使用该密码登录。默认情况下,以下的两个命令等效 +``` +taos +taos -ptaosdata +``` + +TCP/UDP端口,以及日志的配置参数,与server的配置参数完全一样。使用命令`taos -?` 可查看`taos`允许的可选项。 ## 用户管理 @@ -124,6 +337,8 @@ TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。 insert into tb1 file a.csv b.csv tb2 c.csv … import into tb1 file a.csv b.csv tb2 c.csv … ``` +> 注意:导入的CSV文件不能够带表头, 且表的列与CSV文件的列需要严格对应。 +> 同样还可以使用[样例数据导入工具][1]对数据进行横向和纵向扩展导入。 ## 数据导出 @@ -191,6 +406,9 @@ KILL STREAM ## 系统监控 -TDengine启动后,会自动创建一个监测数据库SYS,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在SYS库里。系统管理员可以从CLI直接查看这个数据库,也可以在WEB通过图形化界面查看这些监测信息。 +TDengine启动后,会自动创建一个监测数据库`LOG`,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在`LOG`库里。系统管理员可以通过客户端程序查看记录库中的运行负载信息,(在企业版中)还可以通过浏览器查看数据的图标可视化结果。 + +这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项`monitor`将其关闭或打开。 + -这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。 \ No newline at end of file +[1]: https://github.com/taosdata/TDengine/tree/develop/importSampleData \ No newline at end of file diff --git a/documentation/webdocs/markdowndocs/advanced features-ch.md b/documentation/webdocs/markdowndocs/advanced features-ch.md index 14a2801209c9b92ed16d38ed220cee5c3684cd4d..9dc289a8d5765d7ffc5dc9bab5267b61559f0d02 100644 --- a/documentation/webdocs/markdowndocs/advanced features-ch.md +++ b/documentation/webdocs/markdowndocs/advanced features-ch.md 
@@ -63,28 +63,11 @@ CREATE TABLE QUERY_RES ## 数据订阅(Publisher/Subscriber) 基于数据天然的时间序列特性,TDengine的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致,均可视为系统中插入一条带时间戳的新记录。同时,TDengine在内部严格按照数据时间序列单调递增的方式保存数据。本质上来说,TDengine中里每一张表均可视为一个标准的消息队列。 -TDengine内嵌支持轻量级的消息订阅与推送服务。使用系统提供的API,用户可订阅数据库中的某一张表(或超级表)。订阅的逻辑和操作状态的维护均是由客户端完成,客户端定时轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户。 +TDengine内嵌支持轻量级的消息订阅与推送服务。使用系统提供的API,用户可使用普通查询语句订阅数据库中的一张或多张表。订阅的逻辑和操作状态的维护均是由客户端完成,客户端定时轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户。 TDengine的订阅与推送服务的状态是客户端维持,TDengine服务器并不维持。因此如果应用重启,从哪个时间点开始获取最新数据,由应用决定。 -#### API说明 - -使用订阅的功能,主要API如下: - -
      -
    • TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds)

      该函数负责启动订阅服务。其中参数说明:

      • -
      • host:主机IP地址

      • -
      • user:数据库登录用户名

      • -
      • pass:密码

      • -
      • db:数据库名称

      • -
      • table:(超级) 表的名称

      • -
      • time:启动时间,Unix Epoch时间,单位为毫秒。从1970年1月1日起计算的毫秒数。如果设为0,表示从当前时间开始订阅

      • -
      • mseconds:查询数据库更新的时间间隔,单位为毫秒。一般设置为1000毫秒。返回值为指向TAOS_SUB结构的指针,如果返回为空,表示失败。

      • -
    • TAOS_ROW taos_consume(TAOS_SUB *tsub) -

      该函数用来获取订阅的结果,用户应用程序将其置于一个无限循环语句中。如果数据库有新记录到达,该API将返回该最新的记录。如果没有新的记录,该API将阻塞。如果返回值为空,说明系统出错。参数说明:

      • tsub:taos_subscribe函数返回的结构体指针。

    • void taos_unsubscribe(TAOS_SUB *tsub)

      取消订阅。应用程序退出时,务必调用该函数以避免资源泄露。

    • -
    • int taos_num_subfields(TAOS_SUB *tsub)

      获取返回的一行记录中数据包含多少列。

    • -
    • TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub)

      获取每列数据的属性(数据类型、名字、长度),与taos_num_subfields配合使用,可解析返回的每行数据。

    -示例代码:请看安装包中的的示范程序 +订阅相关API文档请见 [C/C++ 数据订阅接口](https://www.taosdata.com/cn/documentation/connector/#C/C++-%E6%95%B0%E6%8D%AE%E8%AE%A2%E9%98%85%E6%8E%A5%E5%8F%A3),《[TDEngine中订阅的用途和用法](https://www.taosdata.com/blog/2020/02/12/1277.html)》则以一个示例详细介绍了这些API的用法。 ## 缓存 (Cache) TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Use,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。 @@ -93,7 +76,7 @@ TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接 TDengine分配固定大小的内存空间作为缓存空间,缓存空间可根据应用的需求和硬件资源配置。通过适当的设置缓存空间,TDengine可以提供极高性能的写入和查询的支持。TDengine中每个虚拟节点(virtual node)创建时分配独立的缓存池。每个虚拟节点管理自己的缓存池,不同虚拟节点间不共享缓存池。每个虚拟节点内部所属的全部表共享该虚拟节点的缓存池。 -TDengine将内存池按块划分进行管理,数据在内存块里按照列式存储。一个vnode的内存池是在vnode创建时按块分配好的,而且每个内存块按照先进先出的原则进行管理。一张表所需要的内存块是从vnode的内存池中进行分配的,块的大小由系统配置参数cache决定。每张表最大内存块的数目由配置参数tblocks决定,每张表平均的内存块的个数由配置参数ablocks决定。因此对于一个vnode, 总的内存大小为: cache*ablocks*tables。内存块参数cache不宜过小,一个cache block需要能存储至少几十条以上记录,才会有效率。参数ablocks最小为2,保证每张表平均至少能分配两个内存块。 +TDengine将内存池按块划分进行管理,数据在内存块里按照列式存储。一个vnode的内存池是在vnode创建时按块分配好的,而且每个内存块按照先进先出的原则进行管理。一张表所需要的内存块是从vnode的内存池中进行分配的,块的大小由系统配置参数cache决定。每张表最大内存块的数目由配置参数tblocks决定,每张表平均的内存块的个数由配置参数ablocks决定。因此对于一个vnode, 总的内存大小为: `cache * ablocks * tables`。内存块参数cache不宜过小,一个cache block需要能存储至少几十条以上记录,才会有效率。参数ablocks最小为2,保证每张表平均至少能分配两个内存块。 你可以通过函数last_row快速获取一张表或一张超级表的最后一条记录,这样很便于在大屏显示各设备的实时状态或采集值。例如: diff --git a/documentation/webdocs/markdowndocs/advanced features.md b/documentation/webdocs/markdowndocs/advanced features.md index e841a5a6a531c94908b0f027d2d3e808d40ecac5..3eae454da59eec4aa7ea73741dcde2e882cb0821 100644 --- a/documentation/webdocs/markdowndocs/advanced features.md +++ b/documentation/webdocs/markdowndocs/advanced features.md @@ -62,7 +62,7 @@ Time series data is a sequence of data points over time. Inside a table, the dat To reduce the development complexity and improve data consistency, TDengine provides the pub/sub functionality. 
To publish a message, you simply insert a record into a table. Compared with popular messaging tool Kafka, you subscribe to a table or a SQL query statement, instead of a topic. Once new data points arrive, TDengine will notify the application. The process is just like Kafka. -The detailed API will be introduced in the [connectors](https://www.taosdata.com/en/documentation/connector/) section. +The API documentation is at [C/C++ subscription API](https://www.taosdata.com/en/documentation/connector/#C/C++-subscription-API) section, and you can find more information from blog article (only Chinese version at present) [The usage of subscription](https://www.taosdata.com/blog/2020/02/12/1277.html). ##Caching TDengine allocates a fixed-size buffer in memory, the newly arrived data will be written into the buffer first. Every device or table gets one or more memory blocks. For typical IoT scenarios, the hot data shall always be newly arrived data, they are more important for timely analysis. Based on this observation, TDengine manages the cache blocks in First-In-First-Out strategy. If no enough space in the buffer, the oldest data will be saved into hard disk first, then be overwritten by newly arrived data. TDengine also guarantees every device can keep at least one block of data in the buffer. 
diff --git a/documentation/webdocs/markdowndocs/connector-ch.md b/documentation/webdocs/markdowndocs/connector-ch.md index 9261b9eef6bfa8d726dee2f21e58c312a8e3c587..b5d8fb5afb12ede82f2cdcd9ea29e20e8a82d6b8 100644 --- a/documentation/webdocs/markdowndocs/connector-ch.md +++ b/documentation/webdocs/markdowndocs/connector-ch.md @@ -164,82 +164,140 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 ### C/C++ 数据订阅接口 -订阅API目前支持订阅一张表,并通过定期轮询的方式不断获取写入表中的最新数据。 +订阅API目前支持订阅一张或多张表,并通过定期轮询的方式不断获取写入表中的最新数据。 -- `TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds)` +* `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)` - 该API用来启动订阅,需要提供的参数包含:TDengine管理主节点的IP地址、用户名、密码、数据库、数据库表的名字;time是开始订阅消息的时间,是从1970年1月1日起计算的毫秒数,为长整型, 如果设为0,表示从当前时间开始订阅;mseconds为查询数据库更新的时间间隔,单位为毫秒,建议设为1000毫秒。返回值为一指向TDengine_SUB结构的指针,如果返回为空,表示失败。 + 该函数负责启动订阅服务,成功时返回订阅对象,失败时返回 `NULL`,其参数为: + * taos:已经建立好的数据库连接 + * restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 + * topic:订阅的主题(即名称),此参数是订阅的唯一标识 + * sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 + * fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL` + * param:调用回调函数时的附加参数,系统API将其原样传递到回调函数,不进行任何处理 + * interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用`taos_consume`的间隔小于此周期,API将会阻塞,直到时间间隔超过此周期。 -- `TAOS_ROW taos_consume(TAOS_SUB *tsub)` +* `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` - 该API用来获取最新消息,应用程序一般会将其置于一个无限循环语句中。其中参数tsub是taos_subscribe的返回值。如果数据库有新的记录,该API将返回,返回参数是一行记录。如果没有新的记录,该API将阻塞。如果返回值为空,说明系统出错,需要检查系统是否还在正常运行。 + 异步模式下,回调函数的原型,其参数为: + * tsub:订阅对象 + * res:查询结果集,注意结果集中可能没有记录 + * param:调用 `taos_subscribe`时客户程序提供的附加参数 + * code:错误码 -- `void taos_unsubscribe(TAOS_SUB *tsub)` - 该API用于取消订阅,参数tsub是taos_subscribe的返回值。应用程序退出时,需要调用该API,否则有资源泄露。 +* `TAOS_RES *taos_consume(TAOS_SUB *tsub)` -- `int taos_num_subfields(TAOS_SUB *tsub)` + 同步模式下,该函数用来获取订阅的结果。 
用户应用程序将其置于一个循环之中。 如两次调用`taos_consume`的间隔小于订阅的轮询周期,API将会阻塞,直到时间间隔超过此周期。 如果数据库有新记录到达,该API将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此API。 - 该API用来获取返回的一排数据中数据的列数 +* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)` -- `TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub)` + 取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。 - 该API用来获取每列数据的属性(数据类型、名字、字节数),与taos_num_subfields配合使用,可用来解析返回的一排数据。 ## Java Connector -### JDBC接口 +TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。 -如果用户使用Java开发企业级应用,可选用 TDengine 提供的 JDBC Driver 来调用服务。TDengine 提供的 JDBC Driver 是标准 JDBC 规范的子集,遵循 JDBC 标准 (3.0)API 规范,支持现有的各种 Java 开发框架。目前 TDengine 的 JDBC driver 已经发布到 Sonatype Maven Repository。因此用户开发时,需要在 pom.xml 文件中进行如下配置: +由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 -```xml +* libtaos.so + 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 + +* taos.dll + 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 + +> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。 - - - oss-sonatype - oss-sonatype - https://oss.sonatype.org/content/groups/public - - - +TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点: + +* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。 +* 由于不支持删除和修改,所以也不支持事务操作。 +* 目前不支持表间的 union 操作。 +* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。 + + +## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 + +| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | +| --- | --- | --- | +| 1.0.3 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | + +## TDengine DataType 和 Java DataType + +TDengine 
目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: + +| TDengine DataType | Java DataType | +| --- | --- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT, TINYINT |java.lang.Short | +| BOOL | java.lang.Boolean | +| BINARY, NCHAR | java.lang.String | + +## 如何获取 TAOS-JDBCDriver + +### maven 仓库 + +目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。 +* [sonatype][8] +* [mvnrepository][9] +* [maven.aliyun][10] + +maven 项目中使用如下 pom.xml 配置即可: + +```xml com.taosdata.jdbc taos-jdbcdriver - 1.0.1 + 1.0.3 - ``` -TDengine 的驱动程序包的在不同操作系统上依赖不同的本地函数库(均由C语言编写)。Linux系统上,依赖一个名为`libtaos.so` 的本地库,.so即"Shared Object"缩写。成功安装TDengine后,`libtaos.so` 文件会被自动拷贝至`/usr/local/lib/taos`目录下,该目录也包含在Linux上自动扫描路径上。Windows系统上,JDBC驱动程序依赖于一个名为`taos.dll` 的本地库,.dll是动态链接库"Dynamic Link Library"的缩写。Windows上成功安装客户端后,JDBC驱动程序包默认位于`C:/TDengine/driver/JDBC/`目录下;其依赖的动态链接库`taos.dll`文件位于`C:/TDengine/driver/C`目录下,`taos.dll` 会被自动拷贝至系统默认搜索路径`C:/Windows/System32`下。 +### 源码编译打包 -TDengine的JDBC Driver遵循标准JDBC规范,开发人员可以参考Oracle官方的JDBC相关文档来找到具体的接口和方法的定义与用法。TDengine的JDBC驱动在连接配置和支持的方法上与传统数据库驱动稍有不同。 +下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。 -TDengine的JDBC URL规范格式为: -`jdbc:TSDB://{host_ip}:{port}/{database_name}?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +## 使用说明 -其中,`{}`中的内容必须,`[]`中为可选。配置参数说明如下: +### 获取连接 -- user:登陆TDengine所用用户名;默认值root -- password:用户登陆密码;默认值taosdata -- charset:客户端使用的字符集;默认值为系统字符集 -- cfgdir:客户端配置文件目录路径;Linux OS上默认值`/etc/taos` ,Windows OS上默认值 `C:/TDengine/cfg` -- locale:客户端语言环境;默认值系统当前locale -- timezone:客户端使用的时区;默认值为系统当前时区 +如下所示配置即可获取 TDengine Connection: +```java +Class.forName("com.taosdata.jdbc.TSDBDriver"); +String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` +> 端口 6030 
为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 -以上所有参数均可在调用java.sql.DriverManager类创建连接时指定,示例如下: +TDengine 的 JDBC URL 规范格式为: +`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` -```java -import java.sql.Connection; -import java.sql.DriverManager; -import java.util.Properties; -import com.taosdata.jdbc.TSDBDriver; +其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: +* user:登录 TDengine 用户名,默认值 root。 +* password:用户登录密码,默认值 taosdata。 +* charset:客户端使用的字符集,默认值为系统字符集。 +* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 +* locale:客户端语言环境,默认值系统当前 locale。 +* timezone:客户端使用的时区,默认值为系统当前时区。 + +以上参数可以在 3 处配置,`优先级由高到低`分别如下: +1. JDBC URL 参数 + 如上所述,可以在 JDBC URL 的参数中指定。 +2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps) +```java public Connection getConn() throws Exception{ - Class.forName("com.taosdata.jdbc.TSDBDriver"); - String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata"; + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata"; Properties connProps = new Properties(); connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); @@ -252,39 +310,318 @@ public Connection getConn() throws Exception{ } ``` -这些配置参数中除了cfgdir外,均可在客户端配置文件taos.cfg中进行配置。调用java.sql.DriverManager时声明的配置参数优先级最高,JDBC URL的优先级次之,配置文件的优先级最低。例如charset同时在配置文件taos.cfg中配置,也在JDBC URL中配置,则使用JDBC URL中的配置值。 +3. 
客户端配置文件 taos.cfg + + linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。 +```properties +# client default username +# defaultUser root + +# client default password +# defaultPass taosdata + +# default system charset +# charset UTF-8 + +# system locale +# locale en_US.UTF-8 +``` +> 更多详细配置请参考[客户端配置][13] + +### 创建数据库和表 + +```java +Statement stmt = conn.createStatement(); + +// create database +stmt.executeUpdate("create database if not exists db"); + +// use database +stmt.executeUpdate("use db"); + +// create table +stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); +``` +> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。 + +### 插入数据 + +```java +// insert data +int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); + +System.out.println("insert " + affectedRows + " rows."); +``` +> now 为系统内部函数,默认为服务器当前时间。 +> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。 + +### 查询数据 + +```java +// query data +ResultSet resultSet = stmt.executeQuery("select * from tb"); + +Timestamp ts = null; +int temperature = 0; +float humidity = 0; +while(resultSet.next()){ + + ts = resultSet.getTimestamp(1); + temperature = resultSet.getInt(2); + humidity = resultSet.getFloat("humidity"); + + System.out.printf("%s, %d, %s\n", ts, temperature, humidity); +} +``` +> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 + + +### 关闭资源 + +```java +resultSet.close(); +stmt.close(); +conn.close(); +``` +> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 +## 与连接池使用 + +**HikariCP** + +* 引入相应 HikariCP maven 依赖: +```xml + + com.zaxxer + HikariCP + 3.4.1 + +``` + +* 使用示例如下: +```java + public static void main(String[] args) throws SQLException { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + config.setPassword("taosdata"); + + config.setMinimumIdle(3); //minimum number of 
idle connection + config.setMaximumPoolSize(10); //maximum number of connection in the pool + config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool + config.setIdleTimeout(60000); // max idle time for recycle idle connection + config.setConnectionTestQuery("describe log.dn"); //validation query + config.setValidationTimeout(3000); //validation query timeout + + HikariDataSource ds = new HikariDataSource(config); //create datasource + + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... + + connection.close(); // put back to conneciton pool +} +``` +> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 +> 更多 HikariCP 使用问题请查看[官方说明][5] + +**Druid** -此外,尽管TDengine的JDBC驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致TDengine的Java API并不能与标准完全相同。对于有大量关系型数据库开发经验而初次接触TDengine的开发者来说,有以下一些值的注意的地方: +* 引入相应 Druid maven 依赖: -* TDengine不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法 -* 目前TDengine不支持表间的join或union操作,因此也缺乏对该部分API的支持 -* TDengine支持批量写入,但是支持停留在SQL语句级别,而不是API级别,也就是说用户需要通过写特殊的SQL语句来实现批量 -* 目前TDengine不支持嵌套查询(nested query),对每个Connection的实例,至多只能有一个打开的ResultSet实例;如果在ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver则会自动关闭上一个ResultSet +```xml + + com.alibaba + druid + 1.1.20 + +``` -对于TDengine操作的报错信息,用户可使用JDBCDriver包里提供的枚举类TSDBError.java来获取error message和error code的列表。对于更多的具体操作的相关代码,请参考TDengine提供的使用示范项目`JDBCDemo`。 +* 使用示例如下: +```java +public static void main(String[] args) throws Exception { + Properties properties = new Properties(); + properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver"); + properties.put("url","jdbc:TAOS://127.0.0.1:6030/log"); + properties.put("username","root"); + properties.put("password","taosdata"); + + properties.put("maxActive","10"); //maximum number of connection in the pool + properties.put("initialSize","3");//initial number of connection + 
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool + properties.put("minIdle","3");//minimum number of connection in the pool + + properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection + + properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle + properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle + + properties.put("validationQuery","describe log.dn"); //validation query + properties.put("testWhileIdle","true"); // test connection while idle + properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true + properties.put("testOnReturn","false"); // don't need while testWhileIdle is true + + //create druid datasource + DataSource ds = DruidDataSourceFactory.createDataSource(properties); + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... 
+ + connection.close(); // put back to conneciton pool +} +``` +> 更多 druid 使用问题请查看[官方说明][6] + +**注意事项** +* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。 + +如下所示,`select server_status()` 执行成功会返回 `1`。 +```shell +taos> select server_status(); +server_status()| +================ +1 | +Query OK, 1 row(s) in set (0.000141s) +``` + +## 与框架使用 + +* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11] +* Springboot + Mybatis 中使用,可参考 [springbootdemo][12] + +## 常见问题 + +* java.lang.UnsatisfiedLinkError: no taos in java.library.path + + **原因**:程序没有找到依赖的本地函数库 taos。 + + **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 + +* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform + + **原因**:目前 TDengine 只支持 64 位 JDK。 + + **解决方法**:重新安装 64 位 JDK。 + +* 其它问题请参考 [Issues][7] ## Python Connector +### 安装准备 +* 已安装TDengine, 如果客户端在Windows上,需要安装Windows 版本的TDengine客户端 [(Windows TDengine 客户端安装)](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口) +* 已安装python 2.7 or >= 3.4 +* 已安装pip + ### Python客户端安装 +#### Linux + 用户可以在源代码的src/connector/python文件夹下找到python2和python3的安装包。用户可以通过pip命令安装: -​ `pip install src/connector/python/[linux|windows]/python2/` +​ `pip install src/connector/python/linux/python2/` 或 -​ `pip install src/connector/python/[linux|windows]/python3/` +​ `pip install src/connector/python/linux/python3/` -如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。 -对于windows 客户端,安装TDengine windows 客户端后,将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。所有TDengine的连接器,均需依赖taos.dll。 +#### Windows +在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\windows\system32" 目录下, 然后进入Windwos cmd 命令行界面 +```cmd +cd C:\TDengine\connector\python\windows +pip install python2\ +``` +或 
+```cmd +cd C:\TDengine\connector\python\windows +pip install python3\ +``` -### Python客户端接口 +*如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。 +对于windows 客户端,安装TDengine windows 客户端后,将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。 -在使用TDengine的python接口时,需导入TDengine客户端模块: +### 使用 -``` +#### 代码示例 + +* 导入TDengine客户端模块 + +```python import taos ``` +* 获取连接 +```python +conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos") +c1 = conn.cursor() +``` +*host 是TDengine 服务端所有IP, config 为客户端配置文件所在目录 + +* 写入数据 +```python +import datetime + +# 创建数据库 +c1.execute('create database db') +c1.execute('use db') +# 建表 +c1.execute('create table tb (ts timestamp, temperature int, humidity float)') +# 插入数据 +start_time = datetime.datetime(2019, 11, 1) +affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time) +# 批量插入数据 +time_interval = datetime.timedelta(seconds=60) +sqlcmd = ['insert into tb values'] +for irow in range(1,11): + start_time += time_interval + sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2)) +affected_rows = c1.execute(' '.join(sqlcmd)) +``` + +* 查询数据 +```python +c1.execute('select * from tb') +# 拉取查询结果 +data = c1.fetchall() +# 返回的结果是一个列表,每一行构成列表的一个元素 +numOfRows = c1.rowcount +numOfCols = len(c1.description) +for irow in range(numOfRows): + print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2])) + +# 直接使用cursor 循环拉取查询结果 +c1.execute('select * from tb') +for data in c1: + print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]) +``` + +* 创建订阅 +```python +# 创建一个主题为 'test' 消费周期为1000毫秒的订阅 +# 第一个参数为 True 表示重新开始订阅,如为 False 且之前创建过主题为 'test' 的订阅,则表示继续消费此订阅的数据,而不是重新开始消费所有数据 +sub = conn.subscribe(True, "test", "select * from meters;", 1000) +``` + +* 消费订阅的数据 +```python +data = sub.consume() +for d in data: + print(d) +``` + +* 取消订阅 +```python +sub.close() +``` + + +* 关闭连接 +```python +c1.close() 
+conn.close() +``` +#### 帮助信息 用户可通过python的帮助信息直接查看模块的使用信息,或者参考code/examples/python中的示例程序。以下为部分常用类和方法: @@ -306,21 +643,34 @@ import taos ### HTTP请求格式 -​ `http://:/rest/sql` +``` +http://:/rest/sql +``` + +参数说明: -​ 参数说明: +- IP: 集群中的任一台主机 +- PORT: 配置文件中httpPort配置项,缺省为6020 -​ IP: 集群中的任一台主机 +例如:http://192.168.0.1:6020/rest/sql 是指向IP地址为192.168.0.1的URL. -​ PORT: 配置文件中httpPort配置项,缺省为6020 +HTTP请求的Header里需带有身份认证信息,TDengine支持Basic认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。 -如:http://192.168.0.1:6020/rest/sql 是指向IP地址为192.168.0.1的URL. +- 自定义身份认证信息如下所示(稍后介绍) -HTTP请求的Header里需带有身份认证信息,TDengine单机版仅支持Basic认证机制。 +``` +Authorization: Taosd +``` + +- Basic身份认证信息如下所示 + +``` +Authorization: Basic +``` HTTP请求的BODY里就是一个完整的SQL语句,SQL语句中的数据表应提供数据库前缀,例如\.\。如果表名不带数据库前缀,系统会返回错误。因为HTTP模块只是一个简单的转发,没有当前DB的概念。 -使用curl来发起一个HTTP Request, 语法如下: +使用curl通过自定义身份认证方式来发起一个HTTP Request, 语法如下: ``` curl -H 'Authorization: Basic ' -d '' :/rest/sql @@ -332,11 +682,12 @@ curl -H 'Authorization: Basic ' -d '' :/rest/sql curl -u username:password -d '' :/rest/sql ``` -其中,`TOKEN`为`{username}:{password}`经过Base64编码之后的字符串,例如`root:taosdata`编码后为`cm9vdDp0YW9zZGF0YQ==` +其中,`TOKEN`为`{username}:{password}`经过Base64编码之后的字符串, 例如`root:taosdata`编码后为`cm9vdDp0YW9zZGF0YQ==` ### HTTP返回格式 -返回值为JSON格式,如下: +返回值为JSON格式,如下: + ``` { "status": "succ", @@ -351,26 +702,60 @@ curl -u username:password -d '' :/rest/sql 说明: -- 第一行”status”告知操作结果是成功还是失败; -- 第二行”head”是表的定义,如果不返回结果集,仅有一列“affected_rows”; -- 第三行是具体返回的数据,一排一排的呈现。如果不返回结果集,仅[[affected_rows]] -- 第四行”rows”表明总共多少行数据 +- status: 告知操作结果是成功还是失败 +- head: 表的定义,如果不返回结果集,仅有一列“affected_rows” +- data: 具体返回的数据,一排一排的呈现,如果不返回结果集,仅[[affected_rows]] +- rows: 表明总共多少行数据 -### 使用示例 +### 自定义授权码 + +HTTP请求中需要带有授权码``, 用于身份识别。授权码通常由管理员提供, 可简单的通过发送`HTTP GET`请求来获取授权码, 操作如下: -- 在demo库里查询表t1的所有记录, curl如下: +``` +curl http://:6020/rest/login// +``` - `curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sql` +其中, `ip`是TDengine数据库的IP地址, `username`为数据库用户名, `password`为数据库密码, 
返回值为`JSON`格式, 各字段含义如下: - 返回值: +- status:请求结果的标志位 + +- code:返回值代码 + +- desc: 授权码 + +获取授权码示例: + +``` +curl http://192.168.0.1:6020/rest/login/root/taosdata +``` + +返回值: + +``` +{ + "status": "succ", + "code": 0, + "desc": +"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" +} +``` + +### 使用示例 + +- 在demo库里查询表t1的所有记录: + +``` +curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sql` +``` +返回值: ``` { "status": "succ", "head": ["column1","column2","column3"], "data": [ - ["2017-12-12 23:44:25.730", 1, 2.3], - ["2017-12-12 22:44:25.728", 4, 5.6] + ["2017-12-12 22:44:25.728",4,5.60000], + ["2017-12-12 23:44:25.730",1,2.30000] ], "rows": 2 } @@ -378,9 +763,11 @@ curl -u username:password -d '' :/rest/sql - 创建库demo: - `curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql` +``` +curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql` +``` - 返回值: +返回值: ``` { "status": "succ", @@ -390,11 +777,71 @@ curl -u username:password -d '' :/rest/sql } ``` +### 其他用法 + +#### 结果集采用Unix时间戳 + +HTTP请求URL采用`sqlt`时,返回结果集的时间戳将采用Unix时间戳格式表示,例如 + +``` +curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sqlt +``` + +返回值: + +``` +{ + "status": "succ", + "head": ["column1","column2","column3"], + "data": [ + [1513089865728,4,5.60000], + [1513093465730,1,2.30000] + ], + "rows": 2 +} +``` + +#### 结果集采用UTC时间字符串 + +HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间字符串表示,例如 +``` + curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sqlutc +``` + +返回值: + +``` +{ + "status": "succ", + "head": ["column1","column2","column3"], + "data": [ + ["2017-12-12T22:44:25.728+0800",4,5.60000], + ["2017-12-12T23:44:25.730+0800",1,2.30000] + ], + "rows": 2 +} +``` + +### 重要配置项 + +下面仅列出一些与RESTFul接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效 + +- httpIp: 
对外提供RESTFul服务的IP地址,默认绑定到0.0.0.0 +- httpPort: 对外提供RESTFul服务的端口号,默认绑定到6020 +- httpMaxThreads: 启动的线程数量,默认为2 +- httpCacheSessions: 缓存连接的数量,并发请求数目需小于此数值的10倍,默认值为100 +- restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240 +- httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式 +- httpDebugFlag: 日志开关,131:仅错误和报警信息,135:所有,默认131 + + ## Go Connector +### linux环境 + #### 安装TDengine -Go的链接器使用了到了 libtaos.so 和taos.h,因此,在使用Go连接器之前,需要在程序运行的机器上安装TDengine以获得相关的驱动文件。 +Go的连接器使用到了 libtaos.so 和taos.h,因此,在使用Go连接器之前,需要在程序运行的机器上安装TDengine以获得相关的驱动文件。 #### Go语言引入package TDengine提供了GO驱动程序“taosSql”包。taosSql驱动包是基于GO的“database/sql/driver”接口的实现。用户可以通过`go get`命令来获取驱动包。 @@ -452,4 +899,333 @@ taosSql驱动包内采用cgo模式,调用了TDengine的C/C++同步接口,与 3. 创建表、写入和查询数据 -在创建好了数据库后,就可以开始创建表和写入查询数据了。这些操作的基本思路都是首先组装SQL语句,然后调用db.Exec执行,并检查错误信息和执行相应的处理。可以参考上面的样例代码 +在创建好了数据库后,就可以开始创建表和写入查询数据了。这些操作的基本思路都是首先组装SQL语句,然后调用db.Exec执行,并检查错误信息和执行相应的处理。可以参考上面的样例代码。 + +### windows环境 + +在windows上使用Go,请参考  +[TDengine GO windows驱动的编译和使用](https://www.taosdata.com/blog/2020/01/06/tdengine-go-windows%E9%A9%B1%E5%8A%A8%E7%9A%84%E7%BC%96%E8%AF%91/) + + + +## Node.js Connector + +TDengine 同时也提供了node.js 的连接器。用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。[具体安装步骤如下](https://github.com/taosdata/tdengine/tree/master/src/connector/nodejs): + +首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器. 
+ +```cmd +npm install td-connector +``` +我们建议用户使用npm 安装node.js连接器。如果您没有安装npm, 可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下 + +我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件: + +### Unix + +- `python` (建议`v2.7` , `v3.x.x` 目前还不支持) +- `make` +- c语言编译器比如[GCC](https://gcc.gnu.org) + +### macOS + +- `python` (建议`v2.7` , `v3.x.x` 目前还不支持) + +- Xcode + + - 然后通过Xcode安装 + + ``` + Command Line Tools + ``` + + 在 + ``` + Xcode -> Preferences -> Locations + ``` + + 目录下可以找到这个工具。或者在终端里执行 + + ``` + xcode-select --install + ``` + + + - 该步执行后 `gcc` 和 `make`就被安装上了 + +### Windows + +#### 安装方法1 + +使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具 + +#### 安装方法2 + +手动安装以下工具: + +- 安装Visual Studio相关:[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) +- 安装 [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7` +- 进入`cmd`命令行界面, `npm config set msvs_version 2017` + +如果以上步骤不能成功执行, 可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) + +如果在Windows 10 ARM 上使用ARM64 Node.js, 还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64". 
+ +### 使用方法 + +(http://docs.taosdata.com/node) +以下是node.js 连接器的一些基本使用方法,详细的使用方法可参考[该文档](http://docs.taosdata.com/node) + +#### 连接 + +使用node.js连接器时,必须先require ```td-connector```,然后使用 ```taos.connect``` 函数。```taos.connect``` 函数必须提供的参数是```host```,其它参数在没有提供的情况下会使用如下的默认值。最后需要初始化```cursor``` 来和TDengine服务端通信 + +```javascript +const taos = require('td-connector'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}) +var cursor = conn.cursor(); // Initializing a new cursor +``` + +关闭连接可执行 + +```javascript +conn.close(); +``` + +#### 查询 + +可通过 ```cursor.query``` 函数来查询数据库。 + +```javascript +var query = cursor.query('show databases;') +``` + +查询的结果可以通过 ```query.execute()``` 函数获取并打印出来 + +```javascript +var promise = query.execute(); +promise.then(function(result) { + result.pretty(); +}); +``` +格式化查询语句还可以使用```query```的```bind```方法。如下面的示例:```query```会自动将提供的数值填入查询语句的```?```里。 + +```javascript +var query = cursor.query('select * from meterinfo.meters where ts <= ? 
and areaid = ?;').bind(new Date(), 5); +query.execute().then(function(result) { + result.pretty(); +}) +``` +如果在```query```语句里提供第二个参数并设为```true```也可以立即获取查询结果。如下: + + +```javascript +var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true) +promise.then(function(result) { + result.pretty(); +}) +``` +#### 异步函数 +异步查询数据库的操作和上面类似,只需要在`cursor.execute`, `TaosQuery.execute`等函数后面加上`_a`。 +```javascript +var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a() +var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a(); +promise1.then(function(result) { + result.pretty(); +}) +promise2.then(function(result) { + result.pretty(); +}) +``` + + +### 示例 +[这里](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例 + +[这里](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`. 
+ +## CSharp Connector + +在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。 + +#### 安装TDengine客户端 + +C#连接器需要使用`libtaos.so`和`taos.h`。因此,在使用C#连接器之前,需在程序运行的Windows环境安装TDengine的Windows客户端,以便获得相关驱动文件。 + +安装完成后,在文件夹`C:/TDengine/examples/C#`中,将会看到两个文件 + +- TDengineDriver.cs 调用taos.dll文件的Native C方法 +- TDengineTest.cs 参考程序示例 + +在文件夹`C:\Windows\System32`,将会看到`taos.dll`文件 + +#### 使用方法 + +- 将C#接口文件TDengineDriver.cs加入到应用程序所在.NET项目中 +- 参考TDengineTest.cs来定义数据库连接参数,及执行数据插入、查询等操作的方法 +- 因为C#接口需要用到`taos.dll`文件,用户可以将`taos.dll`文件加入.NET解决方案中 + +#### 注意事项 + +- `taos.dll`文件使用x64平台编译,所以.NET项目在生成.exe文件时,“解决方案”/“项目”的“平台”请均选择“x64”。 +- 此.NET接口目前已经在Visual Studio 2013/2015/2017中验证过,其它VS版本尚待验证。 + +#### 第三方驱动 + +Maikebing.Data.Taos是一个基于TDengine的RESTful Connector构建的ADO.Net提供器,该开发包由热心贡献者`麦壳饼@@maikebing`提供,具体请参考 + +``` +https://gitee.com/maikebing/Maikebing.EntityFrameworkCore.Taos +``` + +## Windows客户端及程序接口 + +### 客户端安装 + +在Windows操作系统下,TDengine提供64位的Windows客户端([点击下载](https://www.taosdata.com/cn/all-downloads/#tdengine_win-list)),客户端安装程序为.exe文件,运行该文件即可安装,安装路径为C:\TDengine。Windows的客户端可运行在主流的64位Windows平台之上,客户端目录结构如下: + +``` +├── cfg +├───└── taos.cfg +├── connector +├───├── go +├───├── grafana +├───├── jdbc +├───└── python +├── driver +├───├── libtaos.dll +├───├── libtaos.dll.a +├───├── taos.dll +├───├── taos.exp +├───└── taos.lib +├── examples +├───├── bash +├───├── c +├───├── C# +├───├── go +├───├── JDBC +├───├── lua +├───├── matlab +├───├── nodejs +├───├── python +├───├── R +├───└── rust +├── include +├───└── taos.h +└── taos.exe +``` + +其中,最常用的文件列出如下: + ++ Client可执行文件: C:/TDengine/taos.exe ++ 配置文件: C:/TDengine/cfg/taos.cfg ++ 驱动程序目录: C:/TDengine/driver ++ 驱动程序头文件: C:/TDengine/include ++ JDBC驱动程序目录: C:/TDengine/connector/jdbc ++ GO驱动程序目录:C:/TDengine/connector/go ++ Python驱动程序目录:C:/TDengine/connector/python ++ C#驱动程序及示例代码: C:/TDengine/examples/C# ++ 日志目录(第一次运行程序时生成):C:/TDengine/log + +### 注意事项 + +#### Shell工具注意事项 + 
+在开始菜单中搜索cmd程序,通过命令行方式执行taos.exe即可打开TDengine的Client程序,如下所示,其中ServerIP为TDengine所在Linux服务器的IP地址 + +``` +taos -h +``` + +在cmd中对taos的使用与Linux平台没有差别,但需要注意以下几点: + ++ 确保Windows防火墙或者其他杀毒软件处于关闭状态,TDengine的服务端与客户端通信的端口请参考`服务端配置`章节 ++ 确认客户端连接时指定了正确的服务器IP地址 ++ ping服务器IP,如果没有反应,请检查你的网络 + +#### C++接口注意事项 + +TDengine在Window系统上提供的API与Linux系统是相同的, 应用程序使用时,需要包含TDengine头文件taos.h,连接时需要链接TDengine库taos.lib,运行时将taos.dll放到可执行文件目录下。 + +#### Go接口注意事项 + +TDengine在Window系统上提供的API与Linux系统是相同的, 应用程序使用时,除了需要Go的驱动包(C:\TDengine\connector\go)外,还需要包含TDengine头文件taos.h,连接时需要链接TDengine库libtaos.dll、libtaos.dll.a(C:\TDengine\driver),运行时将libtaos.dll、libtaos.dll.a放到可执行文件目录下。 + +使用参考请见: + +[TDengine GO windows驱动的编译和使用](https://www.taosdata.com/blog/2020/01/06/tdengine-go-windows%E9%A9%B1%E5%8A%A8%E7%9A%84%E7%BC%96%E8%AF%91/) + +#### JDBC接口注意事项 + +在Windows系统上,应用程序可以使用JDBC接口来操纵数据库,使用JDBC接口的注意事项如下: + ++ 将JDBC驱动程序(JDBCDriver-1.0.0-dist.jar)放置到当前的CLASS_PATH中; + ++ 将Windows开发包(taos.dll)放置到system32目录下。 + +#### python接口注意事项 +在Windows系统上,应用程序可以通过导入taos这个模块来操纵数据库,使用python接口的注意事项如下: + ++ 确定在Windows上安装了TDengine客户端 + ++ 将Windows开发包(taos.dll)放置到system32目录下。 + +## Mac客户端及程序接口 + +### 客户端安装 + +在Mac操作系统下,TDengine提供64位的Mac客户端([2月10日起提供下载](https://www.taosdata.com/cn/all-downloads/#tdengine_mac-list)),客户端安装程序为.tar.gz文件,解压并运行其中的install_client.sh后即可完成安装,安装路径为/usr/loca/taos。客户端目录结构如下: + +``` +├── cfg +├───└── taos.cfg +├── connector +├───├── go +├───├── grafana +├───├── jdbc +├───└── python +├── driver +├───├── libtaos.1.6.5.1.dylib +├── examples +├───├── bash +├───├── c +├───├── C# +├───├── go +├───├── JDBC +├───├── lua +├───├── matlab +├───├── nodejs +├───├── python +├───├── R +├───└── rust +├── include +├───└── taos.h +└── bin +├───└── taos +``` + +其中,最常用的文件列出如下: + ++ Client可执行文件: /usr/local/taos/bin/taos 软连接到 /usr/local/bin/taos ++ 配置文件: /usr/local/taos/cfg/taos.cfg 软连接到 /etc/taos/taos.cfg ++ 驱动程序目录: /usr/local/taos/driver/libtaos.1.6.5.1.dylib 软连接到 /usr/local/lib/libtaos.dylib ++ 驱动程序头文件: /usr/local/taos/include/taos.h 软连接到 
/usr/local/include/taos.h ++ 日志目录(第一次运行程序时生成):~/TDengineLog + + + +[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[3]: https://github.com/taosdata/TDengine +[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/ +[5]: https://github.com/brettwooldridge/HikariCP +[6]: https://github.com/alibaba/druid +[7]: https://github.com/taosdata/TDengine/issues +[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[10]: https://maven.aliyun.com/mvn/search +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo +[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE +[14]: https://www.taosdata.com/cn/documentation/connector/#Windows%E5%AE%A2%E6%88%B7%E7%AB%AF%E5%8F%8A%E7%A8%8B%E5%BA%8F%E6%8E%A5%E5%8F%A3 +[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B diff --git a/importSampleData/.gitignore b/importSampleData/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..2283b63c52940e30b104289ce0c6c05cac75f197 --- /dev/null +++ b/importSampleData/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ +.idea/ +.vscode/ \ No newline at end of file diff --git a/importSampleData/LICENSE b/importSampleData/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..0ad25db4bd1d86c452db3f9602ccdbe172438f52 
--- /dev/null +++ b/importSampleData/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. 
+The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/importSampleData/README.md b/importSampleData/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ee3a6e073c18b618af49a9c0b6d2d6d07718f00f --- /dev/null +++ b/importSampleData/README.md @@ -0,0 +1,245 @@ +## 样例数据导入 + +该工具可以根据用户提供的 `json` 或 `csv` 格式样例数据文件快速导入 `TDengine`,目前仅能在 Linux 上运行。 + +为了体验写入和查询性能,可以对样例数据进行横向、纵向扩展。横向扩展是指将一个表(监测点)的数据克隆到多张表,纵向扩展是指将样例数据中的一段时间范围内的数据在时间轴上复制。该工具还支持历史数据导入至当前时间后持续导入,这样可以测试插入和查询并行进行的场景,以模拟真实环境。 + +## 下载安装 + +### 下载可执行文件 + +由于该工具使用 go 语言开发,为了方便使用,项目中已经提供了编译好的可执行文件 `bin/taosimport`。通过 `git clone https://github.com/taosdata/TDengine.git` 命令或者直接下载 `ZIP` 文件解压进入样例导入程序目录 `cd importSampleData`,执行 `bin/taosimport`。 + +### go 源码编译 + +由于该工具使用 go 语言开发,编译之前需要先安装 go,具体请参考 [Getting Started][2],而且需要安装 TDengine 的 Go Connector, 具体请参考[TDengine 连接器文档][3]。安装完成之后,执行以下命令即可编译成可执行文件 `bin/taosimport`。 +```shell +go get https://github.com/taosdata/TDengine/importSampleData +cd $GOPATH/src/github.com/taosdata/TDengine/importSampleData +go build -o bin/taosimport app/main.go +``` + +> 注:由于目前 TDengine 的 go connector 只支持 linux 环境,所以该工具暂时只能在 linux 系统中运行。 +> 如果 go get 失败可以下载之后复制 `github.com/taosdata/TDengine/importSampleData` 文件夹到 $GOPATH 的 src 目录下再执行 `go build -o bin/taosimport app/main.go`。 + +## 使用 + +### 快速体验 + +执行命令 `bin/taosimport` 会根据默认配置执行以下操作: +1. 创建数据库 + + 自动创建名称为 `test_yyyyMMdd` 的数据库。 + +2. 创建超级表 + + 根据配置文件 `config/cfg.toml` 中指定的 `sensor_info` 场景信息创建相应的超级表。 + > 建表语句: create table s_sensor_info(ts timestamp, temperature int, humidity float) tags(location binary(20), color binary(16), devgroup int); + +3. 
自动建立子表并插入数据 + + 根据配置文件 `config/cfg.toml` 中 `sensor_info` 场景指定的 `data/sensor_info.csv` 样例数据进行横向扩展 `100` 倍(可通过 hnum 参数指定),即自动创建 `10*100=1000` 张子表(默认样例数据中有 10 张子表,每张表 100 条数据),启动 `10` 个线程(可通过 thread 参数指定)对每张子表循环导入 `1000` 次(可通过 vnum 参数指定)。 + +进入 `taos shell`,可运行如下查询验证: + +* 查询记录数 + + ```shell + taos> use test_yyyyMMdd; + taos> select count(*) from s_sensor_info; + ``` +* 查询各个分组的记录数 + + ```shell + taos> select count(*) from s_sensor_info group by devgroup; + ``` +* 按 1h 间隔查询各聚合指标 + + ```shell + taos> select count(temperature), sum(temperature), avg(temperature) from s_sensor_info interval(1h); + ``` +* 查询指定位置最新上传指标 + + ```shell + taos> select last(*) from s_sensor_info where location = 'beijing'; + ``` +> 更多查询及函数使用请参考 [数据查询][4] + +### 详细使用说明 + +执行命令 `bin/taosimport -h` 可以查看详细参数使用说明: + +* -cfg string + + 导入配置文件路径,包含样例数据文件相关描述及对应 TDengine 配置信息。默认使用 `config/cfg.toml`。 + +* -cases string + + 需要导入的场景名称,该名称可从 -cfg 指定的配置文件中 `[usecase]` 查看,可同时导入多个场景,中间使用逗号分隔,如:`sensor_info,camera_detection`,默认为 `sensor_info`。 + +* -hnum int + + 需要将样例数据进行横向扩展的倍数,假设原有样例数据包含 1 张子表 `t_0` 数据,指定 hnum 为 2 时会根据原有表名创建 `t_0、t_1` 两张子表。默认为 100。 + +* -vnum int + + 需要将样例数据进行纵向扩展的次数,如果设置为 0 代表将历史数据导入至当前时间后持续按照指定间隔导入。默认为 1000,表示将样例数据在时间轴上纵向复制1000 次。 + +* -delay int + + 当 vnum 设置为 0 时持续导入的时间间隔,默认为所有场景中最小记录间隔时间的一半,单位 ms。 + +* -tick int + + 打印统计信息的时间间隔,默认 2000 ms。 + +* -save int + + 是否保存统计信息到 tdengine 的 statistic 表中,1 是,0 否, 默认 0。 + +* -savetb int + + 当 save 为 1 时保存统计信息的表名, 默认 statistic。 + +* -auto int + + 是否自动生成样例数据中的主键时间戳,1 是,0 否, 默认 0。 + +* -start string + + 导入的记录开始时间,格式为 `"yyyy-MM-dd HH:mm:ss.SSS"`,不设置会使用样例数据中最小时间,设置后会忽略样例数据中的主键时间,会按照指定的 start 进行导入。如果 auto 为 1,则必须设置 start,默认为空。 + +* -interval int + + 导入的记录时间间隔,该设置只会在指定 `auto=1` 之后生效,否则会根据样例数据自动计算间隔时间。单位为毫秒,默认 1000。 + +* -thread int + + 执行导入数据的线程数目,默认为 10。 + +* -batch int + + 执行导入数据时的批量大小,默认为 100。批量是指一次写操作时,包含多少条记录。 + +* -host string + + 导入的 TDengine 服务器 IP,默认为 127.0.0.1。 + +* -port int + + 导入的 TDengine 服务器端口,默认为 6030。 + +* -user string + + 导入的 TDengine 
用户名,默认为 root。 + +* -password string + + 导入的 TDengine 用户密码,默认为 taosdata。 + +* -dropdb int + + 导入数据之前是否删除数据库,1 是,0 否, 默认 0。 + +* -db string + + 导入的 TDengine 数据库名称,默认为 test_yyyyMMdd。 + +* -dbparam string + + 当指定的数据库不存在时,自动创建数据库时可选项配置参数,如 `days 10 cache 16000 ablocks 4`,默认为空。 + +### 常见使用示例 + +* `bin/taosimport -cfg config/cfg.toml -cases sensor_info,camera_detection -hnum 1 -vnum 10` + + 执行上述命令后会将 sensor_info、camera_detection 两个场景的数据各导入 10 次。 + +* `bin/taosimport -cfg config/cfg.toml -cases sensor_info -hnum 2 -vnum 0 -start "2019-12-12 00:00:00.000" -interval 5000` + + 执行上述命令后会将 sensor_info 场景的数据横向扩展2倍从指定时间 `2019-12-12 00:00:00.000` 开始且记录间隔时间为 5000 毫秒开始导入,导入至当前时间后会自动持续导入。 + +### config/cfg.toml 配置文件说明 + +``` toml +# 传感器场景 +[sensor_info] # 场景名称 +format = "csv" # 样例数据文件格式,可以是 json 或 csv,具体字段应至少包含 subTableName、tags、fields 指定的字段。 +filePath = "data/sensor_info.csv" # 样例数据文件路径,程序会循环使用该文件数据 +separator = "," # csv 样例文件中字段分隔符,默认逗号 + +stname = "sensor_info" # 超级表名称 +subTableName = "devid" # 使用样例数据中指定字段当作子表名称一部分,子表名称格式为 t_subTableName_stname,扩展表名为 t_subTableName_stname_i。 +timestamp = "ts" # 使用 fields 中哪个字段当作主键,类型必须为 timestamp +timestampType="millisecond" # 样例数据中主键时间字段是 millisecond 还是 dateTime 格式 +#timestampTypeFormat = "2006-01-02 15:04:05.000" # 主键日期时间格式,timestampType 为 dateTime 时需要指定 +tags = [ + # 标签列表,name 为标签名称,type 为标签类型 + { name = "location", type = "binary(20)" }, + { name = "color", type = "binary(16)" }, + { name = "devgroup", type = "int" }, +] + +fields = [ + # 字段列表,name 为字段名称,type 为字段类型 + { name = "ts", type = "timestamp" }, + { name = "temperature", type = "int" }, + { name = "humidity", type = "float" }, +] + +# 摄像头检测场景 +[camera_detection] # 场景名称 +format = "json" # 样例数据文件格式,可以是 json 或 csv,具体字段应至少包含 subTableName、tags、fields 指定的字段。 +filePath = "data/camera_detection.json" # 样例数据文件路径,程序会循环使用该文件数据 +#separator = "," # csv 样例文件中字段分隔符,默认逗号, 如果是 json 文件可以不用配置 + +stname = "camera_detection" # 超级表名称 +subTableName = "sensor_id" # 使用样例数据中指定字段当作子表名称一部分,子表名称格式为 
t_subTableName_stname,扩展表名为 t_subTableName_stname_i。 +timestamp = "ts" # 使用 fields 中哪个字段当作主键,类型必须为 timestamp +timestampType="dateTime" # 样例数据中主键时间字段是 millisecond 还是 dateTime 格式 +timestampTypeFormat = "2006-01-02 15:04:05.000" # 主键日期时间格式,timestampType 为 dateTime 时需要指定 +tags = [ + # 标签列表,name 为标签名称,type 为标签类型 + { name = "home_id", type = "binary(30)" }, + { name = "object_type", type = "int" }, + { name = "object_kind", type = "binary(20)" }, +] + +fields = [ + # 字段列表,name 为字段名称,type 为字段类型 + { name = "ts", type = "timestamp" }, + { name = "states", type = "tinyint" }, + { name = "battery_voltage", type = "float" }, +] + +# other cases + +``` + +### 样例数据格式说明 + +#### json + +当配置文件 `config/cfg.toml` 中各场景的 format="json" 时,样例数据文件需要提供 tags 和 fields 字段列表中的字段值。样例数据格式如下: + +```json +{"home_id": "603", "sensor_id": "s100", "ts": "2019-01-01 00:00:00.000", "object_type": 1, "object_kind": "night", "battery_voltage": 0.8, "states": 1} +{"home_id": "604", "sensor_id": "s200", "ts": "2019-01-01 00:00:00.000", "object_type": 2, "object_kind": "day", "battery_voltage": 0.6, "states": 0} +``` + +#### csv + +当配置文件 `config/cfg.toml` 中各场景的 format="csv" 时,样例数据文件需要提供表头和对应的数据,其中字段分隔符由使用场景中 `separator` 指定,默认逗号。具体格式如下: + +```csv +devid,location,color,devgroup,ts,temperature,humidity +0, beijing, white, 0, 1575129600000, 16, 19.405091 +0, beijing, white, 0, 1575129601000, 22, 14.377142 +``` + + + +[1]: https://github.com/taosdata/TDengine +[2]: https://golang.org/doc/install +[3]: https://www.taosdata.com/cn/documentation/connector/#Go-Connector +[4]: https://www.taosdata.com/cn/documentation/taos-sql/#%E6%95%B0%E6%8D%AE%E6%9F%A5%E8%AF%A2 \ No newline at end of file diff --git a/importSampleData/app/main.go b/importSampleData/app/main.go new file mode 100644 index 0000000000000000000000000000000000000000..aef413320710012fec79e56677e16864a881ca8f --- /dev/null +++ b/importSampleData/app/main.go @@ -0,0 +1,1080 @@ +package main + +import ( + "bufio" + "bytes" + "database/sql" + "encoding/json" 
+ "flag" + "fmt" + "github.com/taosdata/TDengine/importSampleData/import" + "hash/crc32" + "io" + "log" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + + _ "github.com/taosdata/TDengine/src/connector/go/src/taosSql" +) + +const ( + TIMESTAMP = "timestamp" + DATETIME = "datetime" + MILLISECOND = "millisecond" + DEFAULT_STARTTIME int64 = -1 + DEFAULT_INTERVAL int64 = 1*1000 + DEFAULT_DELAY int64 = -1 + DEFAULT_STATISTIC_TABLE = "statistic" + + JSON_FORMAT = "json" + CSV_FORMAT = "csv" + SUPERTABLE_PREFIX = "s_" + SUBTABLE_PREFIX = "t_" + + DRIVER_NAME = "taosSql" + STARTTIME_LAYOUT = "2006-01-02 15:04:05.000" + INSERT_PREFIX = "insert into " +) + +var ( + + cfg string + cases string + hnum int + vnum int + thread int + batch int + auto int + starttimestr string + interval int64 + host string + port int + user string + password string + dropdb int + db string + dbparam string + + dataSourceName string + startTime int64 + + superTableConfigMap = make(map[string]*superTableConfig) + subTableMap = make(map[string]*dataRows) + scaleTableNames []string + + scaleTableMap = make(map[string]*scaleTableInfo) + + successRows []int64 + lastStaticTime time.Time + lastTotalRows int64 + timeTicker *time.Ticker + delay int64 // default 10 milliseconds + tick int64 + save int + saveTable string +) + +type superTableConfig struct { + startTime int64 + endTime int64 + cycleTime int64 + avgInterval int64 + config dataimport.CaseConfig +} + +type scaleTableInfo struct { + scaleTableName string + subTableName string + insertRows int64 +} + +type tableRows struct { + tableName string // tableName + value string // values(...) 
+} + +type dataRows struct { + rows []map[string]interface{} + config dataimport.CaseConfig +} + +func (rows dataRows) Len() int { + return len(rows.rows) +} + +func (rows dataRows) Less(i, j int) bool { + itime := getPrimaryKey(rows.rows[i][rows.config.Timestamp]) + jtime := getPrimaryKey(rows.rows[j][rows.config.Timestamp]) + return itime < jtime +} + +func (rows dataRows) Swap(i, j int) { + rows.rows[i], rows.rows[j] = rows.rows[j], rows.rows[i] +} + +func getPrimaryKey(value interface{}) int64 { + val, _ := value.(int64) + //time, _ := strconv.ParseInt(str, 10, 64) + return val +} + +func init() { + parseArg() //parse argument + + if db == "" { + //db = "go" + db = fmt.Sprintf("test_%s",time.Now().Format("20060102")) + } + + if auto == 1 && len(starttimestr) == 0 { + log.Fatalf("startTime must be set when auto is 1, the format is \"yyyy-MM-dd HH:mm:ss.SSS\" ") + } + + if len(starttimestr) != 0 { + t, err := time.ParseInLocation(STARTTIME_LAYOUT, strings.TrimSpace(starttimestr), time.Local) + if err != nil { + log.Fatalf("param startTime %s error, %s\n", starttimestr, err) + } + + startTime = t.UnixNano() / 1e6 // as millisecond + }else{ + startTime = DEFAULT_STARTTIME + } + + dataSourceName = fmt.Sprintf("%s:%s@/tcp(%s:%d)/", user, password, host, port) + + printArg() + + log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile) +} + +func main() { + + importConfig := dataimport.LoadConfig(cfg) + + var caseMinumInterval int64 = -1 + + for _, userCase := range strings.Split(cases, ",") { + caseConfig, ok := importConfig.UserCases[userCase] + + if !ok { + log.Println("not exist case: ", userCase) + continue + } + + checkUserCaseConfig(userCase, &caseConfig) + + //read file as map array + fileRows := readFile(caseConfig) + log.Printf("case [%s] sample data file contains %d rows.\n", userCase, len(fileRows.rows)) + + if len(fileRows.rows) == 0 { + log.Printf("there is no valid line in file %s\n", caseConfig.FilePath) + continue + } + + _, exists := 
superTableConfigMap[caseConfig.Stname] + if !exists { + superTableConfigMap[caseConfig.Stname] = &superTableConfig{config:caseConfig} + } else { + log.Fatalf("the stname of case %s already exist.\n", caseConfig.Stname) + } + + var start, cycleTime, avgInterval int64 = getSuperTableTimeConfig(fileRows) + + // set super table's startTime, cycleTime and avgInterval + superTableConfigMap[caseConfig.Stname].startTime = start + superTableConfigMap[caseConfig.Stname].avgInterval = avgInterval + superTableConfigMap[caseConfig.Stname].cycleTime = cycleTime + + if caseMinumInterval == -1 || caseMinumInterval > avgInterval { + caseMinumInterval = avgInterval + } + + startStr := time.Unix(0, start*int64(time.Millisecond)).Format(STARTTIME_LAYOUT) + log.Printf("case [%s] startTime %s(%d), average dataInterval %d ms, cycleTime %d ms.\n", userCase, startStr, start, avgInterval, cycleTime) + } + + if DEFAULT_DELAY == delay { + // default delay + delay = caseMinumInterval / 2 + if delay < 1 { + delay = 1 + } + log.Printf("actual delay is %d ms.", delay) + } + + superTableNum := len(superTableConfigMap) + if superTableNum == 0 { + log.Fatalln("no valid file, exited") + } + + start := time.Now() + // create super table + createSuperTable(superTableConfigMap) + log.Printf("create %d superTable ,used %d ms.\n", superTableNum, time.Since(start)/1e6) + + //create sub table + start = time.Now() + createSubTable(subTableMap) + log.Printf("create %d times of %d subtable ,all %d tables, used %d ms.\n", hnum, len(subTableMap), len(scaleTableMap), time.Since(start)/1e6) + + subTableNum := len(scaleTableMap) + + if subTableNum < thread { + thread = subTableNum + } + + filePerThread := subTableNum / thread + leftFileNum := subTableNum % thread + + var wg sync.WaitGroup + + start = time.Now() + + successRows = make([]int64, thread) + + startIndex, endIndex := 0, filePerThread + for i := 0; i < thread; i++ { + // start thread + if i < leftFileNum { + endIndex++ + } + wg.Add(1) + + go insertData(i, 
startIndex, endIndex, &wg, successRows) + startIndex, endIndex = endIndex, endIndex+filePerThread + } + + lastStaticTime = time.Now() + timeTicker = time.NewTicker(time.Millisecond * time.Duration(tick)) + go staticSpeed() + wg.Wait() + + usedTime := time.Since(start) + + total := getTotalRows(successRows) + + log.Printf("finished insert %d rows, used %d ms, speed %d rows/s", total, usedTime/1e6, total * 1e9 / int64(usedTime)) + + if vnum == 0 { + // continue waiting for insert data + wait := make(chan string) + v := <- wait + log.Printf("program receive %s, exited.\n", v) + }else{ + timeTicker.Stop() + } + +} + +func staticSpeed(){ + + connection := getConnection() + defer connection.Close() + + if save == 1 { + connection.Exec("use " + db) + _, err := connection.Exec("create table if not exists " + saveTable +"(ts timestamp, speed int)") + if err != nil { + log.Fatalf("create %s Table error: %s\n", saveTable, err) + } + } + + for { + <-timeTicker.C + + currentTime := time.Now() + usedTime := currentTime.UnixNano() - lastStaticTime.UnixNano() + + total := getTotalRows(successRows) + currentSuccessRows := total - lastTotalRows + + speed := currentSuccessRows * 1e9 / int64(usedTime) + log.Printf("insert %d rows, used %d ms, speed %d rows/s", currentSuccessRows, usedTime/1e6, speed) + + if save == 1 { + insertSql := fmt.Sprintf("insert into %s values(%d, %d)", saveTable, currentTime.UnixNano()/1e6, speed) + connection.Exec(insertSql) + } + + lastStaticTime = currentTime + lastTotalRows = total + } + +} + +func getTotalRows(successRows []int64) int64{ + var total int64 = 0 + for j := 0; j < len(successRows); j++ { + total += successRows[j] + } + return total +} + +func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval int64){ + if auto == 1 { + // use auto generate data time + start = startTime + avgInterval = interval + maxTableRows := normalizationDataWithSameInterval(fileRows, avgInterval) + cycleTime = maxTableRows * avgInterval + 
avgInterval + + } else { + + // use the sample data primary timestamp + sort.Sort(fileRows)// sort the file data by the primarykey + minTime := getPrimaryKey(fileRows.rows[0][fileRows.config.Timestamp]) + maxTime := getPrimaryKey(fileRows.rows[len(fileRows.rows)-1][fileRows.config.Timestamp]) + + start = minTime // default startTime use the minTime + if DEFAULT_STARTTIME != startTime { + start = startTime + } + + tableNum := normalizationData(fileRows, minTime) + + if minTime == maxTime { + avgInterval = interval + cycleTime = tableNum * avgInterval + avgInterval + }else{ + avgInterval = (maxTime - minTime) / int64(len(fileRows.rows)) * tableNum + cycleTime = maxTime - minTime + avgInterval + } + + } + return +} + +func createStatisticTable(){ + connection := getConnection() + defer connection.Close() + + _, err := connection.Exec("create table if not exist " + db + "."+ saveTable +"(ts timestamp, speed int)") + if err != nil { + log.Fatalf("createStatisticTable error: %s\n", err) + } +} + +func createSubTable(subTableMaps map[string]*dataRows) { + + connection := getConnection() + defer connection.Close() + + connection.Exec("use " + db) + + createTablePrefix := "create table if not exists " + for subTableName := range subTableMaps { + + superTableName := getSuperTableName(subTableMaps[subTableName].config.Stname) + tagValues := subTableMaps[subTableName].rows[0] // the first rows values as tags + + buffers := bytes.Buffer{} + // create table t using supertTable tags(...); + for i := 0; i < hnum; i++ { + tableName := getScaleSubTableName(subTableName, i) + + scaleTableMap[tableName] = &scaleTableInfo{ + subTableName: subTableName, + insertRows: 0, + } + scaleTableNames = append(scaleTableNames, tableName) + + buffers.WriteString(createTablePrefix) + buffers.WriteString(tableName) + buffers.WriteString(" using ") + buffers.WriteString(superTableName) + buffers.WriteString(" tags(") + for _, tag := range subTableMaps[subTableName].config.Tags{ + tagValue := 
fmt.Sprintf("%v", tagValues[strings.ToLower(tag.Name)]) + buffers.WriteString("'" + tagValue + "'") + buffers.WriteString(",") + } + buffers.Truncate(buffers.Len()-1) + buffers.WriteString(")") + + createTableSql := buffers.String() + buffers.Reset() + + //log.Printf("create table: %s\n", createTableSql) + _, err := connection.Exec(createTableSql) + if err != nil { + log.Fatalf("create table error: %s\n", err) + } + } + } +} + +func createSuperTable(superTableConfigMap map[string]*superTableConfig) { + + connection := getConnection() + defer connection.Close() + + if dropdb == 1 { + dropDbSql := "drop database if exists " + db + _, err := connection.Exec(dropDbSql) // drop database if exists + if err != nil { + log.Fatalf("drop database error: %s\n", err) + } + log.Printf("dropDb: %s\n", dropDbSql) + } + + createDbSql := "create database if not exists " + db + " " + dbparam + + _, err := connection.Exec(createDbSql) // create database if not exists + if err != nil { + log.Fatalf("create database error: %s\n", err) + } + log.Printf("createDb: %s\n", createDbSql) + + connection.Exec("use " + db) + + prefix := "create table if not exists " + var buffer bytes.Buffer + //CREATE TABLE ( TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …) + for key := range superTableConfigMap { + + buffer.WriteString(prefix) + buffer.WriteString(getSuperTableName(key)) + buffer.WriteString("(") + + superTableConf := superTableConfigMap[key] + + buffer.WriteString(superTableConf.config.Timestamp) + buffer.WriteString(" timestamp, ") + + for _, field := range superTableConf.config.Fields { + buffer.WriteString(field.Name + " " + field.Type + ",") + } + + buffer.Truncate(buffer.Len()-1) + buffer.WriteString(") tags( ") + + for _, tag := range superTableConf.config.Tags { + buffer.WriteString(tag.Name + " " + tag.Type + ",") + } + + buffer.Truncate(buffer.Len()-1) + buffer.WriteString(")") + + createSql := buffer.String() + buffer.Reset() + + //log.Printf("supertable: %s\n", 
createSql) + _, err = connection.Exec(createSql) + if err != nil { + log.Fatalf("create supertable error: %s\n", err) + } + } + +} + +func getScaleSubTableName(subTableName string, hnum int) string { + if hnum == 0 { + return subTableName + } + return fmt.Sprintf( "%s_%d", subTableName, hnum) +} + +func getSuperTableName(stname string) string { + return SUPERTABLE_PREFIX + stname +} + + +/** +* normalizationData , and return the num of subTables + */ +func normalizationData(fileRows dataRows, minTime int64) int64 { + + var tableNum int64 = 0 + for _, row := range fileRows.rows { + // get subTableName + tableValue := getSubTableNameValue(row[fileRows.config.SubTableName]) + if len(tableValue) == 0 { + continue + } + + row[fileRows.config.Timestamp] = getPrimaryKey(row[fileRows.config.Timestamp]) - minTime + + subTableName := getSubTableName(tableValue, fileRows.config.Stname) + + value, ok := subTableMap[subTableName] + if !ok { + subTableMap[subTableName] = &dataRows{ + rows: []map[string]interface{}{row}, + config: fileRows.config, + } + + tableNum++ + }else{ + value.rows = append(value.rows, row) + } + } + return tableNum +} + +// return the maximum table rows +func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int64{ + // subTableMap + currSubTableMap := make(map[string]*dataRows) + for _, row := range fileRows.rows { + // get subTableName + tableValue := getSubTableNameValue(row[fileRows.config.SubTableName]) + if len(tableValue) == 0 { + continue + } + + subTableName := getSubTableName(tableValue, fileRows.config.Stname) + + value, ok := currSubTableMap[subTableName] + if !ok { + row[fileRows.config.Timestamp] = 0 + currSubTableMap[subTableName] = &dataRows{ + rows: []map[string]interface{}{row}, + config: fileRows.config, + } + }else{ + row[fileRows.config.Timestamp] = int64(len(value.rows)) * avgInterval + value.rows = append(value.rows, row) + } + + } + + var maxRows, tableRows int = 0, 0 + for tableName := range currSubTableMap{ + 
tableRows = len(currSubTableMap[tableName].rows) + subTableMap[tableName] = currSubTableMap[tableName] // add to global subTableMap + if tableRows > maxRows { + maxRows = tableRows + } + } + + return int64(maxRows) +} + + +func getSubTableName(subTableValue string, superTableName string) string { + return SUBTABLE_PREFIX + subTableValue + "_" + superTableName +} + + +func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []int64) { + connection := getConnection() + defer connection.Close() + defer wg.Done() + + connection.Exec("use " + db) // use db + + log.Printf("thread-%d start insert into [%d, %d) subtables.\n", threadIndex, start, end) + + num := 0 + subTables := scaleTableNames[start:end] + for { + var currSuccessRows int64 + var appendRows int + var lastTableName string + + buffers := bytes.Buffer{} + buffers.WriteString(INSERT_PREFIX) + + for _, tableName := range subTables { + + subTableInfo := subTableMap[scaleTableMap[tableName].subTableName] + subTableRows := int64(len(subTableInfo.rows)) + superTableConf := superTableConfigMap[subTableInfo.config.Stname] + + tableStartTime := superTableConf.startTime + var tableEndTime int64 + if vnum == 0 { + // need continue generate data + tableEndTime = time.Now().UnixNano()/1e6 + }else { + tableEndTime = tableStartTime + superTableConf.cycleTime * int64(vnum) - superTableConf.avgInterval + } + + insertRows := scaleTableMap[tableName].insertRows + + for { + loopNum := insertRows / subTableRows + rowIndex := insertRows % subTableRows + currentRow := subTableInfo.rows[rowIndex] + + currentTime := getPrimaryKey(currentRow[subTableInfo.config.Timestamp]) + loopNum * superTableConf.cycleTime + tableStartTime + if currentTime <= tableEndTime { + // append + + if lastTableName != tableName { + buffers.WriteString(tableName) + buffers.WriteString(" values") + } + lastTableName = tableName + + buffers.WriteString("(") + buffers.WriteString(fmt.Sprintf("%v", currentTime)) + buffers.WriteString(",") + + 
// fieldNum := len(subTableInfo.config.Fields) + for _,field := range subTableInfo.config.Fields { + buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)])) + buffers.WriteString(",") + // if( i != fieldNum -1){ + + // } + } + + buffers.Truncate(buffers.Len()-1) + buffers.WriteString(") ") + + appendRows++ + insertRows++ + if appendRows == batch { + // executebatch + insertSql := buffers.String() + affectedRows := executeBatchInsert(insertSql, connection) + + successRows[threadIndex] += affectedRows + currSuccessRows += affectedRows + + buffers.Reset() + buffers.WriteString(INSERT_PREFIX) + lastTableName = "" + appendRows = 0 + } + }else { + // finished insert current table + break + } + } + + scaleTableMap[tableName].insertRows = insertRows + + } + + // left := len(rows) + if appendRows > 0 { + // executebatch + insertSql := buffers.String() + affectedRows := executeBatchInsert(insertSql, connection) + + successRows[threadIndex] += affectedRows + currSuccessRows += affectedRows + + buffers.Reset() + } + + // log.Printf("thread-%d finished insert %d rows, used %d ms.", threadIndex, currSuccessRows, time.Since(threadStartTime)/1e6) + + if vnum != 0 { + // thread finished insert data + // log.Printf("thread-%d exit\n", threadIndex) + break + } + + if(num == 0){ + wg.Done() //finished insert history data + num++ + } + + if currSuccessRows == 0 { + // log.Printf("thread-%d start to sleep %d ms.", threadIndex, delay) + time.Sleep(time.Duration(delay) * time.Millisecond) + } + + // need continue insert data + } + +} + +func buildSql(rows []tableRows) string{ + + var lastTableName string + + buffers := bytes.Buffer{} + + for i, row := range rows { + if i == 0 { + lastTableName = row.tableName + buffers.WriteString(INSERT_PREFIX) + buffers.WriteString(row.tableName) + buffers.WriteString(" values") + buffers.WriteString(row.value) + continue + } + + if lastTableName == row.tableName { + buffers.WriteString(row.value) + }else { + buffers.WriteString(" ") 
+ buffers.WriteString(row.tableName) + buffers.WriteString(" values") + buffers.WriteString(row.value) + lastTableName = row.tableName + } + } + + inserSql := buffers.String() + return inserSql +} + +func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, currentRow map[string]interface{}) tableRows{ + + tableRows := tableRows{tableName: tableName} + + buffers := bytes.Buffer{} + + buffers.WriteString("(") + buffers.WriteString(fmt.Sprintf("%v", currentTime)) + buffers.WriteString(",") + + for _,field := range subTableInfo.config.Fields { + buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)])) + buffers.WriteString(",") + } + + buffers.Truncate(buffers.Len()-1) + buffers.WriteString(")") + + insertSql := buffers.String() + tableRows.value = insertSql + + return tableRows +} + +func executeBatchInsert(insertSql string, connection *sql.DB) int64 { + result, error := connection.Exec(insertSql) + if error != nil { + log.Printf("execute insertSql %s error, %s\n", insertSql, error) + return 0 + } + affected, _ := result.RowsAffected() + if affected < 0 { + affected = 0 + } + return affected + // return 0 +} + +func getFieldValue(fieldValue interface{}) string { + return fmt.Sprintf("'%v'", fieldValue) +} + +func getConnection() *sql.DB{ + db, err := sql.Open(DRIVER_NAME, dataSourceName) + if err != nil { + panic(err) + } + return db +} + + +func getSubTableNameValue(suffix interface{}) string { + return fmt.Sprintf("%v", suffix) +} + +func hash(s string) int { + v := int(crc32.ChecksumIEEE([]byte(s))) + if v < 0 { + return -v + } + return v +} + +func readFile(config dataimport.CaseConfig) dataRows { + fileFormat := strings.ToLower(config.Format) + if fileFormat == JSON_FORMAT { + return readJSONFile(config) + } else if fileFormat == CSV_FORMAT { + return readCSVFile(config) + } + + log.Printf("the file %s is not supported yet\n", config.FilePath) + return dataRows{} +} + +func readCSVFile(config dataimport.CaseConfig) dataRows { 
+ var rows dataRows + f, err := os.Open(config.FilePath) + if err != nil { + log.Printf("Error: %s, %s\n", config.FilePath, err) + return rows + } + defer f.Close() + + r := bufio.NewReader(f) + + //read the first line as title + lineBytes, _, err := r.ReadLine() + if err == io.EOF { + log.Printf("the file %s is empty\n", config.FilePath) + return rows + } + line := strings.ToLower(string(lineBytes)) + titles := strings.Split(line, config.Separator) + if len(titles) < 3 { + // need suffix、 primarykey and at least one other field + log.Printf("the first line of file %s should be title row, and at least 3 field.\n", config.FilePath) + return rows + } + + rows.config = config + + var lineNum = 0 + for { + // read data row + lineBytes, _, err = r.ReadLine() + lineNum++ + if err == io.EOF { + break + } + // fmt.Println(line) + rowData := strings.Split(string(lineBytes), config.Separator) + + dataMap := make(map[string]interface{}) + for i, title := range titles { + title = strings.TrimSpace(title) + if i < len(rowData) { + dataMap[title] = strings.TrimSpace(rowData[i]) + } else { + dataMap[title] = "" + } + } + + // if the suffix valid + if !existMapKeyAndNotEmpty(config.Timestamp, dataMap) { + log.Printf("the Timestamp[%s] of line %d is empty, will filtered.\n", config.Timestamp, lineNum) + continue + } + + // if the primary key valid + primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap) + if primaryKeyValue == -1 { + log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum) + continue + } + + dataMap[config.Timestamp] = primaryKeyValue + + rows.rows = append(rows.rows, dataMap) + } + return rows +} + +func readJSONFile(config dataimport.CaseConfig) dataRows { + + var rows dataRows + f, err := os.Open(config.FilePath) + if err != nil { + log.Printf("Error: %s, %s\n", config.FilePath, err) + return rows + } + defer f.Close() + + r := bufio.NewReader(f) + 
//log.Printf("file size %d\n", r.Size()) + + rows.config = config + var lineNum = 0 + for { + lineBytes, _, err := r.ReadLine() + lineNum++ + if err == io.EOF { + break + } + + line := make(map[string]interface{}) + err = json.Unmarshal(lineBytes, &line) + + if err != nil { + log.Printf("line [%d] of file %s parse error, reason: %s\n", lineNum, config.FilePath, err) + continue + } + + // transfer the key to lowercase + lowerMapKey(line) + + if !existMapKeyAndNotEmpty(config.SubTableName, line) { + log.Printf("the SubTableName[%s] of line %d is empty, will filtered.\n", config.SubTableName, lineNum) + continue + } + + primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line) + if primaryKeyValue == -1 { + log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum) + continue + } + + line[config.Timestamp] = primaryKeyValue + + rows.rows = append(rows.rows, line) + } + + return rows +} + +/** +* get primary key as millisecond , otherwise return -1 + */ +func getPrimaryKeyMillisec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 { + if !existMapKeyAndNotEmpty(key, line) { + return -1 + } + if DATETIME == valueType { + // transfer the datetime to milliseconds + return parseMillisecond(line[key], valueFormat) + } + + value, err := strconv.ParseInt(fmt.Sprintf("%v", line[key]), 10, 64) + // as millisecond num + if err != nil { + return -1 + } + return value +} + +// parseMillisecond parse the dateStr to millisecond, return -1 if failed +func parseMillisecond(str interface{}, layout string) int64 { + value, ok := str.(string) + if !ok { + return -1 + } + + t, err := time.ParseInLocation(layout, strings.TrimSpace(value), time.Local) + + if err != nil { + log.Println(err) + return -1 + } + return t.UnixNano()/1e6 +} + +// lowerMapKey transfer all the map key to lowercase +func lowerMapKey(maps map[string]interface{}) { + for key := range maps 
{ + value := maps[key] + delete(maps, key) + maps[strings.ToLower(key)] = value + } +} + +func existMapKeyAndNotEmpty(key string, maps map[string]interface{}) bool { + value, ok := maps[key] + if !ok { + return false + } + + str, err := value.(string) + if err && len(str) == 0 { + return false + } + return true +} + +func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) { + + if len(caseConfig.Stname) == 0 { + log.Fatalf("the stname of case %s can't be empty\n", caseName) + } + + caseConfig.Stname = strings.ToLower(caseConfig.Stname) + + if len(caseConfig.Tags) == 0 { + log.Fatalf("the tags of case %s can't be empty\n", caseName) + } + + if len(caseConfig.Fields) == 0 { + log.Fatalf("the fields of case %s can't be empty\n", caseName) + } + + if len(caseConfig.SubTableName) == 0 { + log.Fatalf("the suffix of case %s can't be empty\n", caseName) + } + + caseConfig.SubTableName = strings.ToLower(caseConfig.SubTableName) + + caseConfig.Timestamp = strings.ToLower(caseConfig.Timestamp) + + var timestampExist = false + for i, field := range caseConfig.Fields { + if strings.EqualFold(field.Name, caseConfig.Timestamp) { + if strings.ToLower(field.Type) != TIMESTAMP { + log.Fatalf("case %s's primaryKey %s field type is %s, it must be timestamp\n", caseName, caseConfig.Timestamp, field.Type) + } + timestampExist = true + if i < len(caseConfig.Fields)-1 { + // delete middle item, a = a[:i+copy(a[i:], a[i+1:])] + caseConfig.Fields = caseConfig.Fields[:i+copy(caseConfig.Fields[i:], caseConfig.Fields[i+1:])] + }else { + // delete the last item + caseConfig.Fields = caseConfig.Fields[:len(caseConfig.Fields)-1] + } + break + } + } + + if !timestampExist { + log.Fatalf("case %s primaryKey %s is not exist in fields\n", caseName, caseConfig.Timestamp) + } + + caseConfig.TimestampType = strings.ToLower(caseConfig.TimestampType) + if caseConfig.TimestampType != MILLISECOND && caseConfig.TimestampType != DATETIME { + log.Fatalf("case %s's timestampType %s error, 
only can be timestamp or datetime\n", caseName, caseConfig.TimestampType) + } + + if caseConfig.TimestampType == DATETIME && len(caseConfig.TimestampTypeFormat) == 0 { + log.Fatalf("case %s's timestampTypeFormat %s can't be empty when timestampType is datetime\n", caseName, caseConfig.TimestampTypeFormat) + } + +} + +func parseArg() { + flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes usecase and data format.") + flag.StringVar(&cases, "cases", "sensor_info", "usecase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.") + flag.IntVar(&hnum, "hnum", 100, "magnification factor of the sample tables. For example, if hnum is 100 and in the sample data there are 10 tables, then 10x100=1000 tables will be created in the database.") + flag.IntVar(&vnum, "vnum", 1000, "copies of the sample records in each table. If set to 0,this program will never stop simulating and importing data even if the timestamp has passed current time.") + flag.Int64Var(&delay, "delay", DEFAULT_DELAY, "the delay time interval(millisecond) to continue generating data when vnum set 0.") + flag.Int64Var(&tick, "tick", 2000, "the tick time interval(millisecond) to print statistic info.") + flag.IntVar(&save, "save", 0, "whether to save the statistical info into 'statistic' table. 0 is disabled and 1 is enabled.") + flag.StringVar(&saveTable, "savetb", DEFAULT_STATISTIC_TABLE, "the table to save 'statistic' info when save set 1.") + flag.IntVar(&thread, "thread", 10, "number of threads to import data.") + flag.IntVar(&batch, "batch", 100, "rows of records in one import batch.") + flag.IntVar(&auto, "auto", 0, "whether to use the starttime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.") + flag.StringVar(&starttimestr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. 
If not specified, the ealiest timestamp in the sample data will be set as the starttime.") + flag.Int64Var(&interval, "interval", DEFAULT_INTERVAL, "time inteval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.") + flag.StringVar(&host, "host", "127.0.0.1", "tdengine server ip.") + flag.IntVar(&port, "port", 6030, "tdengine server port.") + flag.StringVar(&user, "user", "root", "user name to login into the database.") + flag.StringVar(&password, "password", "taosdata", "the import tdengine user password") + flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing datbase. 1 is yes and 0 otherwise.") + flag.StringVar(&db, "db", "", "name of the database to store data.") + flag.StringVar(&dbparam, "dbparam", "", "database configurations when it is created.") + + flag.Parse() +} + +func printArg() { + fmt.Println("used param: ") + fmt.Println("-cfg: ", cfg) + fmt.Println("-cases:", cases) + fmt.Println("-hnum:", hnum) + fmt.Println("-vnum:", vnum) + fmt.Println("-delay:", delay) + fmt.Println("-tick:", tick) + fmt.Println("-save:", save) + fmt.Println("-savetb:", saveTable) + fmt.Println("-thread:", thread) + fmt.Println("-batch:", batch) + fmt.Println("-auto:", auto) + fmt.Println("-start:", starttimestr) + fmt.Println("-interval:", interval) + fmt.Println("-host:", host) + fmt.Println("-port", port) + fmt.Println("-user", user) + fmt.Println("-password", password) + fmt.Println("-dropdb", dropdb) + fmt.Println("-db", db) + fmt.Println("-dbparam", dbparam) +} diff --git a/importSampleData/bin/taosimport b/importSampleData/bin/taosimport new file mode 100755 index 0000000000000000000000000000000000000000..b042549341bced364e0fd77909b115d1b5b6dc04 Binary files /dev/null and b/importSampleData/bin/taosimport differ diff --git a/importSampleData/config/cfg.toml b/importSampleData/config/cfg.toml new file mode 100644 index 0000000000000000000000000000000000000000..52a5d5f3169d21ce17039ead956250a636b37a01 --- /dev/null +++ 
b/importSampleData/config/cfg.toml @@ -0,0 +1,51 @@ +# 传感器场景 +[sensor_info] # 场景名称 +format = "csv" # 样例数据文件格式,可以是 json 或 csv,具体字段应至少包含 subTableName、tags、fields 指定的字段。 +filePath = "data/sensor_info.csv" # 样例数据文件路径,程序会循环使用该文件数据 +separator = "," # csv 样例文件中字段分隔符,默认逗号 + +stname = "sensor_info" # 超级表名称 +subTableName = "devid" # 使用样例数据中指定字段当作子表名称一部分,子表名称格式为 t_subTableName_stname,扩展表名为 t_subTableName_stname_i。 +timestamp = "ts" # 使用 fields 中哪个字段当作主键,类型必须为 timestamp +timestampType="millisecond" # 样例数据中主键时间字段是 millisecond 还是 dateTime 格式 +#timestampTypeFormat = "2006-01-02 15:04:05.000" # 主键日期时间格式,timestampType 为 dateTime 时需要指定 +tags = [ + # 标签列表,name 为标签名称,type 为标签类型 + { name = "location", type = "binary(20)" }, + { name = "color", type = "binary(16)" }, + { name = "devgroup", type = "int" }, +] + +fields = [ + # 字段列表,name 为字段名称,type 为字段类型 + { name = "ts", type = "timestamp" }, + { name = "temperature", type = "int" }, + { name = "humidity", type = "float" }, +] + +# 摄像头检测场景 +[camera_detection] # 场景名称 +format = "json" # 样例数据文件格式,可以是 json 或 csv,具体字段应至少包含 subTableName、tags、fields 指定的字段。 +filePath = "data/camera_detection.json" # 样例数据文件路径,程序会循环使用该文件数据 +#separator = "," # csv 样例文件中字段分隔符,默认逗号, 如果是 json 文件可以不用配置 + +stname = "camera_detection" # 超级表名称 +subTableName = "sensor_id" # 使用样例数据中指定字段当作子表名称一部分,子表名称格式为 t_subTableName_stname,扩展表名为 t_subTableName_stname_i。 +timestamp = "ts" # 使用 fields 中哪个字段当作主键,类型必须为 timestamp +timestampType="dateTime" # 样例数据中主键时间字段是 millisecond 还是 dateTime 格式 +timestampTypeFormat = "2006-01-02 15:04:05.000" # 主键日期时间格式,timestampType 为 dateTime 时需要指定 +tags = [ + # 标签列表,name 为标签名称,type 为标签类型 + { name = "home_id", type = "binary(30)" }, + { name = "object_type", type = "int" }, + { name = "object_kind", type = "binary(20)" }, +] + +fields = [ + # 字段列表,name 为字段名称,type 为字段类型 + { name = "ts", type = "timestamp" }, + { name = "states", type = "tinyint" }, + { name = "battery_voltage", type = "float" }, +] + +# other case \ No newline at end of file diff --git 
a/importSampleData/dashboard/sensor_info.json b/importSampleData/dashboard/sensor_info.json new file mode 100644 index 0000000000000000000000000000000000000000..6dcf5505f2a1a2db3a10cb9c7bed47ac5dc3687c --- /dev/null +++ b/importSampleData/dashboard/sensor_info.json @@ -0,0 +1,380 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 7, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": null, + "format": "celsius", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 6, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "alias": "lastest_temperature", + "refId": "A", + "sql": "select ts, temp from test.stream_temp_last where ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": "20,30", + "timeFrom": null, + "timeShift": null, + "title": "最新温度", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + 
{ + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "datasource": null, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 8, + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "decimals": 2, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ], + "title": "" + }, + "override": {}, + "values": false + }, + "orientation": "auto", + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "6.4.3", + "targets": [ + { + "alias": "maxHumidity", + "refId": "A", + "sql": "select ts, humidity from test.stream_humidity_max where ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "最大湿度", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "avgTemperature", + "refId": "A", + "sql": "select ts, temp from test.stream_temp_avg where ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "平均温度", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + 
"name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "celsius", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "max", + "refId": "A", + "sql": "select ts, max_temp from test.stream_sensor where ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "avg", + "refId": "B", + "sql": "select ts, avg_temp from test.stream_sensor where ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "min", + "refId": "C", + "sql": "select ts, min_temp from test.stream_sensor where ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "某传感器", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "celsius", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 20, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "sensor_info", + "uid": "dGSoaTLWz", + "version": 2 +} \ No newline at end of file diff --git a/importSampleData/data/camera_detection.json b/importSampleData/data/camera_detection.json new file mode 100644 index 0000000000000000000000000000000000000000..cf67e38fa71255fc63ada2a05f1891e2e509fc2f --- /dev/null +++ b/importSampleData/data/camera_detection.json @@ -0,0 +1,1000 @@ +{"battery_voltage":0.80233014,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:00.000"} +{"battery_voltage":0.83228004,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:01.000"} +{"battery_voltage":0.7123188,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:02.000"} +{"battery_voltage":0.5328185,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:03.000"} +{"battery_voltage":0.54848474,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:04.000"} +{"battery_voltage":0.7576063,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:05.000"} +{"battery_voltage":0.60713196,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:06.000"} +{"battery_voltage":0.65902907,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 
00:00:07.000"} +{"battery_voltage":0.64151704,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:08.000"} +{"battery_voltage":0.8395423,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:09.000"} +{"battery_voltage":0.60159343,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:10.000"} +{"battery_voltage":0.7853366,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:11.000"} +{"battery_voltage":0.6465571,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:12.000"} +{"battery_voltage":0.8762865,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:13.000"} +{"battery_voltage":0.9326675,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:14.000"} +{"battery_voltage":0.76191014,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:15.000"} +{"battery_voltage":0.57916415,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:16.000"} +{"battery_voltage":0.98762083,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:17.000"} +{"battery_voltage":0.7974043,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:18.000"} +{"battery_voltage":0.8460123,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:19.000"} +{"battery_voltage":0.5866331,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:20.000"} 
+{"battery_voltage":0.7720778,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:21.000"} +{"battery_voltage":0.7115761,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:22.000"} +{"battery_voltage":0.62677026,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:23.000"} +{"battery_voltage":0.8943025,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:24.000"} +{"battery_voltage":0.94027156,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:25.000"} +{"battery_voltage":0.94718087,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:26.000"} +{"battery_voltage":0.9884584,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:27.000"} +{"battery_voltage":0.6111447,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:28.000"} +{"battery_voltage":0.6207575,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:29.000"} +{"battery_voltage":0.9664232,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:30.000"} +{"battery_voltage":0.9005275,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:31.000"} +{"battery_voltage":0.59146243,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:32.000"} +{"battery_voltage":0.948496,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:33.000"} 
+{"battery_voltage":0.98946464,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:34.000"} +{"battery_voltage":0.5454186,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:35.000"} +{"battery_voltage":0.9634934,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:36.000"} +{"battery_voltage":0.673977,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:37.000"} +{"battery_voltage":0.8554536,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:38.000"} +{"battery_voltage":0.8247447,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:39.000"} +{"battery_voltage":0.87791175,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:40.000"} +{"battery_voltage":0.56532556,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:41.000"} +{"battery_voltage":0.9481709,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:42.000"} +{"battery_voltage":0.8605739,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:43.000"} +{"battery_voltage":0.54276025,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:44.000"} +{"battery_voltage":0.8113642,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:45.000"} +{"battery_voltage":0.6184113,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:46.000"} 
+{"battery_voltage":0.59362304,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:47.000"} +{"battery_voltage":0.8140491,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:48.000"} +{"battery_voltage":0.6406652,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:49.000"} +{"battery_voltage":0.7174562,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:50.000"} +{"battery_voltage":0.77507347,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:51.000"} +{"battery_voltage":0.8645904,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:52.000"} +{"battery_voltage":0.5002569,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:53.000"} +{"battery_voltage":0.6999919,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:54.000"} +{"battery_voltage":0.8019891,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:55.000"} +{"battery_voltage":0.51483566,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:56.000"} +{"battery_voltage":0.5014215,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:57.000"} +{"battery_voltage":0.7949171,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:00:58.000"} +{"battery_voltage":0.90770257,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:00:59.000"} 
+{"battery_voltage":0.7292212,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:00.000"} +{"battery_voltage":0.5131326,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:01.000"} +{"battery_voltage":0.6248466,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:02.000"} +{"battery_voltage":0.6237333,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:03.000"} +{"battery_voltage":0.79631186,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:04.000"} +{"battery_voltage":0.84691906,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:05.000"} +{"battery_voltage":0.76960504,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:06.000"} +{"battery_voltage":0.8753815,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:07.000"} +{"battery_voltage":0.8765806,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:08.000"} +{"battery_voltage":0.6778836,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:09.000"} +{"battery_voltage":0.615915,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:10.000"} +{"battery_voltage":0.7491971,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:11.000"} +{"battery_voltage":0.51259696,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:12.000"} 
+{"battery_voltage":0.79469156,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:13.000"} +{"battery_voltage":0.7860434,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:14.000"} +{"battery_voltage":0.70588136,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:15.000"} +{"battery_voltage":0.7458037,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:16.000"} +{"battery_voltage":0.8986043,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:17.000"} +{"battery_voltage":0.8915175,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:18.000"} +{"battery_voltage":0.56520694,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:19.000"} +{"battery_voltage":0.86991286,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:20.000"} +{"battery_voltage":0.5491919,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:21.000"} +{"battery_voltage":0.5498648,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:22.000"} +{"battery_voltage":0.5380951,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:23.000"} +{"battery_voltage":0.57982546,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:24.000"} +{"battery_voltage":0.6613053,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:25.000"} 
+{"battery_voltage":0.7854258,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:26.000"} +{"battery_voltage":0.84208757,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:27.000"} +{"battery_voltage":0.7622499,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:28.000"} +{"battery_voltage":0.8581842,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:29.000"} +{"battery_voltage":0.506413,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:30.000"} +{"battery_voltage":0.54901546,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:31.000"} +{"battery_voltage":0.9132271,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":1,"ts":"2019-12-01 00:01:32.000"} +{"battery_voltage":0.6721575,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:33.000"} +{"battery_voltage":0.6082356,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:34.000"} +{"battery_voltage":0.70103544,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:35.000"} +{"battery_voltage":0.58433986,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:36.000"} +{"battery_voltage":0.91396403,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:37.000"} +{"battery_voltage":0.52896315,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:38.000"} 
+{"battery_voltage":0.7057702,"home_id":"603","object_kind":"night","object_type":1,"sensor_id":"s100","states":0,"ts":"2019-12-01 00:01:39.000"} +{"battery_voltage":0.89037704,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:00.000"} +{"battery_voltage":0.5267473,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:01.000"} +{"battery_voltage":0.6253811,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:02.000"} +{"battery_voltage":0.986941,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:03.000"} +{"battery_voltage":0.51076686,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:04.000"} +{"battery_voltage":0.54648507,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:05.000"} +{"battery_voltage":0.6559428,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:06.000"} +{"battery_voltage":0.7436196,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:07.000"} +{"battery_voltage":0.83591455,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:08.000"} +{"battery_voltage":0.9501376,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:09.000"} +{"battery_voltage":0.65966564,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:10.000"} +{"battery_voltage":0.7002162,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:11.000"} 
+{"battery_voltage":0.8225194,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:12.000"} +{"battery_voltage":0.6697984,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:13.000"} +{"battery_voltage":0.6181637,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:14.000"} +{"battery_voltage":0.51787734,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:15.000"} +{"battery_voltage":0.8129183,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:16.000"} +{"battery_voltage":0.5362242,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:17.000"} +{"battery_voltage":0.93992245,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:18.000"} +{"battery_voltage":0.92375016,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:19.000"} +{"battery_voltage":0.6239222,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:20.000"} +{"battery_voltage":0.5375186,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:21.000"} +{"battery_voltage":0.81466585,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:22.000"} +{"battery_voltage":0.8160017,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:23.000"} +{"battery_voltage":0.5074137,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:24.000"} 
+{"battery_voltage":0.5343781,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:25.000"} +{"battery_voltage":0.8245942,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:26.000"} +{"battery_voltage":0.91740286,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:27.000"} +{"battery_voltage":0.8306966,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:28.000"} +{"battery_voltage":0.65525514,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:29.000"} +{"battery_voltage":0.9835472,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:30.000"} +{"battery_voltage":0.6547742,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:31.000"} +{"battery_voltage":0.7086629,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:32.000"} +{"battery_voltage":0.70336837,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:33.000"} +{"battery_voltage":0.9790882,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:34.000"} +{"battery_voltage":0.8958361,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:35.000"} +{"battery_voltage":0.50759065,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:36.000"} +{"battery_voltage":0.9523881,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:37.000"} 
+{"battery_voltage":0.52146083,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:38.000"} +{"battery_voltage":0.6739295,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:39.000"} +{"battery_voltage":0.91997373,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:40.000"} +{"battery_voltage":0.5621818,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:41.000"} +{"battery_voltage":0.9174738,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:42.000"} +{"battery_voltage":0.5038406,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:43.000"} +{"battery_voltage":0.68513376,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:44.000"} +{"battery_voltage":0.821602,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:45.000"} +{"battery_voltage":0.89556265,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:46.000"} +{"battery_voltage":0.67343193,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:47.000"} +{"battery_voltage":0.91104645,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:48.000"} +{"battery_voltage":0.79959714,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:49.000"} +{"battery_voltage":0.7067905,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:50.000"} 
+{"battery_voltage":0.95580685,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:51.000"} +{"battery_voltage":0.6144588,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:52.000"} +{"battery_voltage":0.67538255,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:53.000"} +{"battery_voltage":0.65190107,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:54.000"} +{"battery_voltage":0.8357633,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:55.000"} +{"battery_voltage":0.9815697,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:56.000"} +{"battery_voltage":0.90397054,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:57.000"} +{"battery_voltage":0.9738802,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:00:58.000"} +{"battery_voltage":0.9766294,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:00:59.000"} +{"battery_voltage":0.5907954,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:00.000"} +{"battery_voltage":0.9156205,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:01.000"} +{"battery_voltage":0.92765516,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:02.000"} +{"battery_voltage":0.63674736,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:03.000"} 
+{"battery_voltage":0.95488065,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:04.000"} +{"battery_voltage":0.7493162,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:05.000"} +{"battery_voltage":0.98794764,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:06.000"} +{"battery_voltage":0.5224953,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:07.000"} +{"battery_voltage":0.9759531,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:08.000"} +{"battery_voltage":0.76789546,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:09.000"} +{"battery_voltage":0.9325875,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:10.000"} +{"battery_voltage":0.7892754,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:11.000"} +{"battery_voltage":0.7753079,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:12.000"} +{"battery_voltage":0.7549327,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:13.000"} +{"battery_voltage":0.745397,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:14.000"} +{"battery_voltage":0.6312453,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:15.000"} +{"battery_voltage":0.68574333,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:16.000"} 
+{"battery_voltage":0.70787597,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:17.000"} +{"battery_voltage":0.9508138,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:18.000"} +{"battery_voltage":0.6369623,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:19.000"} +{"battery_voltage":0.92772424,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:20.000"} +{"battery_voltage":0.9945661,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:21.000"} +{"battery_voltage":0.585473,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:22.000"} +{"battery_voltage":0.7667257,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:23.000"} +{"battery_voltage":0.9067954,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:24.000"} +{"battery_voltage":0.62860376,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:25.000"} +{"battery_voltage":0.66754717,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:26.000"} +{"battery_voltage":0.5024399,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:27.000"} +{"battery_voltage":0.6147868,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:28.000"} +{"battery_voltage":0.9749687,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:29.000"} 
+{"battery_voltage":0.9813121,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:30.000"} +{"battery_voltage":0.85633135,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:31.000"} +{"battery_voltage":0.70376605,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:32.000"} +{"battery_voltage":0.6737342,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:33.000"} +{"battery_voltage":0.79878306,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:34.000"} +{"battery_voltage":0.91642797,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:35.000"} +{"battery_voltage":0.96835375,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:36.000"} +{"battery_voltage":0.86015654,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:37.000"} +{"battery_voltage":0.725077,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":0,"ts":"2019-12-01 00:01:38.000"} +{"battery_voltage":0.736246,"home_id":"604","object_kind":"day","object_type":2,"sensor_id":"s101","states":1,"ts":"2019-12-01 00:01:39.000"} +{"battery_voltage":0.68116575,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:00.000"} +{"battery_voltage":0.5239342,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:01.000"} +{"battery_voltage":0.8781051,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:02.000"} 
+{"battery_voltage":0.61049944,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:03.000"} +{"battery_voltage":0.6954212,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:04.000"} +{"battery_voltage":0.57484275,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:05.000"} +{"battery_voltage":0.88279426,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:06.000"} +{"battery_voltage":0.727722,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:07.000"} +{"battery_voltage":0.54098475,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:08.000"} +{"battery_voltage":0.6331909,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:09.000"} +{"battery_voltage":0.5495351,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:10.000"} +{"battery_voltage":0.57960176,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:11.000"} +{"battery_voltage":0.8157383,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:12.000"} +{"battery_voltage":0.9837526,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:13.000"} +{"battery_voltage":0.66909057,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:14.000"} +{"battery_voltage":0.918733,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:15.000"} 
+{"battery_voltage":0.75111043,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:16.000"} +{"battery_voltage":0.73151976,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:17.000"} +{"battery_voltage":0.87203634,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:18.000"} +{"battery_voltage":0.6242085,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:19.000"} +{"battery_voltage":0.7118511,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:20.000"} +{"battery_voltage":0.8284241,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:21.000"} +{"battery_voltage":0.81839544,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:22.000"} +{"battery_voltage":0.6934307,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:23.000"} +{"battery_voltage":0.5631822,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:24.000"} +{"battery_voltage":0.7556696,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:25.000"} +{"battery_voltage":0.9973032,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:26.000"} +{"battery_voltage":0.8636595,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:27.000"} +{"battery_voltage":0.7570118,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:28.000"} 
+{"battery_voltage":0.7728013,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:29.000"} +{"battery_voltage":0.6466422,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:30.000"} +{"battery_voltage":0.57088935,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:31.000"} +{"battery_voltage":0.8156741,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:32.000"} +{"battery_voltage":0.5007058,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:33.000"} +{"battery_voltage":0.94389606,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:34.000"} +{"battery_voltage":0.7980893,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:35.000"} +{"battery_voltage":0.9149192,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:36.000"} +{"battery_voltage":0.5329674,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:37.000"} +{"battery_voltage":0.667759,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:38.000"} +{"battery_voltage":0.8095149,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:39.000"} +{"battery_voltage":0.66232204,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:40.000"} +{"battery_voltage":0.54209346,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:41.000"} 
+{"battery_voltage":0.8437841,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:42.000"} +{"battery_voltage":0.51106554,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:43.000"} +{"battery_voltage":0.5391229,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:44.000"} +{"battery_voltage":0.6142876,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:45.000"} +{"battery_voltage":0.63602245,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:46.000"} +{"battery_voltage":0.83091503,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:47.000"} +{"battery_voltage":0.98437226,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:48.000"} +{"battery_voltage":0.6822,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:49.000"} +{"battery_voltage":0.60308766,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:50.000"} +{"battery_voltage":0.88321567,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:51.000"} +{"battery_voltage":0.64395475,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:52.000"} +{"battery_voltage":0.726102,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:53.000"} +{"battery_voltage":0.6945282,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:54.000"} 
+{"battery_voltage":0.5037642,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:55.000"} +{"battery_voltage":0.50224465,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:56.000"} +{"battery_voltage":0.61892045,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:57.000"} +{"battery_voltage":0.8965783,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:00:58.000"} +{"battery_voltage":0.72004735,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:00:59.000"} +{"battery_voltage":0.89201033,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:00.000"} +{"battery_voltage":0.55109394,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:01.000"} +{"battery_voltage":0.5819292,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:02.000"} +{"battery_voltage":0.56059873,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:03.000"} +{"battery_voltage":0.99916655,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:04.000"} +{"battery_voltage":0.5516443,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:05.000"} +{"battery_voltage":0.65729505,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:06.000"} +{"battery_voltage":0.57163346,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:07.000"} 
+{"battery_voltage":0.843902,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:08.000"} +{"battery_voltage":0.51640797,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:09.000"} +{"battery_voltage":0.6674092,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:10.000"} +{"battery_voltage":0.67429006,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:11.000"} +{"battery_voltage":0.95735073,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:12.000"} +{"battery_voltage":0.5792276,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:13.000"} +{"battery_voltage":0.63157403,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:14.000"} +{"battery_voltage":0.59447736,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:15.000"} +{"battery_voltage":0.8206818,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:16.000"} +{"battery_voltage":0.8141984,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:17.000"} +{"battery_voltage":0.66849256,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:18.000"} +{"battery_voltage":0.71412754,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:19.000"} +{"battery_voltage":0.6733996,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:20.000"} 
+{"battery_voltage":0.9024965,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:21.000"} +{"battery_voltage":0.6886468,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:22.000"} +{"battery_voltage":0.7236516,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:23.000"} +{"battery_voltage":0.5494264,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:24.000"} +{"battery_voltage":0.51326233,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:25.000"} +{"battery_voltage":0.89173627,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:26.000"} +{"battery_voltage":0.98756754,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:27.000"} +{"battery_voltage":0.7213226,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:28.000"} +{"battery_voltage":0.8062184,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:29.000"} +{"battery_voltage":0.5482464,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:30.000"} +{"battery_voltage":0.61909574,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:31.000"} +{"battery_voltage":0.7190039,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:32.000"} +{"battery_voltage":0.60273135,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:33.000"} 
+{"battery_voltage":0.7350895,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":1,"ts":"2019-12-01 00:01:34.000"} +{"battery_voltage":0.5447789,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:35.000"} +{"battery_voltage":0.509202,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:36.000"} +{"battery_voltage":0.97541416,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:37.000"} +{"battery_voltage":0.7516321,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:38.000"} +{"battery_voltage":0.7726933,"home_id":"605","object_kind":"all","object_type":3,"sensor_id":"s102","states":0,"ts":"2019-12-01 00:01:39.000"} +{"battery_voltage":0.60115623,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:00.000"} +{"battery_voltage":0.9755862,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:01.000"} +{"battery_voltage":0.9823349,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:02.000"} +{"battery_voltage":0.6357885,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:03.000"} +{"battery_voltage":0.6279355,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:04.000"} +{"battery_voltage":0.59463865,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:05.000"} +{"battery_voltage":0.67826885,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:06.000"} 
+{"battery_voltage":0.8077018,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:07.000"} +{"battery_voltage":0.8912208,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:08.000"} +{"battery_voltage":0.8821316,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:09.000"} +{"battery_voltage":0.56158596,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:10.000"} +{"battery_voltage":0.76752067,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:11.000"} +{"battery_voltage":0.6092849,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:12.000"} +{"battery_voltage":0.8139862,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:13.000"} +{"battery_voltage":0.7290665,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:14.000"} +{"battery_voltage":0.93346804,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:15.000"} +{"battery_voltage":0.7031946,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:16.000"} +{"battery_voltage":0.73181903,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:17.000"} +{"battery_voltage":0.8115653,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:18.000"} +{"battery_voltage":0.66609514,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:19.000"} 
+{"battery_voltage":0.8918715,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:20.000"} +{"battery_voltage":0.89229536,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:21.000"} +{"battery_voltage":0.6547448,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:22.000"} +{"battery_voltage":0.5263817,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:23.000"} +{"battery_voltage":0.69104654,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:24.000"} +{"battery_voltage":0.64589655,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:25.000"} +{"battery_voltage":0.7149786,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:26.000"} +{"battery_voltage":0.6625407,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:27.000"} +{"battery_voltage":0.7064498,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:28.000"} +{"battery_voltage":0.8864048,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:29.000"} +{"battery_voltage":0.56908727,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:30.000"} +{"battery_voltage":0.66720784,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:31.000"} +{"battery_voltage":0.8207879,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:32.000"} 
+{"battery_voltage":0.7704214,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:33.000"} +{"battery_voltage":0.74916565,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:34.000"} +{"battery_voltage":0.53460443,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:35.000"} +{"battery_voltage":0.70717573,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:36.000"} +{"battery_voltage":0.9661542,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:37.000"} +{"battery_voltage":0.8559648,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:38.000"} +{"battery_voltage":0.5753055,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:39.000"} +{"battery_voltage":0.8062254,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:40.000"} +{"battery_voltage":0.8050467,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:41.000"} +{"battery_voltage":0.5420858,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:42.000"} +{"battery_voltage":0.89997375,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:43.000"} +{"battery_voltage":0.5517962,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:44.000"} +{"battery_voltage":0.7491184,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:45.000"} 
+{"battery_voltage":0.9720428,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:46.000"} +{"battery_voltage":0.8925575,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:47.000"} +{"battery_voltage":0.80679524,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:48.000"} +{"battery_voltage":0.80774236,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:49.000"} +{"battery_voltage":0.53613126,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:50.000"} +{"battery_voltage":0.9552542,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:51.000"} +{"battery_voltage":0.9303039,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:52.000"} +{"battery_voltage":0.9168983,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:53.000"} +{"battery_voltage":0.78906983,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:54.000"} +{"battery_voltage":0.5393992,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:55.000"} +{"battery_voltage":0.7752098,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:56.000"} +{"battery_voltage":0.7393297,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:00:57.000"} +{"battery_voltage":0.5901948,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:58.000"} 
+{"battery_voltage":0.82910055,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:00:59.000"} +{"battery_voltage":0.88593745,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:00.000"} +{"battery_voltage":0.60122955,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:01.000"} +{"battery_voltage":0.878977,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:02.000"} +{"battery_voltage":0.75698256,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:03.000"} +{"battery_voltage":0.50624055,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:04.000"} +{"battery_voltage":0.9885113,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:05.000"} +{"battery_voltage":0.74340963,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:06.000"} +{"battery_voltage":0.9759798,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:07.000"} +{"battery_voltage":0.73438704,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:08.000"} +{"battery_voltage":0.7121439,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:09.000"} +{"battery_voltage":0.7707707,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:10.000"} +{"battery_voltage":0.8732446,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:11.000"} 
+{"battery_voltage":0.8968997,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:12.000"} +{"battery_voltage":0.82115555,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:13.000"} +{"battery_voltage":0.85465467,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:14.000"} +{"battery_voltage":0.7902354,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:15.000"} +{"battery_voltage":0.50993747,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:16.000"} +{"battery_voltage":0.8614131,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:17.000"} +{"battery_voltage":0.92145103,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:18.000"} +{"battery_voltage":0.9863989,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:19.000"} +{"battery_voltage":0.58747536,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:20.000"} +{"battery_voltage":0.8356127,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:21.000"} +{"battery_voltage":0.8804123,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:22.000"} +{"battery_voltage":0.54516625,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:23.000"} +{"battery_voltage":0.54958564,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:24.000"} 
+{"battery_voltage":0.5939968,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:25.000"} +{"battery_voltage":0.5792352,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:26.000"} +{"battery_voltage":0.5488316,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:27.000"} +{"battery_voltage":0.9730228,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:28.000"} +{"battery_voltage":0.5745121,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:29.000"} +{"battery_voltage":0.8696457,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:30.000"} +{"battery_voltage":0.94995236,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:31.000"} +{"battery_voltage":0.9038729,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:32.000"} +{"battery_voltage":0.7729239,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:33.000"} +{"battery_voltage":0.6789726,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:34.000"} +{"battery_voltage":0.8997017,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:35.000"} +{"battery_voltage":0.72364557,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:36.000"} +{"battery_voltage":0.88753945,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":0,"ts":"2019-12-01 00:01:37.000"} 
+{"battery_voltage":0.7016446,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:38.000"} +{"battery_voltage":0.53595066,"home_id":"606","object_kind":"night","object_type":1,"sensor_id":"s103","states":1,"ts":"2019-12-01 00:01:39.000"} +{"battery_voltage":0.8033614,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:00.000"} +{"battery_voltage":0.8147938,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:01.000"} +{"battery_voltage":0.6050153,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:02.000"} +{"battery_voltage":0.7920519,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:03.000"} +{"battery_voltage":0.733798,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:04.000"} +{"battery_voltage":0.7512984,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:05.000"} +{"battery_voltage":0.972511,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:06.000"} +{"battery_voltage":0.8678342,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:07.000"} +{"battery_voltage":0.5627333,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:08.000"} +{"battery_voltage":0.50696725,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:09.000"} +{"battery_voltage":0.7697411,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:10.000"} 
+{"battery_voltage":0.7384832,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:11.000"} +{"battery_voltage":0.57802075,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:12.000"} +{"battery_voltage":0.6342828,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:13.000"} +{"battery_voltage":0.8889152,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:14.000"} +{"battery_voltage":0.7986384,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:15.000"} +{"battery_voltage":0.7695893,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:16.000"} +{"battery_voltage":0.6342156,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:17.000"} +{"battery_voltage":0.82402253,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:18.000"} +{"battery_voltage":0.9537116,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:19.000"} +{"battery_voltage":0.85123,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:20.000"} +{"battery_voltage":0.94443214,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:21.000"} +{"battery_voltage":0.81446874,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:22.000"} +{"battery_voltage":0.5079787,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:23.000"} 
+{"battery_voltage":0.82231855,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:24.000"} +{"battery_voltage":0.54318166,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:25.000"} +{"battery_voltage":0.887102,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:26.000"} +{"battery_voltage":0.7985031,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:27.000"} +{"battery_voltage":0.9324222,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:28.000"} +{"battery_voltage":0.9568784,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:29.000"} +{"battery_voltage":0.84419024,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:30.000"} +{"battery_voltage":0.63686687,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:31.000"} +{"battery_voltage":0.862638,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:32.000"} +{"battery_voltage":0.63915664,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:33.000"} +{"battery_voltage":0.94823104,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:34.000"} +{"battery_voltage":0.80180836,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:35.000"} +{"battery_voltage":0.56163365,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:36.000"} 
+{"battery_voltage":0.60698605,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:37.000"} +{"battery_voltage":0.90496016,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:38.000"} +{"battery_voltage":0.79479086,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:39.000"} +{"battery_voltage":0.5411746,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:40.000"} +{"battery_voltage":0.7360853,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:41.000"} +{"battery_voltage":0.8097295,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:42.000"} +{"battery_voltage":0.7171494,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:43.000"} +{"battery_voltage":0.849315,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:44.000"} +{"battery_voltage":0.663502,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:45.000"} +{"battery_voltage":0.51946706,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:46.000"} +{"battery_voltage":0.85430115,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:47.000"} +{"battery_voltage":0.82286215,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:48.000"} +{"battery_voltage":0.9102302,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:49.000"} 
+{"battery_voltage":0.94066036,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:50.000"} +{"battery_voltage":0.8434773,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:51.000"} +{"battery_voltage":0.95908654,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:52.000"} +{"battery_voltage":0.5931864,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:53.000"} +{"battery_voltage":0.9871588,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:54.000"} +{"battery_voltage":0.8742759,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:55.000"} +{"battery_voltage":0.50797683,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:56.000"} +{"battery_voltage":0.56906056,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:57.000"} +{"battery_voltage":0.9103812,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:00:58.000"} +{"battery_voltage":0.61753106,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:00:59.000"} +{"battery_voltage":0.7401742,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:00.000"} +{"battery_voltage":0.95390666,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:01.000"} +{"battery_voltage":0.5069772,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:02.000"} 
+{"battery_voltage":0.51301944,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:03.000"} +{"battery_voltage":0.72201246,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:04.000"} +{"battery_voltage":0.8913778,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:05.000"} +{"battery_voltage":0.976287,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:06.000"} +{"battery_voltage":0.991058,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:07.000"} +{"battery_voltage":0.99977124,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:08.000"} +{"battery_voltage":0.7334305,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:09.000"} +{"battery_voltage":0.552872,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:10.000"} +{"battery_voltage":0.7832855,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:11.000"} +{"battery_voltage":0.70349,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:12.000"} +{"battery_voltage":0.964519,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:13.000"} +{"battery_voltage":0.74284106,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:14.000"} +{"battery_voltage":0.66428864,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:15.000"} +{"battery_voltage":0.5493044,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 
00:01:16.000"} +{"battery_voltage":0.74065554,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:17.000"} +{"battery_voltage":0.96337205,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:18.000"} +{"battery_voltage":0.67027295,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:19.000"} +{"battery_voltage":0.81034344,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:20.000"} +{"battery_voltage":0.6549411,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:21.000"} +{"battery_voltage":0.5835841,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:22.000"} +{"battery_voltage":0.96476233,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:23.000"} +{"battery_voltage":0.7508897,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:24.000"} +{"battery_voltage":0.5903082,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:25.000"} +{"battery_voltage":0.7541075,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:26.000"} +{"battery_voltage":0.8509584,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:27.000"} +{"battery_voltage":0.58535063,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:28.000"} +{"battery_voltage":0.51696,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:29.000"} 
+{"battery_voltage":0.8245963,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:30.000"} +{"battery_voltage":0.5676064,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:31.000"} +{"battery_voltage":0.9954416,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:32.000"} +{"battery_voltage":0.6617937,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:33.000"} +{"battery_voltage":0.5499162,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:34.000"} +{"battery_voltage":0.64593154,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":1,"ts":"2019-12-01 00:01:35.000"} +{"battery_voltage":0.946115,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:36.000"} +{"battery_voltage":0.5849637,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:37.000"} +{"battery_voltage":0.68064904,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:38.000"} +{"battery_voltage":0.8852545,"home_id":"603","object_kind":"day","object_type":2,"sensor_id":"s104","states":0,"ts":"2019-12-01 00:01:39.000"} +{"battery_voltage":0.70754087,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:00.000"} +{"battery_voltage":0.6483855,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:01.000"} +{"battery_voltage":0.5671366,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:02.000"} 
+{"battery_voltage":0.76337266,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:03.000"} +{"battery_voltage":0.9920288,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:04.000"} +{"battery_voltage":0.5574518,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:05.000"} +{"battery_voltage":0.59904534,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:06.000"} +{"battery_voltage":0.6480302,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:07.000"} +{"battery_voltage":0.63429725,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:08.000"} +{"battery_voltage":0.85299885,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:09.000"} +{"battery_voltage":0.77297366,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:10.000"} +{"battery_voltage":0.7668507,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:11.000"} +{"battery_voltage":0.57824785,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:12.000"} +{"battery_voltage":0.76801443,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:13.000"} +{"battery_voltage":0.8984245,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:14.000"} +{"battery_voltage":0.52167296,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:15.000"} 
+{"battery_voltage":0.8797653,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:16.000"} +{"battery_voltage":0.70621747,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:17.000"} +{"battery_voltage":0.8416389,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:18.000"} +{"battery_voltage":0.5681568,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:19.000"} +{"battery_voltage":0.9125648,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:20.000"} +{"battery_voltage":0.5100865,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:21.000"} +{"battery_voltage":0.9596597,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:22.000"} +{"battery_voltage":0.5011256,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:23.000"} +{"battery_voltage":0.8343365,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:24.000"} +{"battery_voltage":0.64652085,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:25.000"} +{"battery_voltage":0.6358192,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:26.000"} +{"battery_voltage":0.92160124,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:27.000"} +{"battery_voltage":0.909333,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:28.000"} 
+{"battery_voltage":0.95970964,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:29.000"} +{"battery_voltage":0.94331,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:30.000"} +{"battery_voltage":0.65175146,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:31.000"} +{"battery_voltage":0.69886935,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:32.000"} +{"battery_voltage":0.9866854,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:33.000"} +{"battery_voltage":0.5484814,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:34.000"} +{"battery_voltage":0.6101544,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:35.000"} +{"battery_voltage":0.8419212,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:36.000"} +{"battery_voltage":0.6960639,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:37.000"} +{"battery_voltage":0.8068489,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:38.000"} +{"battery_voltage":0.68448293,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:39.000"} +{"battery_voltage":0.8672006,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:40.000"} +{"battery_voltage":0.9113866,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:41.000"} 
+{"battery_voltage":0.8871064,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:42.000"} +{"battery_voltage":0.96817946,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:43.000"} +{"battery_voltage":0.5816642,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:44.000"} +{"battery_voltage":0.6309987,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:45.000"} +{"battery_voltage":0.9452791,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:46.000"} +{"battery_voltage":0.98369205,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:47.000"} +{"battery_voltage":0.7123141,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:48.000"} +{"battery_voltage":0.9546062,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:49.000"} +{"battery_voltage":0.92401385,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:50.000"} +{"battery_voltage":0.59127367,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:51.000"} +{"battery_voltage":0.87045366,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:52.000"} +{"battery_voltage":0.8465115,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:53.000"} +{"battery_voltage":0.91188776,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:54.000"} 
+{"battery_voltage":0.61064494,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:55.000"} +{"battery_voltage":0.84154475,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:00:56.000"} +{"battery_voltage":0.69890535,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:57.000"} +{"battery_voltage":0.57661706,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:58.000"} +{"battery_voltage":0.89222425,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:00:59.000"} +{"battery_voltage":0.56609154,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:00.000"} +{"battery_voltage":0.9224727,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:01.000"} +{"battery_voltage":0.8360301,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:02.000"} +{"battery_voltage":0.91405284,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:03.000"} +{"battery_voltage":0.8875489,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:04.000"} +{"battery_voltage":0.6775255,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:05.000"} +{"battery_voltage":0.71002764,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:06.000"} +{"battery_voltage":0.7901696,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:07.000"} 
+{"battery_voltage":0.84012544,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:08.000"} +{"battery_voltage":0.7698927,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:09.000"} +{"battery_voltage":0.6951759,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:10.000"} +{"battery_voltage":0.5941455,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:11.000"} +{"battery_voltage":0.8753067,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:12.000"} +{"battery_voltage":0.8527192,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:13.000"} +{"battery_voltage":0.7162281,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:14.000"} +{"battery_voltage":0.96830696,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:15.000"} +{"battery_voltage":0.82742965,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:16.000"} +{"battery_voltage":0.62583256,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:17.000"} +{"battery_voltage":0.8133428,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:18.000"} +{"battery_voltage":0.73012495,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:19.000"} +{"battery_voltage":0.8870168,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:20.000"} 
+{"battery_voltage":0.592625,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:21.000"} +{"battery_voltage":0.58833945,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:22.000"} +{"battery_voltage":0.6206717,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:23.000"} +{"battery_voltage":0.6431462,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:24.000"} +{"battery_voltage":0.8724054,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:25.000"} +{"battery_voltage":0.79947186,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:26.000"} +{"battery_voltage":0.9971847,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:27.000"} +{"battery_voltage":0.9268321,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:28.000"} +{"battery_voltage":0.82837874,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:29.000"} +{"battery_voltage":0.5304892,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:30.000"} +{"battery_voltage":0.6329912,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:31.000"} +{"battery_voltage":0.90618366,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:32.000"} +{"battery_voltage":0.5784858,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:33.000"} 
+{"battery_voltage":0.7942324,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:34.000"} +{"battery_voltage":0.6310129,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:35.000"} +{"battery_voltage":0.9656929,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:36.000"} +{"battery_voltage":0.9464745,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:37.000"} +{"battery_voltage":0.5906156,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":0,"ts":"2019-12-01 00:01:38.000"} +{"battery_voltage":0.57623565,"home_id":"604","object_kind":"all","object_type":3,"sensor_id":"s105","states":1,"ts":"2019-12-01 00:01:39.000"} +{"battery_voltage":0.8002974,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:00.000"} +{"battery_voltage":0.65368044,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:01.000"} +{"battery_voltage":0.71293247,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:02.000"} +{"battery_voltage":0.9082031,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:03.000"} +{"battery_voltage":0.7811729,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:04.000"} +{"battery_voltage":0.96570766,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:05.000"} +{"battery_voltage":0.8413833,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:06.000"} 
+{"battery_voltage":0.5964865,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:07.000"} +{"battery_voltage":0.8187906,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:08.000"} +{"battery_voltage":0.95528543,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:09.000"} +{"battery_voltage":0.8641478,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:10.000"} +{"battery_voltage":0.9830004,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:11.000"} +{"battery_voltage":0.88352764,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:12.000"} +{"battery_voltage":0.9232228,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:13.000"} +{"battery_voltage":0.95486975,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:14.000"} +{"battery_voltage":0.94609356,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:15.000"} +{"battery_voltage":0.61100274,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:16.000"} +{"battery_voltage":0.5691416,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:17.000"} +{"battery_voltage":0.9360826,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:18.000"} +{"battery_voltage":0.8925245,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:19.000"} 
+{"battery_voltage":0.6242925,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:20.000"} +{"battery_voltage":0.7285948,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:21.000"} +{"battery_voltage":0.74059856,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:22.000"} +{"battery_voltage":0.64874685,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:23.000"} +{"battery_voltage":0.7564658,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:24.000"} +{"battery_voltage":0.98491573,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:25.000"} +{"battery_voltage":0.598005,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:26.000"} +{"battery_voltage":0.88058275,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:27.000"} +{"battery_voltage":0.54105055,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:28.000"} +{"battery_voltage":0.93672323,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:29.000"} +{"battery_voltage":0.82872415,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:30.000"} +{"battery_voltage":0.6971599,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:31.000"} +{"battery_voltage":0.6769042,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:32.000"} 
+{"battery_voltage":0.6805867,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:33.000"} +{"battery_voltage":0.6872542,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:34.000"} +{"battery_voltage":0.82297754,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:35.000"} +{"battery_voltage":0.81444764,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:36.000"} +{"battery_voltage":0.69297683,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:37.000"} +{"battery_voltage":0.8391928,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:38.000"} +{"battery_voltage":0.80736417,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:39.000"} +{"battery_voltage":0.7868073,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:40.000"} +{"battery_voltage":0.77172005,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:41.000"} +{"battery_voltage":0.5137727,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:42.000"} +{"battery_voltage":0.95526296,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:43.000"} +{"battery_voltage":0.938064,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:44.000"} +{"battery_voltage":0.9020388,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:45.000"} 
+{"battery_voltage":0.9114888,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:46.000"} +{"battery_voltage":0.6880104,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:47.000"} +{"battery_voltage":0.9375304,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:48.000"} +{"battery_voltage":0.7244901,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:49.000"} +{"battery_voltage":0.82105714,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:50.000"} +{"battery_voltage":0.6234149,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:51.000"} +{"battery_voltage":0.92923963,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:52.000"} +{"battery_voltage":0.6733919,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:53.000"} +{"battery_voltage":0.76741683,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:54.000"} +{"battery_voltage":0.5319273,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:00:55.000"} +{"battery_voltage":0.68805224,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:56.000"} +{"battery_voltage":0.7300814,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:57.000"} +{"battery_voltage":0.6131429,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:58.000"} 
+{"battery_voltage":0.6922425,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:00:59.000"} +{"battery_voltage":0.9727907,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:00.000"} +{"battery_voltage":0.82986295,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:01.000"} +{"battery_voltage":0.5132921,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:02.000"} +{"battery_voltage":0.77134275,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:03.000"} +{"battery_voltage":0.5777383,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:04.000"} +{"battery_voltage":0.7101292,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:05.000"} +{"battery_voltage":0.6752328,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:06.000"} +{"battery_voltage":0.6355128,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:07.000"} +{"battery_voltage":0.9268579,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:08.000"} +{"battery_voltage":0.8940948,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:09.000"} +{"battery_voltage":0.8045571,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:10.000"} +{"battery_voltage":0.6397352,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:11.000"} 
+{"battery_voltage":0.5142179,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:12.000"} +{"battery_voltage":0.57437795,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:13.000"} +{"battery_voltage":0.5779674,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:14.000"} +{"battery_voltage":0.5777746,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:15.000"} +{"battery_voltage":0.79977393,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:16.000"} +{"battery_voltage":0.91564786,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:17.000"} +{"battery_voltage":0.83601356,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:18.000"} +{"battery_voltage":0.60413766,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:19.000"} +{"battery_voltage":0.98716986,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:20.000"} +{"battery_voltage":0.93296355,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:21.000"} +{"battery_voltage":0.90041673,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:22.000"} +{"battery_voltage":0.5376759,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:23.000"} +{"battery_voltage":0.71533316,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:24.000"} 
+{"battery_voltage":0.69811344,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:25.000"} +{"battery_voltage":0.9715346,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:26.000"} +{"battery_voltage":0.9206581,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:27.000"} +{"battery_voltage":0.8165749,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:28.000"} +{"battery_voltage":0.6838542,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:29.000"} +{"battery_voltage":0.87848604,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:30.000"} +{"battery_voltage":0.67027926,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:31.000"} +{"battery_voltage":0.90292645,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:32.000"} +{"battery_voltage":0.58885974,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:33.000"} +{"battery_voltage":0.6755761,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:34.000"} +{"battery_voltage":0.58424705,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:35.000"} +{"battery_voltage":0.8706522,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:36.000"} +{"battery_voltage":0.5665725,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:37.000"} 
+{"battery_voltage":0.8853537,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":0,"ts":"2019-12-01 00:01:38.000"} +{"battery_voltage":0.74042374,"home_id":"605","object_kind":"night","object_type":1,"sensor_id":"s106","states":1,"ts":"2019-12-01 00:01:39.000"} +{"battery_voltage":0.7546813,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:00.000"} +{"battery_voltage":0.6428457,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:01.000"} +{"battery_voltage":0.8217722,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:02.000"} +{"battery_voltage":0.5497275,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:03.000"} +{"battery_voltage":0.549164,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:04.000"} +{"battery_voltage":0.99488986,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:05.000"} +{"battery_voltage":0.65951693,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:06.000"} +{"battery_voltage":0.98187494,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:07.000"} +{"battery_voltage":0.51635957,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:08.000"} +{"battery_voltage":0.71983063,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:09.000"} +{"battery_voltage":0.9287454,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:10.000"} 
+{"battery_voltage":0.764307,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:11.000"} +{"battery_voltage":0.7559774,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:12.000"} +{"battery_voltage":0.8555727,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:13.000"} +{"battery_voltage":0.74285305,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:14.000"} +{"battery_voltage":0.8345988,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:15.000"} +{"battery_voltage":0.80865055,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:16.000"} +{"battery_voltage":0.6373774,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:17.000"} +{"battery_voltage":0.70070326,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:18.000"} +{"battery_voltage":0.7702416,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:19.000"} +{"battery_voltage":0.8708988,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:20.000"} +{"battery_voltage":0.7460189,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:21.000"} +{"battery_voltage":0.8054011,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:22.000"} +{"battery_voltage":0.70088184,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:23.000"} 
+{"battery_voltage":0.97855425,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:24.000"} +{"battery_voltage":0.92553365,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:25.000"} +{"battery_voltage":0.8004091,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:26.000"} +{"battery_voltage":0.58621615,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:27.000"} +{"battery_voltage":0.8544398,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:28.000"} +{"battery_voltage":0.93507946,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:29.000"} +{"battery_voltage":0.981555,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:30.000"} +{"battery_voltage":0.6559863,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:31.000"} +{"battery_voltage":0.589917,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:32.000"} +{"battery_voltage":0.77023107,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:33.000"} +{"battery_voltage":0.8414885,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:34.000"} +{"battery_voltage":0.92723155,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:35.000"} +{"battery_voltage":0.68667865,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:36.000"} 
+{"battery_voltage":0.6563879,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:37.000"} +{"battery_voltage":0.5494162,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:38.000"} +{"battery_voltage":0.73033655,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:39.000"} +{"battery_voltage":0.8967389,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:40.000"} +{"battery_voltage":0.93003184,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:41.000"} +{"battery_voltage":0.5939365,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:42.000"} +{"battery_voltage":0.8320396,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:43.000"} +{"battery_voltage":0.99154466,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:44.000"} +{"battery_voltage":0.9142281,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:45.000"} +{"battery_voltage":0.9949862,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:46.000"} +{"battery_voltage":0.7782185,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:47.000"} +{"battery_voltage":0.5089121,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:48.000"} +{"battery_voltage":0.73104143,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:49.000"} 
+{"battery_voltage":0.8676681,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:50.000"} +{"battery_voltage":0.6835471,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:51.000"} +{"battery_voltage":0.7104448,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:52.000"} +{"battery_voltage":0.8338785,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:53.000"} +{"battery_voltage":0.78650606,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:54.000"} +{"battery_voltage":0.86156666,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:55.000"} +{"battery_voltage":0.67074865,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:56.000"} +{"battery_voltage":0.92131823,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:57.000"} +{"battery_voltage":0.6692456,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:00:58.000"} +{"battery_voltage":0.70075643,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:00:59.000"} +{"battery_voltage":0.810084,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:00.000"} +{"battery_voltage":0.5218424,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:01.000"} +{"battery_voltage":0.66221285,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:02.000"} 
+{"battery_voltage":0.8589293,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:03.000"} +{"battery_voltage":0.85367,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:04.000"} +{"battery_voltage":0.76111495,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:05.000"} +{"battery_voltage":0.5683803,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:06.000"} +{"battery_voltage":0.965793,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:07.000"} +{"battery_voltage":0.97445166,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:08.000"} +{"battery_voltage":0.64657986,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:09.000"} +{"battery_voltage":0.8598856,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:10.000"} +{"battery_voltage":0.9699453,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:11.000"} +{"battery_voltage":0.77614653,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:12.000"} +{"battery_voltage":0.73633116,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:13.000"} +{"battery_voltage":0.66921216,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:14.000"} +{"battery_voltage":0.61229855,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:15.000"} 
+{"battery_voltage":0.9456196,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:16.000"} +{"battery_voltage":0.8569248,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:17.000"} +{"battery_voltage":0.5586567,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:18.000"} +{"battery_voltage":0.5249643,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:19.000"} +{"battery_voltage":0.51541376,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:20.000"} +{"battery_voltage":0.9897876,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:21.000"} +{"battery_voltage":0.5684158,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:22.000"} +{"battery_voltage":0.7586645,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:23.000"} +{"battery_voltage":0.57831913,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:24.000"} +{"battery_voltage":0.5272984,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:25.000"} +{"battery_voltage":0.8490623,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:26.000"} +{"battery_voltage":0.61126375,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:27.000"} +{"battery_voltage":0.6298294,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:28.000"} 
+{"battery_voltage":0.58072305,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:29.000"} +{"battery_voltage":0.54520565,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:30.000"} +{"battery_voltage":0.65894264,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:31.000"} +{"battery_voltage":0.55736834,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:32.000"} +{"battery_voltage":0.9139086,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:33.000"} +{"battery_voltage":0.59066606,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:34.000"} +{"battery_voltage":0.65324485,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:35.000"} +{"battery_voltage":0.52651376,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:36.000"} +{"battery_voltage":0.79430807,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:37.000"} +{"battery_voltage":0.68324184,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":0,"ts":"2019-12-01 00:01:38.000"} +{"battery_voltage":0.9977864,"home_id":"606","object_kind":"day","object_type":2,"sensor_id":"s107","states":1,"ts":"2019-12-01 00:01:39.000"} +{"battery_voltage":0.86721027,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:00.000"} +{"battery_voltage":0.91057515,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:01.000"} 
+{"battery_voltage":0.6340915,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:02.000"} +{"battery_voltage":0.9256289,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:03.000"} +{"battery_voltage":0.524389,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:04.000"} +{"battery_voltage":0.900969,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:05.000"} +{"battery_voltage":0.70975065,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:06.000"} +{"battery_voltage":0.6816068,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:07.000"} +{"battery_voltage":0.60286266,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:08.000"} +{"battery_voltage":0.64431405,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:09.000"} +{"battery_voltage":0.8481047,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:10.000"} +{"battery_voltage":0.74875927,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:11.000"} +{"battery_voltage":0.553125,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:12.000"} +{"battery_voltage":0.89230585,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:13.000"} +{"battery_voltage":0.8484179,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:14.000"} 
+{"battery_voltage":0.508562,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:15.000"} +{"battery_voltage":0.6212453,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:16.000"} +{"battery_voltage":0.8540254,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:17.000"} +{"battery_voltage":0.5535025,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:18.000"} +{"battery_voltage":0.73381513,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:19.000"} +{"battery_voltage":0.64239544,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:20.000"} +{"battery_voltage":0.55263007,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:21.000"} +{"battery_voltage":0.6341637,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:22.000"} +{"battery_voltage":0.7654568,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:23.000"} +{"battery_voltage":0.92196476,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:24.000"} +{"battery_voltage":0.9304836,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:25.000"} +{"battery_voltage":0.6291903,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:26.000"} +{"battery_voltage":0.72140205,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:27.000"} 
+{"battery_voltage":0.8851147,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:28.000"} +{"battery_voltage":0.80896443,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:29.000"} +{"battery_voltage":0.63162744,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:30.000"} +{"battery_voltage":0.539704,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:31.000"} +{"battery_voltage":0.9556397,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:32.000"} +{"battery_voltage":0.6092425,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:33.000"} +{"battery_voltage":0.64407754,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:34.000"} +{"battery_voltage":0.8924454,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:35.000"} +{"battery_voltage":0.6341453,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:36.000"} +{"battery_voltage":0.80927,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:37.000"} +{"battery_voltage":0.6517041,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:38.000"} +{"battery_voltage":0.597603,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:39.000"} +{"battery_voltage":0.89759815,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:40.000"} 
+{"battery_voltage":0.91360915,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:41.000"} +{"battery_voltage":0.77801263,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:42.000"} +{"battery_voltage":0.6941989,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:43.000"} +{"battery_voltage":0.5947089,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:44.000"} +{"battery_voltage":0.5456626,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:45.000"} +{"battery_voltage":0.607256,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:46.000"} +{"battery_voltage":0.61421853,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:47.000"} +{"battery_voltage":0.63586694,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:48.000"} +{"battery_voltage":0.6851482,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:49.000"} +{"battery_voltage":0.6763804,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:50.000"} +{"battery_voltage":0.82943195,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:51.000"} +{"battery_voltage":0.50045407,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:52.000"} +{"battery_voltage":0.7916049,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:53.000"} 
+{"battery_voltage":0.7013703,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:54.000"} +{"battery_voltage":0.6699885,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:55.000"} +{"battery_voltage":0.8420504,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:56.000"} +{"battery_voltage":0.51466507,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:00:57.000"} +{"battery_voltage":0.90366614,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:58.000"} +{"battery_voltage":0.85084975,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:00:59.000"} +{"battery_voltage":0.9257466,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:00.000"} +{"battery_voltage":0.5102245,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:01.000"} +{"battery_voltage":0.96108043,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:02.000"} +{"battery_voltage":0.5486912,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:03.000"} +{"battery_voltage":0.89654887,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:04.000"} +{"battery_voltage":0.8891253,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:05.000"} +{"battery_voltage":0.5727371,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:06.000"} 
+{"battery_voltage":0.783249,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:07.000"} +{"battery_voltage":0.5469513,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:08.000"} +{"battery_voltage":0.9613789,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:09.000"} +{"battery_voltage":0.69545364,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:10.000"} +{"battery_voltage":0.86351585,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:11.000"} +{"battery_voltage":0.8531561,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:12.000"} +{"battery_voltage":0.7359057,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:13.000"} +{"battery_voltage":0.621776,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:14.000"} +{"battery_voltage":0.6604511,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:15.000"} +{"battery_voltage":0.5478783,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:16.000"} +{"battery_voltage":0.66255414,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:17.000"} +{"battery_voltage":0.9499179,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:18.000"} +{"battery_voltage":0.88831574,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:19.000"} 
+{"battery_voltage":0.7580633,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:20.000"} +{"battery_voltage":0.9225353,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:21.000"} +{"battery_voltage":0.6692219,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:22.000"} +{"battery_voltage":0.61912835,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:23.000"} +{"battery_voltage":0.99582875,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:24.000"} +{"battery_voltage":0.87902415,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:25.000"} +{"battery_voltage":0.67628443,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:26.000"} +{"battery_voltage":0.83687174,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:27.000"} +{"battery_voltage":0.9943143,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:28.000"} +{"battery_voltage":0.6201099,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:29.000"} +{"battery_voltage":0.674114,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:30.000"} +{"battery_voltage":0.67246366,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:31.000"} +{"battery_voltage":0.8239599,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:32.000"} 
+{"battery_voltage":0.70240146,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:33.000"} +{"battery_voltage":0.69047993,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:34.000"} +{"battery_voltage":0.7104731,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:35.000"} +{"battery_voltage":0.9709189,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:36.000"} +{"battery_voltage":0.94283324,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:37.000"} +{"battery_voltage":0.9247738,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":0,"ts":"2019-12-01 00:01:38.000"} +{"battery_voltage":0.552071,"home_id":"603","object_kind":"all","object_type":3,"sensor_id":"s108","states":1,"ts":"2019-12-01 00:01:39.000"} +{"battery_voltage":0.9636625,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:00.000"} +{"battery_voltage":0.6839534,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:01.000"} +{"battery_voltage":0.9954566,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:02.000"} +{"battery_voltage":0.86396515,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:03.000"} +{"battery_voltage":0.61659896,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:04.000"} +{"battery_voltage":0.71129197,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:05.000"} 
+{"battery_voltage":0.905854,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:06.000"} +{"battery_voltage":0.7705264,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:07.000"} +{"battery_voltage":0.7981062,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:08.000"} +{"battery_voltage":0.7636435,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:09.000"} +{"battery_voltage":0.9345093,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:10.000"} +{"battery_voltage":0.5336993,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:11.000"} +{"battery_voltage":0.80898285,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:12.000"} +{"battery_voltage":0.89411414,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:13.000"} +{"battery_voltage":0.56510407,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:14.000"} +{"battery_voltage":0.7335708,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:15.000"} +{"battery_voltage":0.93600345,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:16.000"} +{"battery_voltage":0.9809611,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:17.000"} +{"battery_voltage":0.59630346,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:18.000"} 
+{"battery_voltage":0.8613196,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:19.000"} +{"battery_voltage":0.62268347,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:20.000"} +{"battery_voltage":0.9764792,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:21.000"} +{"battery_voltage":0.8251264,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:22.000"} +{"battery_voltage":0.95377636,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:23.000"} +{"battery_voltage":0.8570133,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:24.000"} +{"battery_voltage":0.92142123,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:25.000"} +{"battery_voltage":0.8477019,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:26.000"} +{"battery_voltage":0.8612052,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:27.000"} +{"battery_voltage":0.59492385,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:28.000"} +{"battery_voltage":0.87671703,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:29.000"} +{"battery_voltage":0.9056556,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:30.000"} +{"battery_voltage":0.93940216,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:31.000"} 
+{"battery_voltage":0.8290224,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:32.000"} +{"battery_voltage":0.5113568,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:33.000"} +{"battery_voltage":0.59223604,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:34.000"} +{"battery_voltage":0.51160496,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:35.000"} +{"battery_voltage":0.54997766,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:36.000"} +{"battery_voltage":0.8167529,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:37.000"} +{"battery_voltage":0.73863506,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:38.000"} +{"battery_voltage":0.7665298,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:39.000"} +{"battery_voltage":0.82101595,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:40.000"} +{"battery_voltage":0.97279453,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:41.000"} +{"battery_voltage":0.5629725,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:42.000"} +{"battery_voltage":0.53847814,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:43.000"} +{"battery_voltage":0.589947,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:44.000"} 
+{"battery_voltage":0.98508626,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:45.000"} +{"battery_voltage":0.84777415,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:46.000"} +{"battery_voltage":0.68025327,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:47.000"} +{"battery_voltage":0.6514157,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:48.000"} +{"battery_voltage":0.5478574,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:49.000"} +{"battery_voltage":0.8615689,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:50.000"} +{"battery_voltage":0.9215113,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:51.000"} +{"battery_voltage":0.5097517,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:52.000"} +{"battery_voltage":0.99524146,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:53.000"} +{"battery_voltage":0.62237006,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:54.000"} +{"battery_voltage":0.7505579,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:55.000"} +{"battery_voltage":0.6049488,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:56.000"} +{"battery_voltage":0.6638993,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:57.000"} 
+{"battery_voltage":0.8366454,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:00:58.000"} +{"battery_voltage":0.5381521,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:00:59.000"} +{"battery_voltage":0.662686,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:00.000"} +{"battery_voltage":0.6258177,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:01.000"} +{"battery_voltage":0.64257276,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:02.000"} +{"battery_voltage":0.65594685,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:03.000"} +{"battery_voltage":0.57828206,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:04.000"} +{"battery_voltage":0.786163,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:05.000"} +{"battery_voltage":0.6895987,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:06.000"} +{"battery_voltage":0.904716,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:07.000"} +{"battery_voltage":0.5041426,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:08.000"} +{"battery_voltage":0.66904837,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:09.000"} +{"battery_voltage":0.7101751,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:10.000"} 
+{"battery_voltage":0.69509715,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:11.000"} +{"battery_voltage":0.6266739,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:12.000"} +{"battery_voltage":0.97146165,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:13.000"} +{"battery_voltage":0.71578836,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:14.000"} +{"battery_voltage":0.7764681,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:15.000"} +{"battery_voltage":0.94571376,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:16.000"} +{"battery_voltage":0.7120625,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:17.000"} +{"battery_voltage":0.98183215,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:18.000"} +{"battery_voltage":0.9253825,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:19.000"} +{"battery_voltage":0.53743166,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:20.000"} +{"battery_voltage":0.69378746,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:21.000"} +{"battery_voltage":0.784279,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:22.000"} +{"battery_voltage":0.87504184,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:23.000"} 
+{"battery_voltage":0.7890485,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:24.000"} +{"battery_voltage":0.9394257,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:25.000"} +{"battery_voltage":0.7325297,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:26.000"} +{"battery_voltage":0.79771256,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:27.000"} +{"battery_voltage":0.5948397,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:28.000"} +{"battery_voltage":0.5982751,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:29.000"} +{"battery_voltage":0.5305714,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:30.000"} +{"battery_voltage":0.9328362,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:31.000"} +{"battery_voltage":0.60514575,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:32.000"} +{"battery_voltage":0.64315695,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:33.000"} +{"battery_voltage":0.61862606,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:34.000"} +{"battery_voltage":0.9997138,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":1,"ts":"2019-12-01 00:01:35.000"} +{"battery_voltage":0.9584835,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:36.000"} 
+{"battery_voltage":0.74601066,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:37.000"} +{"battery_voltage":0.5287202,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:38.000"} +{"battery_voltage":0.9887305,"home_id":"604","object_kind":"night","object_type":1,"sensor_id":"s109","states":0,"ts":"2019-12-01 00:01:39.000"} diff --git a/importSampleData/data/sensor_info.csv b/importSampleData/data/sensor_info.csv new file mode 100644 index 0000000000000000000000000000000000000000..d049c8b00460cdcc2a1bd5b990ae6efa2aa63bd3 --- /dev/null +++ b/importSampleData/data/sensor_info.csv @@ -0,0 +1,1001 @@ +devid,location,color,devgroup,ts,temperature,humidity +0, beijing, white, 0, 1575129600000, 16, 19.405091 +0, beijing, white, 0, 1575129601000, 22, 14.377142 +0, beijing, white, 0, 1575129602000, 16, 16.868231 +0, beijing, white, 0, 1575129603000, 20, 11.565193 +0, beijing, white, 0, 1575129604000, 31, 13.009119 +0, beijing, white, 0, 1575129605000, 29, 18.136400 +0, beijing, white, 0, 1575129606000, 17, 13.806572 +0, beijing, white, 0, 1575129607000, 23, 14.688898 +0, beijing, white, 0, 1575129608000, 26, 12.931019 +0, beijing, white, 0, 1575129609000, 32, 12.185531 +0, beijing, white, 0, 1575129610000, 30, 13.608714 +0, beijing, white, 0, 1575129611000, 23, 18.624914 +0, beijing, white, 0, 1575129612000, 22, 12.970826 +0, beijing, white, 0, 1575129613000, 22, 12.065827 +0, beijing, white, 0, 1575129614000, 25, 16.967192 +0, beijing, white, 0, 1575129615000, 16, 10.283031 +0, beijing, white, 0, 1575129616000, 22, 16.072534 +0, beijing, white, 0, 1575129617000, 24, 10.794536 +0, beijing, white, 0, 1575129618000, 32, 10.591207 +0, beijing, white, 0, 1575129619000, 20, 13.015227 +0, beijing, white, 0, 1575129620000, 28, 15.410999 +0, beijing, white, 0, 1575129621000, 29, 12.785076 +0, beijing, white, 0, 1575129622000, 28, 15.305857 +0, beijing, white, 0, 
1575129623000, 33, 12.820810 +0, beijing, white, 0, 1575129624000, 34, 13.618055 +0, beijing, white, 0, 1575129625000, 32, 12.971123 +0, beijing, white, 0, 1575129626000, 24, 10.974546 +0, beijing, white, 0, 1575129627000, 15, 10.742910 +0, beijing, white, 0, 1575129628000, 23, 16.810783 +0, beijing, white, 0, 1575129629000, 18, 13.115224 +0, beijing, white, 0, 1575129630000, 26, 17.418490 +0, beijing, white, 0, 1575129631000, 20, 17.302315 +0, beijing, white, 0, 1575129632000, 21, 14.283571 +0, beijing, white, 0, 1575129633000, 16, 16.826535 +0, beijing, white, 0, 1575129634000, 18, 19.222123 +0, beijing, white, 0, 1575129635000, 18, 14.931420 +0, beijing, white, 0, 1575129636000, 17, 19.549454 +0, beijing, white, 0, 1575129637000, 22, 16.908388 +0, beijing, white, 0, 1575129638000, 32, 15.637796 +0, beijing, white, 0, 1575129639000, 31, 15.517650 +0, beijing, white, 0, 1575129640000, 18, 14.038033 +0, beijing, white, 0, 1575129641000, 32, 19.859647 +0, beijing, white, 0, 1575129642000, 16, 13.220840 +0, beijing, white, 0, 1575129643000, 28, 16.445398 +0, beijing, white, 0, 1575129644000, 26, 16.695753 +0, beijing, white, 0, 1575129645000, 33, 13.696928 +0, beijing, white, 0, 1575129646000, 21, 15.352819 +0, beijing, white, 0, 1575129647000, 15, 12.388407 +0, beijing, white, 0, 1575129648000, 27, 11.267529 +0, beijing, white, 0, 1575129649000, 20, 14.103228 +0, beijing, white, 0, 1575129650000, 20, 16.250950 +0, beijing, white, 0, 1575129651000, 30, 16.236088 +0, beijing, white, 0, 1575129652000, 22, 18.305339 +0, beijing, white, 0, 1575129653000, 25, 17.360686 +0, beijing, white, 0, 1575129654000, 25, 14.978681 +0, beijing, white, 0, 1575129655000, 33, 14.096183 +0, beijing, white, 0, 1575129656000, 26, 10.019039 +0, beijing, white, 0, 1575129657000, 19, 19.158213 +0, beijing, white, 0, 1575129658000, 22, 15.593924 +0, beijing, white, 0, 1575129659000, 26, 18.780118 +0, beijing, white, 0, 1575129660000, 21, 16.001656 +0, beijing, white, 0, 1575129661000, 16, 
18.458328 +0, beijing, white, 0, 1575129662000, 21, 16.417843 +0, beijing, white, 0, 1575129663000, 28, 11.736558 +0, beijing, white, 0, 1575129664000, 34, 18.143946 +0, beijing, white, 0, 1575129665000, 27, 10.303225 +0, beijing, white, 0, 1575129666000, 20, 19.756748 +0, beijing, white, 0, 1575129667000, 22, 12.940063 +0, beijing, white, 0, 1575129668000, 23, 11.509640 +0, beijing, white, 0, 1575129669000, 19, 18.319309 +0, beijing, white, 0, 1575129670000, 19, 16.278346 +0, beijing, white, 0, 1575129671000, 27, 10.898361 +0, beijing, white, 0, 1575129672000, 31, 13.922162 +0, beijing, white, 0, 1575129673000, 15, 19.296116 +0, beijing, white, 0, 1575129674000, 26, 15.885763 +0, beijing, white, 0, 1575129675000, 15, 15.525804 +0, beijing, white, 0, 1575129676000, 19, 19.579539 +0, beijing, white, 0, 1575129677000, 20, 11.073811 +0, beijing, white, 0, 1575129678000, 16, 13.932510 +0, beijing, white, 0, 1575129679000, 17, 11.900328 +0, beijing, white, 0, 1575129680000, 22, 16.540414 +0, beijing, white, 0, 1575129681000, 33, 15.203803 +0, beijing, white, 0, 1575129682000, 17, 11.518434 +0, beijing, white, 0, 1575129683000, 17, 13.152081 +0, beijing, white, 0, 1575129684000, 18, 11.378041 +0, beijing, white, 0, 1575129685000, 21, 15.390745 +0, beijing, white, 0, 1575129686000, 30, 15.127818 +0, beijing, white, 0, 1575129687000, 19, 16.530402 +0, beijing, white, 0, 1575129688000, 32, 16.542701 +0, beijing, white, 0, 1575129689000, 26, 16.366442 +0, beijing, white, 0, 1575129690000, 25, 10.306822 +0, beijing, white, 0, 1575129691000, 15, 13.691117 +0, beijing, white, 0, 1575129692000, 15, 13.476817 +0, beijing, white, 0, 1575129693000, 25, 12.529998 +0, beijing, white, 0, 1575129694000, 22, 15.550021 +0, beijing, white, 0, 1575129695000, 20, 15.064971 +0, beijing, white, 0, 1575129696000, 24, 13.313683 +0, beijing, white, 0, 1575129697000, 23, 17.002879 +0, beijing, white, 0, 1575129698000, 30, 19.991595 +0, beijing, white, 0, 1575129699000, 15, 11.116746 +1, shanghai, 
black, 1, 1575129600000, 24, 10.921176 +1, shanghai, black, 1, 1575129601000, 26, 17.146958 +1, shanghai, black, 1, 1575129602000, 21, 18.486329 +1, shanghai, black, 1, 1575129603000, 34, 12.125609 +1, shanghai, black, 1, 1575129604000, 22, 19.451948 +1, shanghai, black, 1, 1575129605000, 23, 16.458334 +1, shanghai, black, 1, 1575129606000, 18, 14.484644 +1, shanghai, black, 1, 1575129607000, 33, 10.824797 +1, shanghai, black, 1, 1575129608000, 34, 14.001883 +1, shanghai, black, 1, 1575129609000, 32, 19.498832 +1, shanghai, black, 1, 1575129610000, 30, 14.993855 +1, shanghai, black, 1, 1575129611000, 28, 10.198087 +1, shanghai, black, 1, 1575129612000, 32, 14.286884 +1, shanghai, black, 1, 1575129613000, 25, 18.874475 +1, shanghai, black, 1, 1575129614000, 21, 17.650082 +1, shanghai, black, 1, 1575129615000, 15, 17.275773 +1, shanghai, black, 1, 1575129616000, 17, 15.130875 +1, shanghai, black, 1, 1575129617000, 16, 17.242291 +1, shanghai, black, 1, 1575129618000, 15, 19.777635 +1, shanghai, black, 1, 1575129619000, 29, 18.321979 +1, shanghai, black, 1, 1575129620000, 15, 19.133991 +1, shanghai, black, 1, 1575129621000, 16, 18.351038 +1, shanghai, black, 1, 1575129622000, 31, 17.517406 +1, shanghai, black, 1, 1575129623000, 34, 10.969342 +1, shanghai, black, 1, 1575129624000, 28, 15.838347 +1, shanghai, black, 1, 1575129625000, 19, 19.982738 +1, shanghai, black, 1, 1575129626000, 24, 19.854656 +1, shanghai, black, 1, 1575129627000, 34, 13.320561 +1, shanghai, black, 1, 1575129628000, 15, 19.560206 +1, shanghai, black, 1, 1575129629000, 15, 11.843907 +1, shanghai, black, 1, 1575129630000, 19, 18.332418 +1, shanghai, black, 1, 1575129631000, 30, 18.058718 +1, shanghai, black, 1, 1575129632000, 16, 17.185304 +1, shanghai, black, 1, 1575129633000, 29, 18.958033 +1, shanghai, black, 1, 1575129634000, 25, 10.187132 +1, shanghai, black, 1, 1575129635000, 33, 14.235532 +1, shanghai, black, 1, 1575129636000, 19, 14.326982 +1, shanghai, black, 1, 1575129637000, 29, 18.557044 
+1, shanghai, black, 1, 1575129638000, 19, 16.590305 +1, shanghai, black, 1, 1575129639000, 21, 15.034868 +1, shanghai, black, 1, 1575129640000, 27, 10.231096 +1, shanghai, black, 1, 1575129641000, 17, 12.611756 +1, shanghai, black, 1, 1575129642000, 32, 13.148048 +1, shanghai, black, 1, 1575129643000, 20, 18.997501 +1, shanghai, black, 1, 1575129644000, 34, 11.001994 +1, shanghai, black, 1, 1575129645000, 24, 17.698891 +1, shanghai, black, 1, 1575129646000, 16, 12.623819 +1, shanghai, black, 1, 1575129647000, 26, 12.146537 +1, shanghai, black, 1, 1575129648000, 28, 13.511343 +1, shanghai, black, 1, 1575129649000, 34, 15.783513 +1, shanghai, black, 1, 1575129650000, 23, 11.198505 +1, shanghai, black, 1, 1575129651000, 23, 10.537856 +1, shanghai, black, 1, 1575129652000, 29, 13.241740 +1, shanghai, black, 1, 1575129653000, 30, 13.492887 +1, shanghai, black, 1, 1575129654000, 21, 19.687462 +1, shanghai, black, 1, 1575129655000, 21, 12.079431 +1, shanghai, black, 1, 1575129656000, 29, 13.022024 +1, shanghai, black, 1, 1575129657000, 34, 11.340842 +1, shanghai, black, 1, 1575129658000, 18, 16.408648 +1, shanghai, black, 1, 1575129659000, 22, 18.098742 +1, shanghai, black, 1, 1575129660000, 29, 19.427574 +1, shanghai, black, 1, 1575129661000, 26, 14.946804 +1, shanghai, black, 1, 1575129662000, 18, 17.107439 +1, shanghai, black, 1, 1575129663000, 31, 14.076329 +1, shanghai, black, 1, 1575129664000, 32, 19.443971 +1, shanghai, black, 1, 1575129665000, 31, 12.886383 +1, shanghai, black, 1, 1575129666000, 20, 14.525845 +1, shanghai, black, 1, 1575129667000, 24, 13.153620 +1, shanghai, black, 1, 1575129668000, 22, 17.515631 +1, shanghai, black, 1, 1575129669000, 24, 16.697146 +1, shanghai, black, 1, 1575129670000, 34, 14.588845 +1, shanghai, black, 1, 1575129671000, 17, 14.815298 +1, shanghai, black, 1, 1575129672000, 20, 19.506232 +1, shanghai, black, 1, 1575129673000, 28, 17.425147 +1, shanghai, black, 1, 1575129674000, 15, 10.661514 +1, shanghai, black, 1, 1575129675000, 
20, 19.254679 +1, shanghai, black, 1, 1575129676000, 24, 14.094194 +1, shanghai, black, 1, 1575129677000, 31, 10.972616 +1, shanghai, black, 1, 1575129678000, 15, 10.044447 +1, shanghai, black, 1, 1575129679000, 32, 11.093067 +1, shanghai, black, 1, 1575129680000, 33, 12.570554 +1, shanghai, black, 1, 1575129681000, 28, 19.264114 +1, shanghai, black, 1, 1575129682000, 23, 13.038871 +1, shanghai, black, 1, 1575129683000, 20, 11.764896 +1, shanghai, black, 1, 1575129684000, 19, 17.051371 +1, shanghai, black, 1, 1575129685000, 18, 12.503689 +1, shanghai, black, 1, 1575129686000, 28, 17.512406 +1, shanghai, black, 1, 1575129687000, 28, 18.409932 +1, shanghai, black, 1, 1575129688000, 22, 10.132855 +1, shanghai, black, 1, 1575129689000, 23, 18.993715 +1, shanghai, black, 1, 1575129690000, 26, 10.430004 +1, shanghai, black, 1, 1575129691000, 21, 10.510941 +1, shanghai, black, 1, 1575129692000, 26, 14.756974 +1, shanghai, black, 1, 1575129693000, 32, 10.407199 +1, shanghai, black, 1, 1575129694000, 29, 12.601247 +1, shanghai, black, 1, 1575129695000, 25, 19.604975 +1, shanghai, black, 1, 1575129696000, 22, 12.293202 +1, shanghai, black, 1, 1575129697000, 19, 17.564823 +1, shanghai, black, 1, 1575129698000, 28, 13.389774 +1, shanghai, black, 1, 1575129699000, 31, 19.859944 +2, guangzhou, green, 2, 1575129600000, 17, 12.496550 +2, guangzhou, green, 2, 1575129601000, 29, 17.897172 +2, guangzhou, green, 2, 1575129602000, 34, 16.574690 +2, guangzhou, green, 2, 1575129603000, 15, 16.575054 +2, guangzhou, green, 2, 1575129604000, 34, 19.192545 +2, guangzhou, green, 2, 1575129605000, 19, 15.203920 +2, guangzhou, green, 2, 1575129606000, 28, 12.481825 +2, guangzhou, green, 2, 1575129607000, 30, 16.997891 +2, guangzhou, green, 2, 1575129608000, 24, 15.122720 +2, guangzhou, green, 2, 1575129609000, 20, 16.220016 +2, guangzhou, green, 2, 1575129610000, 16, 11.405753 +2, guangzhou, green, 2, 1575129611000, 26, 19.440151 +2, guangzhou, green, 2, 1575129612000, 24, 12.457920 +2, 
guangzhou, green, 2, 1575129613000, 30, 15.369806 +2, guangzhou, green, 2, 1575129614000, 27, 16.716676 +2, guangzhou, green, 2, 1575129615000, 32, 17.338548 +2, guangzhou, green, 2, 1575129616000, 28, 14.234738 +2, guangzhou, green, 2, 1575129617000, 34, 19.530447 +2, guangzhou, green, 2, 1575129618000, 15, 14.551896 +2, guangzhou, green, 2, 1575129619000, 21, 17.198856 +2, guangzhou, green, 2, 1575129620000, 19, 17.425909 +2, guangzhou, green, 2, 1575129621000, 29, 16.825216 +2, guangzhou, green, 2, 1575129622000, 28, 12.485828 +2, guangzhou, green, 2, 1575129623000, 25, 17.699710 +2, guangzhou, green, 2, 1575129624000, 30, 12.866378 +2, guangzhou, green, 2, 1575129625000, 18, 11.985615 +2, guangzhou, green, 2, 1575129626000, 24, 16.359533 +2, guangzhou, green, 2, 1575129627000, 20, 14.123154 +2, guangzhou, green, 2, 1575129628000, 23, 11.311899 +2, guangzhou, green, 2, 1575129629000, 29, 18.450350 +2, guangzhou, green, 2, 1575129630000, 29, 17.783038 +2, guangzhou, green, 2, 1575129631000, 22, 16.543795 +2, guangzhou, green, 2, 1575129632000, 25, 13.939652 +2, guangzhou, green, 2, 1575129633000, 22, 15.658666 +2, guangzhou, green, 2, 1575129634000, 24, 14.524828 +2, guangzhou, green, 2, 1575129635000, 15, 16.428353 +2, guangzhou, green, 2, 1575129636000, 16, 18.103802 +2, guangzhou, green, 2, 1575129637000, 28, 10.814747 +2, guangzhou, green, 2, 1575129638000, 21, 14.906347 +2, guangzhou, green, 2, 1575129639000, 25, 16.276587 +2, guangzhou, green, 2, 1575129640000, 28, 17.932145 +2, guangzhou, green, 2, 1575129641000, 34, 12.543257 +2, guangzhou, green, 2, 1575129642000, 21, 14.202174 +2, guangzhou, green, 2, 1575129643000, 19, 12.169968 +2, guangzhou, green, 2, 1575129644000, 31, 15.638443 +2, guangzhou, green, 2, 1575129645000, 23, 13.675736 +2, guangzhou, green, 2, 1575129646000, 20, 19.002998 +2, guangzhou, green, 2, 1575129647000, 34, 14.451299 +2, guangzhou, green, 2, 1575129648000, 29, 16.676133 +2, guangzhou, green, 2, 1575129649000, 31, 10.066270 +2, 
guangzhou, green, 2, 1575129650000, 26, 17.824551 +2, guangzhou, green, 2, 1575129651000, 34, 18.082416 +2, guangzhou, green, 2, 1575129652000, 28, 16.099497 +2, guangzhou, green, 2, 1575129653000, 16, 12.265096 +2, guangzhou, green, 2, 1575129654000, 34, 12.468646 +2, guangzhou, green, 2, 1575129655000, 16, 11.534757 +2, guangzhou, green, 2, 1575129656000, 16, 19.092035 +2, guangzhou, green, 2, 1575129657000, 20, 13.272631 +2, guangzhou, green, 2, 1575129658000, 19, 14.302918 +2, guangzhou, green, 2, 1575129659000, 31, 10.996095 +2, guangzhou, green, 2, 1575129660000, 17, 15.220791 +2, guangzhou, green, 2, 1575129661000, 28, 18.482870 +2, guangzhou, green, 2, 1575129662000, 17, 15.654042 +2, guangzhou, green, 2, 1575129663000, 30, 12.753545 +2, guangzhou, green, 2, 1575129664000, 18, 19.292998 +2, guangzhou, green, 2, 1575129665000, 33, 12.108711 +2, guangzhou, green, 2, 1575129666000, 34, 14.724292 +2, guangzhou, green, 2, 1575129667000, 28, 13.754784 +2, guangzhou, green, 2, 1575129668000, 22, 17.879010 +2, guangzhou, green, 2, 1575129669000, 27, 10.963891 +2, guangzhou, green, 2, 1575129670000, 32, 15.231074 +2, guangzhou, green, 2, 1575129671000, 24, 11.802718 +2, guangzhou, green, 2, 1575129672000, 21, 13.681011 +2, guangzhou, green, 2, 1575129673000, 19, 10.910179 +2, guangzhou, green, 2, 1575129674000, 29, 13.944866 +2, guangzhou, green, 2, 1575129675000, 18, 17.558532 +2, guangzhou, green, 2, 1575129676000, 19, 13.186824 +2, guangzhou, green, 2, 1575129677000, 25, 12.784448 +2, guangzhou, green, 2, 1575129678000, 28, 15.774681 +2, guangzhou, green, 2, 1575129679000, 29, 11.104902 +2, guangzhou, green, 2, 1575129680000, 16, 13.809837 +2, guangzhou, green, 2, 1575129681000, 16, 18.830369 +2, guangzhou, green, 2, 1575129682000, 32, 11.798459 +2, guangzhou, green, 2, 1575129683000, 17, 11.893725 +2, guangzhou, green, 2, 1575129684000, 16, 11.646352 +2, guangzhou, green, 2, 1575129685000, 30, 16.511740 +2, guangzhou, green, 2, 1575129686000, 27, 11.837594 +2, 
guangzhou, green, 2, 1575129687000, 26, 17.312381 +2, guangzhou, green, 2, 1575129688000, 16, 12.512595 +2, guangzhou, green, 2, 1575129689000, 27, 10.224634 +2, guangzhou, green, 2, 1575129690000, 31, 15.000720 +2, guangzhou, green, 2, 1575129691000, 18, 12.810097 +2, guangzhou, green, 2, 1575129692000, 24, 19.154830 +2, guangzhou, green, 2, 1575129693000, 29, 17.029148 +2, guangzhou, green, 2, 1575129694000, 25, 19.416777 +2, guangzhou, green, 2, 1575129695000, 17, 17.692554 +2, guangzhou, green, 2, 1575129696000, 25, 10.939226 +2, guangzhou, green, 2, 1575129697000, 23, 10.632203 +2, guangzhou, green, 2, 1575129698000, 21, 17.977449 +2, guangzhou, green, 2, 1575129699000, 20, 14.047369 +3, shenzhen, yellow, 0, 1575129600000, 17, 13.181688 +3, shenzhen, yellow, 0, 1575129601000, 26, 17.912070 +3, shenzhen, yellow, 0, 1575129602000, 28, 11.660286 +3, shenzhen, yellow, 0, 1575129603000, 28, 16.496510 +3, shenzhen, yellow, 0, 1575129604000, 32, 16.164662 +3, shenzhen, yellow, 0, 1575129605000, 16, 19.604285 +3, shenzhen, yellow, 0, 1575129606000, 33, 19.308120 +3, shenzhen, yellow, 0, 1575129607000, 16, 16.755204 +3, shenzhen, yellow, 0, 1575129608000, 33, 10.658284 +3, shenzhen, yellow, 0, 1575129609000, 30, 17.241293 +3, shenzhen, yellow, 0, 1575129610000, 16, 18.088522 +3, shenzhen, yellow, 0, 1575129611000, 31, 15.455248 +3, shenzhen, yellow, 0, 1575129612000, 29, 10.505713 +3, shenzhen, yellow, 0, 1575129613000, 28, 16.189388 +3, shenzhen, yellow, 0, 1575129614000, 16, 14.723009 +3, shenzhen, yellow, 0, 1575129615000, 27, 15.670388 +3, shenzhen, yellow, 0, 1575129616000, 29, 16.080214 +3, shenzhen, yellow, 0, 1575129617000, 18, 18.544671 +3, shenzhen, yellow, 0, 1575129618000, 23, 16.947663 +3, shenzhen, yellow, 0, 1575129619000, 15, 16.917797 +3, shenzhen, yellow, 0, 1575129620000, 25, 17.888324 +3, shenzhen, yellow, 0, 1575129621000, 34, 18.520162 +3, shenzhen, yellow, 0, 1575129622000, 29, 10.271190 +3, shenzhen, yellow, 0, 1575129623000, 26, 11.781460 +3, 
shenzhen, yellow, 0, 1575129624000, 16, 17.737292 +3, shenzhen, yellow, 0, 1575129625000, 15, 13.730896 +3, shenzhen, yellow, 0, 1575129626000, 28, 12.161647 +3, shenzhen, yellow, 0, 1575129627000, 33, 15.012675 +3, shenzhen, yellow, 0, 1575129628000, 28, 12.880752 +3, shenzhen, yellow, 0, 1575129629000, 28, 12.418301 +3, shenzhen, yellow, 0, 1575129630000, 16, 15.744831 +3, shenzhen, yellow, 0, 1575129631000, 23, 10.551453 +3, shenzhen, yellow, 0, 1575129632000, 32, 11.782227 +3, shenzhen, yellow, 0, 1575129633000, 32, 16.531595 +3, shenzhen, yellow, 0, 1575129634000, 19, 12.512090 +3, shenzhen, yellow, 0, 1575129635000, 22, 16.554170 +3, shenzhen, yellow, 0, 1575129636000, 20, 12.593234 +3, shenzhen, yellow, 0, 1575129637000, 23, 10.267977 +3, shenzhen, yellow, 0, 1575129638000, 19, 18.470475 +3, shenzhen, yellow, 0, 1575129639000, 27, 11.479857 +3, shenzhen, yellow, 0, 1575129640000, 29, 17.387964 +3, shenzhen, yellow, 0, 1575129641000, 28, 18.605927 +3, shenzhen, yellow, 0, 1575129642000, 28, 14.150780 +3, shenzhen, yellow, 0, 1575129643000, 30, 12.112675 +3, shenzhen, yellow, 0, 1575129644000, 20, 12.126206 +3, shenzhen, yellow, 0, 1575129645000, 34, 11.627235 +3, shenzhen, yellow, 0, 1575129646000, 34, 18.202179 +3, shenzhen, yellow, 0, 1575129647000, 30, 12.447241 +3, shenzhen, yellow, 0, 1575129648000, 15, 12.542049 +3, shenzhen, yellow, 0, 1575129649000, 34, 12.043278 +3, shenzhen, yellow, 0, 1575129650000, 26, 15.254272 +3, shenzhen, yellow, 0, 1575129651000, 33, 14.655641 +3, shenzhen, yellow, 0, 1575129652000, 21, 17.835511 +3, shenzhen, yellow, 0, 1575129653000, 30, 18.979520 +3, shenzhen, yellow, 0, 1575129654000, 26, 12.942195 +3, shenzhen, yellow, 0, 1575129655000, 29, 19.775977 +3, shenzhen, yellow, 0, 1575129656000, 31, 14.242160 +3, shenzhen, yellow, 0, 1575129657000, 15, 10.568320 +3, shenzhen, yellow, 0, 1575129658000, 21, 12.407690 +3, shenzhen, yellow, 0, 1575129659000, 23, 14.165327 +3, shenzhen, yellow, 0, 1575129660000, 27, 11.292074 +3, 
shenzhen, yellow, 0, 1575129661000, 18, 11.011734 +3, shenzhen, yellow, 0, 1575129662000, 22, 18.100115 +3, shenzhen, yellow, 0, 1575129663000, 18, 11.857615 +3, shenzhen, yellow, 0, 1575129664000, 20, 15.402887 +3, shenzhen, yellow, 0, 1575129665000, 32, 17.952958 +3, shenzhen, yellow, 0, 1575129666000, 16, 15.407510 +3, shenzhen, yellow, 0, 1575129667000, 23, 17.344025 +3, shenzhen, yellow, 0, 1575129668000, 34, 13.251864 +3, shenzhen, yellow, 0, 1575129669000, 31, 15.406216 +3, shenzhen, yellow, 0, 1575129670000, 19, 16.385551 +3, shenzhen, yellow, 0, 1575129671000, 32, 13.493399 +3, shenzhen, yellow, 0, 1575129672000, 27, 11.856057 +3, shenzhen, yellow, 0, 1575129673000, 30, 12.977649 +3, shenzhen, yellow, 0, 1575129674000, 19, 18.339123 +3, shenzhen, yellow, 0, 1575129675000, 23, 16.442236 +3, shenzhen, yellow, 0, 1575129676000, 18, 19.140272 +3, shenzhen, yellow, 0, 1575129677000, 27, 16.562737 +3, shenzhen, yellow, 0, 1575129678000, 16, 10.993309 +3, shenzhen, yellow, 0, 1575129679000, 27, 15.137385 +3, shenzhen, yellow, 0, 1575129680000, 15, 18.754543 +3, shenzhen, yellow, 0, 1575129681000, 26, 10.116102 +3, shenzhen, yellow, 0, 1575129682000, 29, 14.024587 +3, shenzhen, yellow, 0, 1575129683000, 31, 14.016558 +3, shenzhen, yellow, 0, 1575129684000, 19, 10.671284 +3, shenzhen, yellow, 0, 1575129685000, 32, 14.641297 +3, shenzhen, yellow, 0, 1575129686000, 18, 12.823655 +3, shenzhen, yellow, 0, 1575129687000, 30, 19.260822 +3, shenzhen, yellow, 0, 1575129688000, 30, 16.105202 +3, shenzhen, yellow, 0, 1575129689000, 22, 10.230556 +3, shenzhen, yellow, 0, 1575129690000, 17, 10.732315 +3, shenzhen, yellow, 0, 1575129691000, 31, 15.320282 +3, shenzhen, yellow, 0, 1575129692000, 24, 17.208577 +3, shenzhen, yellow, 0, 1575129693000, 16, 12.506668 +3, shenzhen, yellow, 0, 1575129694000, 17, 18.911875 +3, shenzhen, yellow, 0, 1575129695000, 15, 12.665488 +3, shenzhen, yellow, 0, 1575129696000, 18, 11.283357 +3, shenzhen, yellow, 0, 1575129697000, 15, 13.186590 +3, 
shenzhen, yellow, 0, 1575129698000, 34, 15.659293 +3, shenzhen, yellow, 0, 1575129699000, 30, 12.898771 +4, hangzhou, blue, 1, 1575129600000, 33, 18.262612 +4, hangzhou, blue, 1, 1575129601000, 20, 11.612149 +4, hangzhou, blue, 1, 1575129602000, 26, 17.261305 +4, hangzhou, blue, 1, 1575129603000, 27, 19.240210 +4, hangzhou, blue, 1, 1575129604000, 27, 17.412985 +4, hangzhou, blue, 1, 1575129605000, 19, 12.835781 +4, hangzhou, blue, 1, 1575129606000, 24, 13.087003 +4, hangzhou, blue, 1, 1575129607000, 24, 13.701138 +4, hangzhou, blue, 1, 1575129608000, 31, 10.076716 +4, hangzhou, blue, 1, 1575129609000, 27, 14.703408 +4, hangzhou, blue, 1, 1575129610000, 19, 17.503874 +4, hangzhou, blue, 1, 1575129611000, 21, 18.607839 +4, hangzhou, blue, 1, 1575129612000, 16, 15.416387 +4, hangzhou, blue, 1, 1575129613000, 19, 19.477280 +4, hangzhou, blue, 1, 1575129614000, 15, 17.374174 +4, hangzhou, blue, 1, 1575129615000, 30, 10.732940 +4, hangzhou, blue, 1, 1575129616000, 33, 16.863960 +4, hangzhou, blue, 1, 1575129617000, 16, 10.413205 +4, hangzhou, blue, 1, 1575129618000, 27, 14.130482 +4, hangzhou, blue, 1, 1575129619000, 19, 10.731398 +4, hangzhou, blue, 1, 1575129620000, 27, 11.713011 +4, hangzhou, blue, 1, 1575129621000, 26, 19.063695 +4, hangzhou, blue, 1, 1575129622000, 26, 16.309728 +4, hangzhou, blue, 1, 1575129623000, 33, 12.229796 +4, hangzhou, blue, 1, 1575129624000, 16, 15.176824 +4, hangzhou, blue, 1, 1575129625000, 31, 12.417684 +4, hangzhou, blue, 1, 1575129626000, 31, 17.284961 +4, hangzhou, blue, 1, 1575129627000, 24, 12.530188 +4, hangzhou, blue, 1, 1575129628000, 32, 15.067641 +4, hangzhou, blue, 1, 1575129629000, 32, 18.546511 +4, hangzhou, blue, 1, 1575129630000, 21, 13.049847 +4, hangzhou, blue, 1, 1575129631000, 19, 17.509510 +4, hangzhou, blue, 1, 1575129632000, 24, 13.289143 +4, hangzhou, blue, 1, 1575129633000, 18, 19.179227 +4, hangzhou, blue, 1, 1575129634000, 25, 18.128126 +4, hangzhou, blue, 1, 1575129635000, 26, 19.627125 +4, hangzhou, blue, 1, 
1575129636000, 25, 16.090493 +4, hangzhou, blue, 1, 1575129637000, 19, 19.093488 +4, hangzhou, blue, 1, 1575129638000, 32, 17.563422 +4, hangzhou, blue, 1, 1575129639000, 16, 12.867582 +4, hangzhou, blue, 1, 1575129640000, 32, 11.606473 +4, hangzhou, blue, 1, 1575129641000, 31, 12.321989 +4, hangzhou, blue, 1, 1575129642000, 30, 17.043967 +4, hangzhou, blue, 1, 1575129643000, 20, 14.553511 +4, hangzhou, blue, 1, 1575129644000, 34, 19.068052 +4, hangzhou, blue, 1, 1575129645000, 18, 15.992107 +4, hangzhou, blue, 1, 1575129646000, 34, 11.308531 +4, hangzhou, blue, 1, 1575129647000, 18, 19.053088 +4, hangzhou, blue, 1, 1575129648000, 25, 18.617738 +4, hangzhou, blue, 1, 1575129649000, 25, 14.190978 +4, hangzhou, blue, 1, 1575129650000, 22, 18.049969 +4, hangzhou, blue, 1, 1575129651000, 19, 16.890290 +4, hangzhou, blue, 1, 1575129652000, 26, 10.055835 +4, hangzhou, blue, 1, 1575129653000, 32, 18.772190 +4, hangzhou, blue, 1, 1575129654000, 18, 15.347443 +4, hangzhou, blue, 1, 1575129655000, 19, 15.611078 +4, hangzhou, blue, 1, 1575129656000, 24, 11.345082 +4, hangzhou, blue, 1, 1575129657000, 27, 10.883929 +4, hangzhou, blue, 1, 1575129658000, 25, 19.810161 +4, hangzhou, blue, 1, 1575129659000, 33, 10.159027 +4, hangzhou, blue, 1, 1575129660000, 20, 11.900341 +4, hangzhou, blue, 1, 1575129661000, 24, 12.395535 +4, hangzhou, blue, 1, 1575129662000, 25, 13.832159 +4, hangzhou, blue, 1, 1575129663000, 26, 15.066722 +4, hangzhou, blue, 1, 1575129664000, 24, 12.441406 +4, hangzhou, blue, 1, 1575129665000, 22, 16.281200 +4, hangzhou, blue, 1, 1575129666000, 21, 14.116693 +4, hangzhou, blue, 1, 1575129667000, 28, 12.441770 +4, hangzhou, blue, 1, 1575129668000, 18, 11.402083 +4, hangzhou, blue, 1, 1575129669000, 28, 15.167379 +4, hangzhou, blue, 1, 1575129670000, 16, 15.433220 +4, hangzhou, blue, 1, 1575129671000, 23, 10.211150 +4, hangzhou, blue, 1, 1575129672000, 19, 19.501424 +4, hangzhou, blue, 1, 1575129673000, 18, 17.974835 +4, hangzhou, blue, 1, 1575129674000, 26, 
12.904804 +4, hangzhou, blue, 1, 1575129675000, 27, 17.012268 +4, hangzhou, blue, 1, 1575129676000, 34, 11.223162 +4, hangzhou, blue, 1, 1575129677000, 34, 11.008873 +4, hangzhou, blue, 1, 1575129678000, 18, 13.466623 +4, hangzhou, blue, 1, 1575129679000, 25, 11.714342 +4, hangzhou, blue, 1, 1575129680000, 32, 15.193444 +4, hangzhou, blue, 1, 1575129681000, 17, 13.998644 +4, hangzhou, blue, 1, 1575129682000, 27, 12.180101 +4, hangzhou, blue, 1, 1575129683000, 17, 16.405635 +4, hangzhou, blue, 1, 1575129684000, 33, 16.027225 +4, hangzhou, blue, 1, 1575129685000, 28, 17.864308 +4, hangzhou, blue, 1, 1575129686000, 20, 16.057140 +4, hangzhou, blue, 1, 1575129687000, 26, 17.240991 +4, hangzhou, blue, 1, 1575129688000, 31, 11.178153 +4, hangzhou, blue, 1, 1575129689000, 29, 11.688910 +4, hangzhou, blue, 1, 1575129690000, 24, 15.830195 +4, hangzhou, blue, 1, 1575129691000, 33, 13.083720 +4, hangzhou, blue, 1, 1575129692000, 25, 15.003569 +4, hangzhou, blue, 1, 1575129693000, 16, 14.412837 +4, hangzhou, blue, 1, 1575129694000, 26, 18.930523 +4, hangzhou, blue, 1, 1575129695000, 19, 10.657332 +4, hangzhou, blue, 1, 1575129696000, 28, 11.193432 +4, hangzhou, blue, 1, 1575129697000, 17, 18.000253 +4, hangzhou, blue, 1, 1575129698000, 21, 15.908098 +4, hangzhou, blue, 1, 1575129699000, 25, 14.506726 +5, nanjing, white, 2, 1575129600000, 20, 17.327941 +5, nanjing, white, 2, 1575129601000, 18, 14.271766 +5, nanjing, white, 2, 1575129602000, 26, 19.593114 +5, nanjing, white, 2, 1575129603000, 19, 13.142911 +5, nanjing, white, 2, 1575129604000, 27, 15.166424 +5, nanjing, white, 2, 1575129605000, 28, 11.804980 +5, nanjing, white, 2, 1575129606000, 24, 17.625403 +5, nanjing, white, 2, 1575129607000, 19, 11.373316 +5, nanjing, white, 2, 1575129608000, 34, 19.434849 +5, nanjing, white, 2, 1575129609000, 31, 14.078995 +5, nanjing, white, 2, 1575129610000, 27, 11.647533 +5, nanjing, white, 2, 1575129611000, 25, 16.624403 +5, nanjing, white, 2, 1575129612000, 28, 12.862567 +5, nanjing, 
white, 2, 1575129613000, 20, 18.218963 +5, nanjing, white, 2, 1575129614000, 17, 10.021056 +5, nanjing, white, 2, 1575129615000, 30, 16.042743 +5, nanjing, white, 2, 1575129616000, 26, 11.424560 +5, nanjing, white, 2, 1575129617000, 21, 10.094065 +5, nanjing, white, 2, 1575129618000, 31, 15.982905 +5, nanjing, white, 2, 1575129619000, 17, 15.925533 +5, nanjing, white, 2, 1575129620000, 30, 15.622108 +5, nanjing, white, 2, 1575129621000, 18, 19.320662 +5, nanjing, white, 2, 1575129622000, 19, 14.068873 +5, nanjing, white, 2, 1575129623000, 15, 15.213653 +5, nanjing, white, 2, 1575129624000, 32, 16.028939 +5, nanjing, white, 2, 1575129625000, 28, 17.858151 +5, nanjing, white, 2, 1575129626000, 18, 11.261528 +5, nanjing, white, 2, 1575129627000, 21, 10.262692 +5, nanjing, white, 2, 1575129628000, 27, 13.190850 +5, nanjing, white, 2, 1575129629000, 17, 15.404541 +5, nanjing, white, 2, 1575129630000, 27, 10.852643 +5, nanjing, white, 2, 1575129631000, 23, 13.134271 +5, nanjing, white, 2, 1575129632000, 22, 19.928938 +5, nanjing, white, 2, 1575129633000, 19, 10.683633 +5, nanjing, white, 2, 1575129634000, 29, 15.450679 +5, nanjing, white, 2, 1575129635000, 20, 17.032495 +5, nanjing, white, 2, 1575129636000, 21, 16.081343 +5, nanjing, white, 2, 1575129637000, 31, 15.173797 +5, nanjing, white, 2, 1575129638000, 17, 18.062092 +5, nanjing, white, 2, 1575129639000, 22, 14.139422 +5, nanjing, white, 2, 1575129640000, 30, 15.335309 +5, nanjing, white, 2, 1575129641000, 30, 18.381148 +5, nanjing, white, 2, 1575129642000, 28, 15.640517 +5, nanjing, white, 2, 1575129643000, 15, 10.603125 +5, nanjing, white, 2, 1575129644000, 18, 12.096534 +5, nanjing, white, 2, 1575129645000, 27, 17.015026 +5, nanjing, white, 2, 1575129646000, 24, 15.616134 +5, nanjing, white, 2, 1575129647000, 32, 15.552120 +5, nanjing, white, 2, 1575129648000, 18, 13.846167 +5, nanjing, white, 2, 1575129649000, 32, 15.406105 +5, nanjing, white, 2, 1575129650000, 19, 14.396603 +5, nanjing, white, 2, 
1575129651000, 34, 15.660214 +5, nanjing, white, 2, 1575129652000, 29, 19.035787 +5, nanjing, white, 2, 1575129653000, 26, 14.746065 +5, nanjing, white, 2, 1575129654000, 29, 14.144764 +5, nanjing, white, 2, 1575129655000, 32, 11.953327 +5, nanjing, white, 2, 1575129656000, 16, 11.546639 +5, nanjing, white, 2, 1575129657000, 20, 12.779206 +5, nanjing, white, 2, 1575129658000, 16, 16.364659 +5, nanjing, white, 2, 1575129659000, 29, 10.204467 +5, nanjing, white, 2, 1575129660000, 22, 18.824781 +5, nanjing, white, 2, 1575129661000, 26, 18.795199 +5, nanjing, white, 2, 1575129662000, 16, 12.142987 +5, nanjing, white, 2, 1575129663000, 30, 13.810269 +5, nanjing, white, 2, 1575129664000, 28, 19.670323 +5, nanjing, white, 2, 1575129665000, 17, 10.776152 +5, nanjing, white, 2, 1575129666000, 31, 18.095779 +5, nanjing, white, 2, 1575129667000, 34, 12.720668 +5, nanjing, white, 2, 1575129668000, 27, 18.285647 +5, nanjing, white, 2, 1575129669000, 18, 15.929034 +5, nanjing, white, 2, 1575129670000, 27, 10.397290 +5, nanjing, white, 2, 1575129671000, 29, 12.914206 +5, nanjing, white, 2, 1575129672000, 29, 11.560832 +5, nanjing, white, 2, 1575129673000, 21, 15.487904 +5, nanjing, white, 2, 1575129674000, 28, 11.585003 +5, nanjing, white, 2, 1575129675000, 30, 15.042832 +5, nanjing, white, 2, 1575129676000, 23, 12.408045 +5, nanjing, white, 2, 1575129677000, 15, 17.353187 +5, nanjing, white, 2, 1575129678000, 31, 18.084138 +5, nanjing, white, 2, 1575129679000, 34, 10.756624 +5, nanjing, white, 2, 1575129680000, 19, 13.270267 +5, nanjing, white, 2, 1575129681000, 27, 16.639891 +5, nanjing, white, 2, 1575129682000, 31, 14.671892 +5, nanjing, white, 2, 1575129683000, 27, 10.554016 +5, nanjing, white, 2, 1575129684000, 16, 14.507173 +5, nanjing, white, 2, 1575129685000, 19, 11.977540 +5, nanjing, white, 2, 1575129686000, 26, 13.286239 +5, nanjing, white, 2, 1575129687000, 30, 17.858074 +5, nanjing, white, 2, 1575129688000, 24, 19.446978 +5, nanjing, white, 2, 1575129689000, 21, 
19.698453 +5, nanjing, white, 2, 1575129690000, 21, 19.494527 +5, nanjing, white, 2, 1575129691000, 34, 11.911972 +5, nanjing, white, 2, 1575129692000, 16, 16.283904 +5, nanjing, white, 2, 1575129693000, 29, 12.346139 +5, nanjing, white, 2, 1575129694000, 25, 10.589538 +5, nanjing, white, 2, 1575129695000, 23, 16.730700 +5, nanjing, white, 2, 1575129696000, 33, 16.858111 +5, nanjing, white, 2, 1575129697000, 27, 13.779923 +5, nanjing, white, 2, 1575129698000, 20, 11.035122 +5, nanjing, white, 2, 1575129699000, 34, 10.444430 +6, wuhan, black, 0, 1575129600000, 30, 13.948532 +6, wuhan, black, 0, 1575129601000, 28, 12.860198 +6, wuhan, black, 0, 1575129602000, 32, 14.979606 +6, wuhan, black, 0, 1575129603000, 22, 11.844284 +6, wuhan, black, 0, 1575129604000, 16, 19.507148 +6, wuhan, black, 0, 1575129605000, 22, 14.315308 +6, wuhan, black, 0, 1575129606000, 19, 13.773210 +6, wuhan, black, 0, 1575129607000, 31, 18.224420 +6, wuhan, black, 0, 1575129608000, 28, 15.962573 +6, wuhan, black, 0, 1575129609000, 32, 12.855757 +6, wuhan, black, 0, 1575129610000, 32, 11.010859 +6, wuhan, black, 0, 1575129611000, 33, 11.110190 +6, wuhan, black, 0, 1575129612000, 24, 18.628721 +6, wuhan, black, 0, 1575129613000, 30, 16.044831 +6, wuhan, black, 0, 1575129614000, 29, 14.617854 +6, wuhan, black, 0, 1575129615000, 31, 15.591157 +6, wuhan, black, 0, 1575129616000, 31, 12.486593 +6, wuhan, black, 0, 1575129617000, 21, 17.680152 +6, wuhan, black, 0, 1575129618000, 27, 10.341043 +6, wuhan, black, 0, 1575129619000, 28, 13.359138 +6, wuhan, black, 0, 1575129620000, 30, 19.654174 +6, wuhan, black, 0, 1575129621000, 28, 18.037469 +6, wuhan, black, 0, 1575129622000, 25, 18.404051 +6, wuhan, black, 0, 1575129623000, 16, 14.856599 +6, wuhan, black, 0, 1575129624000, 29, 19.552920 +6, wuhan, black, 0, 1575129625000, 17, 13.434096 +6, wuhan, black, 0, 1575129626000, 27, 17.019559 +6, wuhan, black, 0, 1575129627000, 26, 15.173058 +6, wuhan, black, 0, 1575129628000, 32, 12.826764 +6, wuhan, black, 
0, 1575129629000, 26, 17.535447 +6, wuhan, black, 0, 1575129630000, 21, 14.249137 +6, wuhan, black, 0, 1575129631000, 17, 17.047627 +6, wuhan, black, 0, 1575129632000, 27, 16.650397 +6, wuhan, black, 0, 1575129633000, 15, 13.081019 +6, wuhan, black, 0, 1575129634000, 31, 16.957137 +6, wuhan, black, 0, 1575129635000, 16, 14.120849 +6, wuhan, black, 0, 1575129636000, 20, 19.559244 +6, wuhan, black, 0, 1575129637000, 24, 17.951023 +6, wuhan, black, 0, 1575129638000, 28, 12.034821 +6, wuhan, black, 0, 1575129639000, 27, 19.410968 +6, wuhan, black, 0, 1575129640000, 32, 19.163660 +6, wuhan, black, 0, 1575129641000, 19, 18.268331 +6, wuhan, black, 0, 1575129642000, 17, 13.487017 +6, wuhan, black, 0, 1575129643000, 15, 19.085113 +6, wuhan, black, 0, 1575129644000, 31, 18.786878 +6, wuhan, black, 0, 1575129645000, 25, 17.901693 +6, wuhan, black, 0, 1575129646000, 16, 13.458948 +6, wuhan, black, 0, 1575129647000, 17, 16.372939 +6, wuhan, black, 0, 1575129648000, 20, 16.547324 +6, wuhan, black, 0, 1575129649000, 22, 14.801144 +6, wuhan, black, 0, 1575129650000, 16, 15.819640 +6, wuhan, black, 0, 1575129651000, 24, 16.569364 +6, wuhan, black, 0, 1575129652000, 29, 13.750153 +6, wuhan, black, 0, 1575129653000, 16, 14.846974 +6, wuhan, black, 0, 1575129654000, 23, 15.937862 +6, wuhan, black, 0, 1575129655000, 32, 19.969213 +6, wuhan, black, 0, 1575129656000, 17, 16.589262 +6, wuhan, black, 0, 1575129657000, 16, 15.983127 +6, wuhan, black, 0, 1575129658000, 32, 19.981177 +6, wuhan, black, 0, 1575129659000, 30, 15.526706 +6, wuhan, black, 0, 1575129660000, 30, 11.473325 +6, wuhan, black, 0, 1575129661000, 34, 14.734314 +6, wuhan, black, 0, 1575129662000, 31, 19.298395 +6, wuhan, black, 0, 1575129663000, 22, 16.150773 +6, wuhan, black, 0, 1575129664000, 18, 10.211251 +6, wuhan, black, 0, 1575129665000, 23, 16.773732 +6, wuhan, black, 0, 1575129666000, 22, 14.005852 +6, wuhan, black, 0, 1575129667000, 17, 13.159840 +6, wuhan, black, 0, 1575129668000, 26, 13.747615 +6, wuhan, black, 
0, 1575129669000, 26, 14.601900 +6, wuhan, black, 0, 1575129670000, 29, 10.489225 +6, wuhan, black, 0, 1575129671000, 21, 16.890829 +6, wuhan, black, 0, 1575129672000, 26, 11.081302 +6, wuhan, black, 0, 1575129673000, 26, 19.336692 +6, wuhan, black, 0, 1575129674000, 22, 13.601869 +6, wuhan, black, 0, 1575129675000, 19, 11.627652 +6, wuhan, black, 0, 1575129676000, 19, 13.767122 +6, wuhan, black, 0, 1575129677000, 17, 15.320825 +6, wuhan, black, 0, 1575129678000, 16, 13.546837 +6, wuhan, black, 0, 1575129679000, 26, 19.562339 +6, wuhan, black, 0, 1575129680000, 24, 18.861545 +6, wuhan, black, 0, 1575129681000, 22, 11.048994 +6, wuhan, black, 0, 1575129682000, 32, 18.633559 +6, wuhan, black, 0, 1575129683000, 24, 11.423349 +6, wuhan, black, 0, 1575129684000, 31, 10.958536 +6, wuhan, black, 0, 1575129685000, 27, 16.700368 +6, wuhan, black, 0, 1575129686000, 32, 19.383603 +6, wuhan, black, 0, 1575129687000, 25, 12.817186 +6, wuhan, black, 0, 1575129688000, 21, 19.289010 +6, wuhan, black, 0, 1575129689000, 21, 18.514933 +6, wuhan, black, 0, 1575129690000, 22, 19.214387 +6, wuhan, black, 0, 1575129691000, 33, 11.673355 +6, wuhan, black, 0, 1575129692000, 23, 18.321138 +6, wuhan, black, 0, 1575129693000, 29, 11.371021 +6, wuhan, black, 0, 1575129694000, 32, 10.531389 +6, wuhan, black, 0, 1575129695000, 18, 15.921944 +6, wuhan, black, 0, 1575129696000, 27, 16.780309 +6, wuhan, black, 0, 1575129697000, 29, 12.028908 +6, wuhan, black, 0, 1575129698000, 32, 14.714637 +6, wuhan, black, 0, 1575129699000, 29, 12.753968 +7, suzhou, green, 1, 1575129600000, 24, 15.501768 +7, suzhou, green, 1, 1575129601000, 18, 17.583403 +7, suzhou, green, 1, 1575129602000, 15, 14.919566 +7, suzhou, green, 1, 1575129603000, 34, 11.870620 +7, suzhou, green, 1, 1575129604000, 29, 13.098385 +7, suzhou, green, 1, 1575129605000, 16, 17.498160 +7, suzhou, green, 1, 1575129606000, 30, 19.744556 +7, suzhou, green, 1, 1575129607000, 33, 16.558870 +7, suzhou, green, 1, 1575129608000, 16, 12.532103 +7, 
suzhou, green, 1, 1575129609000, 16, 16.504603 +7, suzhou, green, 1, 1575129610000, 25, 11.681246 +7, suzhou, green, 1, 1575129611000, 30, 10.620805 +7, suzhou, green, 1, 1575129612000, 22, 16.687937 +7, suzhou, green, 1, 1575129613000, 25, 17.911474 +7, suzhou, green, 1, 1575129614000, 32, 11.036519 +7, suzhou, green, 1, 1575129615000, 29, 16.162914 +7, suzhou, green, 1, 1575129616000, 30, 10.425992 +7, suzhou, green, 1, 1575129617000, 34, 19.630803 +7, suzhou, green, 1, 1575129618000, 29, 17.739556 +7, suzhou, green, 1, 1575129619000, 32, 17.805220 +7, suzhou, green, 1, 1575129620000, 23, 15.547236 +7, suzhou, green, 1, 1575129621000, 19, 13.928559 +7, suzhou, green, 1, 1575129622000, 34, 15.063669 +7, suzhou, green, 1, 1575129623000, 33, 16.968293 +7, suzhou, green, 1, 1575129624000, 24, 17.425284 +7, suzhou, green, 1, 1575129625000, 29, 12.856950 +7, suzhou, green, 1, 1575129626000, 16, 10.769358 +7, suzhou, green, 1, 1575129627000, 19, 19.106196 +7, suzhou, green, 1, 1575129628000, 15, 18.987306 +7, suzhou, green, 1, 1575129629000, 18, 19.311755 +7, suzhou, green, 1, 1575129630000, 20, 11.854711 +7, suzhou, green, 1, 1575129631000, 17, 11.268703 +7, suzhou, green, 1, 1575129632000, 28, 18.451425 +7, suzhou, green, 1, 1575129633000, 30, 15.813294 +7, suzhou, green, 1, 1575129634000, 28, 14.549649 +7, suzhou, green, 1, 1575129635000, 30, 18.777474 +7, suzhou, green, 1, 1575129636000, 28, 18.789080 +7, suzhou, green, 1, 1575129637000, 22, 12.038230 +7, suzhou, green, 1, 1575129638000, 15, 10.294816 +7, suzhou, green, 1, 1575129639000, 18, 19.396735 +7, suzhou, green, 1, 1575129640000, 20, 17.763178 +7, suzhou, green, 1, 1575129641000, 27, 17.413355 +7, suzhou, green, 1, 1575129642000, 29, 12.723483 +7, suzhou, green, 1, 1575129643000, 29, 12.753222 +7, suzhou, green, 1, 1575129644000, 25, 11.097518 +7, suzhou, green, 1, 1575129645000, 27, 15.300300 +7, suzhou, green, 1, 1575129646000, 34, 11.625943 +7, suzhou, green, 1, 1575129647000, 25, 16.646308 +7, suzhou, 
green, 1, 1575129648000, 31, 10.940592 +7, suzhou, green, 1, 1575129649000, 25, 18.853796 +7, suzhou, green, 1, 1575129650000, 23, 16.183418 +7, suzhou, green, 1, 1575129651000, 34, 15.379113 +7, suzhou, green, 1, 1575129652000, 15, 10.424659 +7, suzhou, green, 1, 1575129653000, 25, 10.196040 +7, suzhou, green, 1, 1575129654000, 24, 15.591199 +7, suzhou, green, 1, 1575129655000, 31, 17.032220 +7, suzhou, green, 1, 1575129656000, 30, 14.349576 +7, suzhou, green, 1, 1575129657000, 21, 14.315072 +7, suzhou, green, 1, 1575129658000, 18, 12.297491 +7, suzhou, green, 1, 1575129659000, 27, 13.134474 +7, suzhou, green, 1, 1575129660000, 28, 16.510527 +7, suzhou, green, 1, 1575129661000, 21, 17.905938 +7, suzhou, green, 1, 1575129662000, 16, 14.310720 +7, suzhou, green, 1, 1575129663000, 33, 12.415139 +7, suzhou, green, 1, 1575129664000, 28, 19.899145 +7, suzhou, green, 1, 1575129665000, 32, 18.874009 +7, suzhou, green, 1, 1575129666000, 34, 16.834873 +7, suzhou, green, 1, 1575129667000, 16, 18.383447 +7, suzhou, green, 1, 1575129668000, 29, 11.365641 +7, suzhou, green, 1, 1575129669000, 34, 13.137474 +7, suzhou, green, 1, 1575129670000, 18, 13.566243 +7, suzhou, green, 1, 1575129671000, 27, 16.454975 +7, suzhou, green, 1, 1575129672000, 21, 10.957562 +7, suzhou, green, 1, 1575129673000, 24, 14.916977 +7, suzhou, green, 1, 1575129674000, 28, 12.449565 +7, suzhou, green, 1, 1575129675000, 20, 10.217084 +7, suzhou, green, 1, 1575129676000, 32, 15.026526 +7, suzhou, green, 1, 1575129677000, 20, 10.291223 +7, suzhou, green, 1, 1575129678000, 24, 13.561227 +7, suzhou, green, 1, 1575129679000, 26, 10.091348 +7, suzhou, green, 1, 1575129680000, 25, 13.574391 +7, suzhou, green, 1, 1575129681000, 33, 17.308216 +7, suzhou, green, 1, 1575129682000, 15, 11.635235 +7, suzhou, green, 1, 1575129683000, 31, 19.967076 +7, suzhou, green, 1, 1575129684000, 25, 11.849431 +7, suzhou, green, 1, 1575129685000, 31, 16.161484 +7, suzhou, green, 1, 1575129686000, 20, 15.716389 +7, suzhou, green, 1, 
1575129687000, 22, 17.486091 +7, suzhou, green, 1, 1575129688000, 29, 10.390956 +7, suzhou, green, 1, 1575129689000, 18, 18.549987 +7, suzhou, green, 1, 1575129690000, 21, 12.367505 +7, suzhou, green, 1, 1575129691000, 30, 12.345558 +7, suzhou, green, 1, 1575129692000, 17, 14.100245 +7, suzhou, green, 1, 1575129693000, 19, 11.093554 +7, suzhou, green, 1, 1575129694000, 26, 13.614985 +7, suzhou, green, 1, 1575129695000, 28, 13.753683 +7, suzhou, green, 1, 1575129696000, 21, 12.691688 +7, suzhou, green, 1, 1575129697000, 29, 17.595583 +7, suzhou, green, 1, 1575129698000, 20, 13.184472 +7, suzhou, green, 1, 1575129699000, 17, 14.349156 +8, haerbing, yellow, 2, 1575129600000, 28, 13.254039 +8, haerbing, yellow, 2, 1575129601000, 21, 17.815564 +8, haerbing, yellow, 2, 1575129602000, 19, 11.209747 +8, haerbing, yellow, 2, 1575129603000, 26, 16.861074 +8, haerbing, yellow, 2, 1575129604000, 31, 11.504868 +8, haerbing, yellow, 2, 1575129605000, 34, 19.224629 +8, haerbing, yellow, 2, 1575129606000, 23, 11.358596 +8, haerbing, yellow, 2, 1575129607000, 31, 12.635280 +8, haerbing, yellow, 2, 1575129608000, 26, 11.433395 +8, haerbing, yellow, 2, 1575129609000, 17, 13.468466 +8, haerbing, yellow, 2, 1575129610000, 33, 14.519953 +8, haerbing, yellow, 2, 1575129611000, 15, 14.241436 +8, haerbing, yellow, 2, 1575129612000, 16, 13.055456 +8, haerbing, yellow, 2, 1575129613000, 17, 13.772431 +8, haerbing, yellow, 2, 1575129614000, 19, 12.057286 +8, haerbing, yellow, 2, 1575129615000, 19, 13.647710 +8, haerbing, yellow, 2, 1575129616000, 20, 15.103685 +8, haerbing, yellow, 2, 1575129617000, 18, 16.627761 +8, haerbing, yellow, 2, 1575129618000, 26, 18.441795 +8, haerbing, yellow, 2, 1575129619000, 15, 18.348824 +8, haerbing, yellow, 2, 1575129620000, 32, 18.431012 +8, haerbing, yellow, 2, 1575129621000, 17, 10.795047 +8, haerbing, yellow, 2, 1575129622000, 34, 10.793828 +8, haerbing, yellow, 2, 1575129623000, 18, 16.664458 +8, haerbing, yellow, 2, 1575129624000, 22, 16.533227 +8, 
haerbing, yellow, 2, 1575129625000, 15, 12.870278 +8, haerbing, yellow, 2, 1575129626000, 31, 17.592231 +8, haerbing, yellow, 2, 1575129627000, 17, 10.092316 +8, haerbing, yellow, 2, 1575129628000, 22, 10.988946 +8, haerbing, yellow, 2, 1575129629000, 17, 14.493579 +8, haerbing, yellow, 2, 1575129630000, 20, 11.943546 +8, haerbing, yellow, 2, 1575129631000, 28, 19.871601 +8, haerbing, yellow, 2, 1575129632000, 16, 16.607235 +8, haerbing, yellow, 2, 1575129633000, 19, 10.197650 +8, haerbing, yellow, 2, 1575129634000, 19, 10.742104 +8, haerbing, yellow, 2, 1575129635000, 30, 18.785863 +8, haerbing, yellow, 2, 1575129636000, 16, 14.827333 +8, haerbing, yellow, 2, 1575129637000, 28, 13.826542 +8, haerbing, yellow, 2, 1575129638000, 16, 18.638533 +8, haerbing, yellow, 2, 1575129639000, 24, 17.832974 +8, haerbing, yellow, 2, 1575129640000, 31, 14.904558 +8, haerbing, yellow, 2, 1575129641000, 32, 16.034774 +8, haerbing, yellow, 2, 1575129642000, 33, 16.879997 +8, haerbing, yellow, 2, 1575129643000, 18, 16.981511 +8, haerbing, yellow, 2, 1575129644000, 19, 18.554924 +8, haerbing, yellow, 2, 1575129645000, 28, 12.138742 +8, haerbing, yellow, 2, 1575129646000, 27, 17.938497 +8, haerbing, yellow, 2, 1575129647000, 25, 16.919425 +8, haerbing, yellow, 2, 1575129648000, 15, 17.739521 +8, haerbing, yellow, 2, 1575129649000, 26, 16.017035 +8, haerbing, yellow, 2, 1575129650000, 20, 14.530903 +8, haerbing, yellow, 2, 1575129651000, 32, 10.938258 +8, haerbing, yellow, 2, 1575129652000, 18, 15.265134 +8, haerbing, yellow, 2, 1575129653000, 25, 11.227825 +8, haerbing, yellow, 2, 1575129654000, 32, 15.839538 +8, haerbing, yellow, 2, 1575129655000, 20, 12.813906 +8, haerbing, yellow, 2, 1575129656000, 34, 14.348205 +8, haerbing, yellow, 2, 1575129657000, 23, 13.158134 +8, haerbing, yellow, 2, 1575129658000, 27, 18.320920 +8, haerbing, yellow, 2, 1575129659000, 31, 10.848533 +8, haerbing, yellow, 2, 1575129660000, 21, 13.549193 +8, haerbing, yellow, 2, 1575129661000, 21, 10.043014 +8, 
haerbing, yellow, 2, 1575129662000, 17, 13.852666 +8, haerbing, yellow, 2, 1575129663000, 20, 13.046154 +8, haerbing, yellow, 2, 1575129664000, 15, 15.538251 +8, haerbing, yellow, 2, 1575129665000, 25, 15.422191 +8, haerbing, yellow, 2, 1575129666000, 23, 17.912156 +8, haerbing, yellow, 2, 1575129667000, 31, 10.870706 +8, haerbing, yellow, 2, 1575129668000, 15, 15.348852 +8, haerbing, yellow, 2, 1575129669000, 15, 19.605174 +8, haerbing, yellow, 2, 1575129670000, 20, 12.633162 +8, haerbing, yellow, 2, 1575129671000, 23, 15.347140 +8, haerbing, yellow, 2, 1575129672000, 23, 19.131427 +8, haerbing, yellow, 2, 1575129673000, 28, 17.031277 +8, haerbing, yellow, 2, 1575129674000, 25, 12.871234 +8, haerbing, yellow, 2, 1575129675000, 27, 12.112865 +8, haerbing, yellow, 2, 1575129676000, 28, 14.989160 +8, haerbing, yellow, 2, 1575129677000, 34, 12.925199 +8, haerbing, yellow, 2, 1575129678000, 30, 11.244869 +8, haerbing, yellow, 2, 1575129679000, 34, 13.189385 +8, haerbing, yellow, 2, 1575129680000, 32, 12.347545 +8, haerbing, yellow, 2, 1575129681000, 29, 14.551418 +8, haerbing, yellow, 2, 1575129682000, 30, 14.502223 +8, haerbing, yellow, 2, 1575129683000, 32, 13.304706 +8, haerbing, yellow, 2, 1575129684000, 25, 12.030741 +8, haerbing, yellow, 2, 1575129685000, 17, 16.387617 +8, haerbing, yellow, 2, 1575129686000, 15, 19.766795 +8, haerbing, yellow, 2, 1575129687000, 21, 16.533866 +8, haerbing, yellow, 2, 1575129688000, 17, 11.657003 +8, haerbing, yellow, 2, 1575129689000, 34, 12.667008 +8, haerbing, yellow, 2, 1575129690000, 22, 15.673815 +8, haerbing, yellow, 2, 1575129691000, 22, 15.767975 +8, haerbing, yellow, 2, 1575129692000, 31, 19.982548 +8, haerbing, yellow, 2, 1575129693000, 29, 19.036149 +8, haerbing, yellow, 2, 1575129694000, 24, 16.044736 +8, haerbing, yellow, 2, 1575129695000, 19, 12.138802 +8, haerbing, yellow, 2, 1575129696000, 28, 17.771396 +8, haerbing, yellow, 2, 1575129697000, 31, 16.321497 +8, haerbing, yellow, 2, 1575129698000, 25, 15.864515 +8, 
haerbing, yellow, 2, 1575129699000, 25, 16.492443 +9, sijiazhuang, blue, 0, 1575129600000, 23, 16.002889 +9, sijiazhuang, blue, 0, 1575129601000, 26, 17.034610 +9, sijiazhuang, blue, 0, 1575129602000, 29, 12.892319 +9, sijiazhuang, blue, 0, 1575129603000, 34, 15.321807 +9, sijiazhuang, blue, 0, 1575129604000, 29, 12.562642 +9, sijiazhuang, blue, 0, 1575129605000, 32, 17.190246 +9, sijiazhuang, blue, 0, 1575129606000, 19, 15.361774 +9, sijiazhuang, blue, 0, 1575129607000, 26, 15.022364 +9, sijiazhuang, blue, 0, 1575129608000, 31, 14.837084 +9, sijiazhuang, blue, 0, 1575129609000, 25, 11.554289 +9, sijiazhuang, blue, 0, 1575129610000, 21, 15.313973 +9, sijiazhuang, blue, 0, 1575129611000, 27, 18.621783 +9, sijiazhuang, blue, 0, 1575129612000, 31, 18.018101 +9, sijiazhuang, blue, 0, 1575129613000, 23, 14.421450 +9, sijiazhuang, blue, 0, 1575129614000, 28, 10.833142 +9, sijiazhuang, blue, 0, 1575129615000, 33, 18.169837 +9, sijiazhuang, blue, 0, 1575129616000, 21, 18.772730 +9, sijiazhuang, blue, 0, 1575129617000, 24, 18.893146 +9, sijiazhuang, blue, 0, 1575129618000, 24, 10.290187 +9, sijiazhuang, blue, 0, 1575129619000, 23, 17.393345 +9, sijiazhuang, blue, 0, 1575129620000, 30, 12.949215 +9, sijiazhuang, blue, 0, 1575129621000, 19, 19.267621 +9, sijiazhuang, blue, 0, 1575129622000, 33, 14.831735 +9, sijiazhuang, blue, 0, 1575129623000, 21, 14.711125 +9, sijiazhuang, blue, 0, 1575129624000, 16, 17.168485 +9, sijiazhuang, blue, 0, 1575129625000, 17, 16.426433 +9, sijiazhuang, blue, 0, 1575129626000, 19, 13.879050 +9, sijiazhuang, blue, 0, 1575129627000, 21, 18.308168 +9, sijiazhuang, blue, 0, 1575129628000, 17, 10.845681 +9, sijiazhuang, blue, 0, 1575129629000, 20, 10.238272 +9, sijiazhuang, blue, 0, 1575129630000, 19, 19.424976 +9, sijiazhuang, blue, 0, 1575129631000, 31, 13.885909 +9, sijiazhuang, blue, 0, 1575129632000, 15, 19.264740 +9, sijiazhuang, blue, 0, 1575129633000, 30, 12.460645 +9, sijiazhuang, blue, 0, 1575129634000, 27, 17.608036 +9, sijiazhuang, blue, 
0, 1575129635000, 25, 13.493812 +9, sijiazhuang, blue, 0, 1575129636000, 19, 10.955939 +9, sijiazhuang, blue, 0, 1575129637000, 24, 11.956587 +9, sijiazhuang, blue, 0, 1575129638000, 15, 19.141381 +9, sijiazhuang, blue, 0, 1575129639000, 24, 14.801530 +9, sijiazhuang, blue, 0, 1575129640000, 17, 14.347318 +9, sijiazhuang, blue, 0, 1575129641000, 29, 14.803237 +9, sijiazhuang, blue, 0, 1575129642000, 28, 10.342297 +9, sijiazhuang, blue, 0, 1575129643000, 29, 19.368282 +9, sijiazhuang, blue, 0, 1575129644000, 31, 17.491654 +9, sijiazhuang, blue, 0, 1575129645000, 18, 13.161736 +9, sijiazhuang, blue, 0, 1575129646000, 17, 16.067354 +9, sijiazhuang, blue, 0, 1575129647000, 18, 13.736465 +9, sijiazhuang, blue, 0, 1575129648000, 23, 19.103276 +9, sijiazhuang, blue, 0, 1575129649000, 29, 16.075892 +9, sijiazhuang, blue, 0, 1575129650000, 21, 10.728566 +9, sijiazhuang, blue, 0, 1575129651000, 15, 18.921849 +9, sijiazhuang, blue, 0, 1575129652000, 24, 16.914709 +9, sijiazhuang, blue, 0, 1575129653000, 19, 13.501651 +9, sijiazhuang, blue, 0, 1575129654000, 19, 13.538347 +9, sijiazhuang, blue, 0, 1575129655000, 16, 13.261095 +9, sijiazhuang, blue, 0, 1575129656000, 32, 16.315746 +9, sijiazhuang, blue, 0, 1575129657000, 27, 16.400939 +9, sijiazhuang, blue, 0, 1575129658000, 24, 13.321819 +9, sijiazhuang, blue, 0, 1575129659000, 27, 19.070181 +9, sijiazhuang, blue, 0, 1575129660000, 27, 13.040922 +9, sijiazhuang, blue, 0, 1575129661000, 32, 10.872530 +9, sijiazhuang, blue, 0, 1575129662000, 28, 16.428657 +9, sijiazhuang, blue, 0, 1575129663000, 32, 13.883854 +9, sijiazhuang, blue, 0, 1575129664000, 33, 14.299554 +9, sijiazhuang, blue, 0, 1575129665000, 30, 16.445130 +9, sijiazhuang, blue, 0, 1575129666000, 15, 18.059404 +9, sijiazhuang, blue, 0, 1575129667000, 21, 12.348847 +9, sijiazhuang, blue, 0, 1575129668000, 32, 13.315378 +9, sijiazhuang, blue, 0, 1575129669000, 17, 15.689507 +9, sijiazhuang, blue, 0, 1575129670000, 22, 15.591808 +9, sijiazhuang, blue, 0, 1575129671000, 
27, 16.386065 +9, sijiazhuang, blue, 0, 1575129672000, 25, 10.564803 +9, sijiazhuang, blue, 0, 1575129673000, 20, 12.276544 +9, sijiazhuang, blue, 0, 1575129674000, 26, 15.828786 +9, sijiazhuang, blue, 0, 1575129675000, 18, 12.236420 +9, sijiazhuang, blue, 0, 1575129676000, 15, 19.439522 +9, sijiazhuang, blue, 0, 1575129677000, 19, 19.831531 +9, sijiazhuang, blue, 0, 1575129678000, 22, 17.115744 +9, sijiazhuang, blue, 0, 1575129679000, 29, 19.879456 +9, sijiazhuang, blue, 0, 1575129680000, 34, 10.207136 +9, sijiazhuang, blue, 0, 1575129681000, 16, 17.633523 +9, sijiazhuang, blue, 0, 1575129682000, 15, 14.227873 +9, sijiazhuang, blue, 0, 1575129683000, 34, 12.027768 +9, sijiazhuang, blue, 0, 1575129684000, 22, 11.376610 +9, sijiazhuang, blue, 0, 1575129685000, 21, 11.711299 +9, sijiazhuang, blue, 0, 1575129686000, 33, 14.281126 +9, sijiazhuang, blue, 0, 1575129687000, 31, 10.895302 +9, sijiazhuang, blue, 0, 1575129688000, 31, 13.971350 +9, sijiazhuang, blue, 0, 1575129689000, 15, 15.262790 +9, sijiazhuang, blue, 0, 1575129690000, 23, 12.440568 +9, sijiazhuang, blue, 0, 1575129691000, 32, 19.731267 +9, sijiazhuang, blue, 0, 1575129692000, 22, 10.518092 +9, sijiazhuang, blue, 0, 1575129693000, 34, 17.863021 +9, sijiazhuang, blue, 0, 1575129694000, 28, 11.478909 +9, sijiazhuang, blue, 0, 1575129695000, 16, 15.075524 +9, sijiazhuang, blue, 0, 1575129696000, 16, 10.292127 +9, sijiazhuang, blue, 0, 1575129697000, 22, 13.716012 +9, sijiazhuang, blue, 0, 1575129698000, 32, 10.906551 +9, sijiazhuang, blue, 0, 1575129699000, 19, 18.386868 \ No newline at end of file diff --git a/importSampleData/import/import_config.go b/importSampleData/import/import_config.go new file mode 100644 index 0000000000000000000000000000000000000000..e7942cc5050ae369afe896d0f46a0e242fb7e8f6 --- /dev/null +++ b/importSampleData/import/import_config.go @@ -0,0 +1,66 @@ +package dataimport + +import ( + "encoding/json" + "fmt" + "path/filepath" + "sync" + + "github.com/pelletier/go-toml" +) + +var ( 
+ cfg Config + once sync.Once +) + +// Config inclue all scene import config +type Config struct { + UserCases map[string]CaseConfig +} + +// CaseConfig include the sample data config and tdengine config +type CaseConfig struct { + Format string + FilePath string + Separator string + Stname string + SubTableName string + Timestamp string + TimestampType string + TimestampTypeFormat string + Tags []FieldInfo + Fields []FieldInfo +} + +// FieldInfo is field or tag info +type FieldInfo struct { + Name string + Type string +} + +// LoadConfig will load the specified file config +func LoadConfig(filePath string) Config { + once.Do(func() { + filePath, err := filepath.Abs(filePath) + if err != nil { + panic(err) + } + fmt.Printf("parse toml file once. filePath: %s\n", filePath) + tree, err := toml.LoadFile(filePath) + if err != nil { + panic(err) + } + + bytes, err := json.Marshal(tree.ToMap()) + if err != nil { + panic(err) + } + + err = json.Unmarshal(bytes, &cfg.UserCases) + if err != nil { + panic(err) + } + }) + return cfg +} diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 9d61b0df68f3480834fddacd5d701ee0559352e8..588f0650bd8b67033504d5f0b9c3f77cca24c54a 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -5,14 +5,39 @@ # # ######################################################## +# master IP for TDengine system +# masterIp 127.0.0.1 -# Internal IP address of the server, which can be acquired by using ifconfig command. -# internalIp 127.0.0.1 +# second IP for TDengine system, for cluster version only +# secondIp 127.0.0.1 + +# IP address of the server +# privateIp 127.0.0.1 + +# public IP of server, on which the tdengine are deployed +# this IP is assigned by cloud service provider, for cluster version only +# publicIp 127.0.0.1 + +# network is bound to 0.0.0.0 +# anyIp 1 + +# set socket type ("udp" and "tcp") +# the server and client should have the same socket type. 
Otherwise, connect will fail +# sockettype udp # client local IP # localIp 127.0.0.1 -# data file's directory +# for the cluster version, data file's directory is configured this way +# option mount_path tier_level +# dataDir /mnt/disk1/taos 0 +# dataDir /mnt/disk2/taos 0 +# dataDir /mnt/disk3/taos 0 +# dataDir /mnt/disk4/taos 0 +# dataDir /mnt/disk5/taos 0 +# dataDir /mnt/disk6/taos 1 +# dataDir /mnt/disk7/taos 1 +# for the stand-alone version, data file's directory is configured this way # dataDir /var/lib/taos # log file's directory @@ -27,6 +52,18 @@ # port for DNode connect to Client, default udp[6035-6039] tcp[6035] # vnodeShellPort 6035 +# port for MNode connect to VNode, default udp[6040-6044] tcp[6040], for cluster version only +# mgmtVnodePort 6040 + +# port for DNode connect to DNode, default tcp[6045], for cluster version only +# vnodeVnodePort 6045 + +# port for MNode connect to MNode, default udp[6050], for cluster version only +# mgmtMgmtPort 6050 + +# port sync file MNode and MNode, default tcp[6050], for cluster version only +# mgmtSyncPort 6050 + # number of threads per CPU core # numOfThreadsPerCore 1 @@ -54,11 +91,7 @@ # interval of system monitor # monitorInterval 60 -# set socket type("udp" and "tcp"). -# The server and client should have the same socket type. Otherwise, connect will fail. 
-# sockettype udp - -# The compressed rpc message, option: +# the compressed rpc message, option: # -1 (no compression) # 0 (all message compressed), # > 0 (rpc message body which larger than this value will be compressed) @@ -73,12 +106,18 @@ # commit interval,unit is second # ctime 3600 -# interval of DNode report status to MNode, unit is Second +# interval of DNode report status to MNode, unit is Second, for cluster version only # statusInterval 1 # interval of Shell send HB to MNode, unit is Second # shellActivityTimer 3 +# interval of DNode send HB to DNode, unit is Second, for cluster version only +# vnodePeerHBTimer 1 + +# interval of MNode send HB to MNode, unit is Second, for cluster version only +# mgmtPeerHBTimer 1 + # time to keep MeterMeta in Cache, seconds # meterMetaKeepTimer 7200 @@ -94,12 +133,21 @@ # max number of tables # maxTables 650000 +# max number of Dnodes, for cluster version only +# maxDnodes 1000 + +# Max number of VGroups, for cluster version only +# maxVGroups 1000 + # system locale # locale en_US.UTF-8 # default system charset # charset UTF-8 +# system time zone +# timezone Asia/Shanghai (CST, +0800) + # enable/disable commit log # clog 1 @@ -115,6 +163,9 @@ # number of days to keep DB file # keep 3650 +# number of replications, for cluster version only +# replications 1 + # client default database(database should be created) # defaultDB @@ -136,18 +187,36 @@ # max connection to Vnode # maxVnodeConnections 10000 -# start http service in the cluster +# mnode take into account while balance, for cluster version only +# mgmtEqualVnodeNum 4 + +# number of seconds allowed for a dnode to be offline, for cluster version only +# offlineThreshold 864000 + +# start http service # http 1 -# start system monitor module in the cluster +# start system monitor module # monitor 1 +# maximum number of rows returned by the restful interface +# restfulRowLimit 10240 + # number of threads used to process http requests # httpMaxThreads 2 # pre-allocated 
number of http sessions # httpCacheSessions 100 +# whether to enable HTTP compression transmission +# httpEnableCompress 0 + +# the delayed time for launching each continuous query. 10% of the whole computing time window by default. +# streamCompDelayRatio 0.1 + +# the max allowed delayed time for launching continuous query. 20ms by default +# tsMaxStreamComputDelay 20000 + # whether the telegraf table name contains the number of tags and the number of fields # telegrafUseFieldNum 0 diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 501a06eddb0397afbf7170e603c71abab2326496..55fbd96d3f68b6157d70ac41ca4891d673104a7e 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -1,15 +1,20 @@ #!/bin/bash # # Generate deb package for ubuntu +set -e # set -x #curr_dir=$(pwd) compile_dir=$1 output_dir=$2 tdengine_ver=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -m ${script_dir}/../..)" +top_dir="$(readlink -f ${script_dir}/../..)" pkg_dir="${top_dir}/debworkroom" #echo "curr_dir: ${curr_dir}" @@ -63,7 +68,25 @@ debver="Version: "$tdengine_ver sed -i "2c$debver" ${pkg_dir}/DEBIAN/control #get taos version, then set deb name -debname="TDengine-"${tdengine_ver}".deb" + + +if [ "$verMode" == "cluster" ]; then + debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$verType" == "beta" ]; then + debname=${debname}-${verType}".deb" +elif [ "$verType" == "stable" ]; then + debname=${debname}".deb" +else + echo "unknow verType, nor stabel or beta" + exit 1 +fi # make deb package dpkg -b ${pkg_dir} $debname diff --git a/packaging/release.sh b/packaging/release.sh index 08edce249e3d81a24fb189692bf95dbd8e9ad9be..a4562d21d2bd15fa6c4ba3067f6046427bf60c2c 100755 --- a/packaging/release.sh +++ 
b/packaging/release.sh @@ -3,13 +3,69 @@ # Generate the deb package for ubunt, or rpm package for centos, or tar.gz package for other linux os set -e -# set -x +#set -x -armver=$1 +# releash.sh -v [cluster | edge] +# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] +# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] +# -V [stable | beta] +# -l [full | lite] + +# set parameters by default value +verMode=edge # [cluster, edge] +verType=stable # [stable, beta] +cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...] +osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] +pagMode=full # [full | lite] + +while getopts "hv:V:c:o:l:" arg +do + case $arg in + v) + #echo "verMode=$OPTARG" + verMode=$( echo $OPTARG ) + ;; + V) + #echo "verType=$OPTARG" + verType=$(echo $OPTARG) + ;; + c) + #echo "cpuType=$OPTARG" + cpuType=$(echo $OPTARG) + ;; + l) + #echo "pagMode=$OPTARG" + pagMode=$(echo $OPTARG) + ;; + o) + #echo "osType=$OPTARG" + osType=$(echo $OPTARG) + ;; + h) + echo "Usage: `basename $0` -v [cluster | edge] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta] -l [full | lite]" + exit 0 + ;; + ?) #unknow option + echo "unkonw argument" + exit 1 + ;; + esac +done + +echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode}" curr_dir=$(pwd) -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -m ${script_dir}/..)" + +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/.. +fi + versioninfo="${top_dir}/src/util/src/version.c" csudo="" @@ -106,32 +162,53 @@ done # output the version info to the buildinfo file. 
build_time=$(date +"%F %R") -echo "char version[64] = \"${version}\";" > ${versioninfo} -echo "char compatible_version[64] = \"${compatible_version}\";" >> ${versioninfo} -echo "char gitinfo[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo} -echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo} +echo "char version[64] = \"${version}\";" > ${versioninfo} +echo "char compatible_version[64] = \"${compatible_version}\";" >> ${versioninfo} +echo "char gitinfo[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo} +if [ "$verMode" != "cluster" ]; then + echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo} +else + enterprise_dir="${top_dir}/../enterprise" + cd ${enterprise_dir} + echo "char gitinfoOfInternal[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo} + cd ${curr_dir} +fi echo "char buildinfo[512] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo} +echo "" >> ${versioninfo} +tmp_version=$(echo $version | tr -s "." "_") +if [ "$verMode" == "cluster" ]; then + libtaos_info=${tmp_version}_${osType}_${cpuType} +else + libtaos_info=edge_${tmp_version}_${osType}_${cpuType} +fi +if [ "$verType" == "beta" ]; then + libtaos_info=${libtaos_info}_${verType} +fi +echo "void libtaos_${libtaos_info}() {};" >> ${versioninfo} # 2. 
cmake executable file - compile_dir="${top_dir}/debug" if [ -d ${compile_dir} ]; then ${csudo} rm -rf ${compile_dir} fi -${csudo} mkdir -p ${compile_dir} +if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${compile_dir} +else + mkdir -p ${compile_dir} +fi cd ${compile_dir} -# arm only support lite ver -if [ -z "$armver" ]; then - cmake ../ -elif [ "$armver" == "arm64" ]; then - cmake ../ -DARMVER=arm64 -elif [ "$armver" == "arm32" ]; then - cmake ../ -DARMVER=arm32 +# check support cpu type +if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then + if [ "$verMode" != "cluster" ]; then + cmake ../ -DCPUTYPE=${cpuType} -DPAGMODE=${pagMode} + else + cmake ../../ -DCPUTYPE=${cpuType} + fi else - echo "input parameter error!!!" - return + echo "input cpuType=${cpuType} error!!!" + exit 1 fi make @@ -143,28 +220,36 @@ cd ${curr_dir} #osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) #echo "osinfo: ${osinfo}" -echo "do deb package for the ubuntu system" -output_dir="${top_dir}/debs" -if [ -d ${output_dir} ]; then - ${csudo} rm -rf ${output_dir} -fi -${csudo} mkdir -p ${output_dir} -cd ${script_dir}/deb -${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} - -echo "do rpm package for the centos system" -output_dir="${top_dir}/rpms" -if [ -d ${output_dir} ]; then - ${csudo} rm -rf ${output_dir} +if [ "$osType" != "Darwin" ]; then + if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]]; then + echo "====do deb package for the ubuntu system====" + output_dir="${top_dir}/debs" + if [ -d ${output_dir} ]; then + ${csudo} rm -rf ${output_dir} + fi + ${csudo} mkdir -p ${output_dir} + cd ${script_dir}/deb + ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType} + + echo "====do rpm package for the centos system====" + output_dir="${top_dir}/rpms" + if [ -d ${output_dir} ]; then + ${csudo} rm -rf ${output_dir} + fi + 
${csudo} mkdir -p ${output_dir} + cd ${script_dir}/rpm + ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType} + fi + + echo "====do tar.gz package for all systems====" + cd ${script_dir}/tools + + ${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} + ${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} +else + cd ${script_dir}/tools + ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} fi -${csudo} mkdir -p ${output_dir} -cd ${script_dir}/rpm -${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} - -echo "do tar.gz package for all systems" -cd ${script_dir}/tools -${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${armver} -${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${armver} # 4. Clean up temporary compile directories #${csudo} rm -rf ${compile_dir} diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh index e301f5ece984e5853227e040ecf8cfbd7c655670..678e75c500937330c5e7364b580d3146d7974d78 100755 --- a/packaging/rpm/makerpm.sh +++ b/packaging/rpm/makerpm.sh @@ -2,16 +2,20 @@ # # Generate rpm package for centos -#set -e -#set -x +set -e +# set -x #curr_dir=$(pwd) compile_dir=$1 output_dir=$2 tdengine_ver=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -m ${script_dir}/../..)" +top_dir="$(readlink -f ${script_dir}/../..)" pkg_dir="${top_dir}/rpmworkroom" spec_file="${script_dir}/tdengine.spec" @@ -54,9 +58,30 @@ ${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file} -# copy rpm package to output_dir, then clean temp dir +# copy rpm package to output_dir, and modify package 
name, then clean temp dir #${csudo} cp -rf RPMS/* ${output_dir} cp_rpm_package ${pkg_dir}/RPMS + +if [ "$verMode" == "cluster" ]; then + rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + rpmname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$verType" == "beta" ]; then + rpmname=${rpmname}-${verType}".rpm" +elif [ "$verType" == "stable" ]; then + rpmname=${rpmname}".rpm" +else + echo "unknow verType, nor stabel or beta" + exit 1 +fi + +mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname} + cd .. ${csudo} rm -rf ${pkg_dir} diff --git a/packaging/rpm/taosd b/packaging/rpm/taosd index 6283c79383024df9ee39affcd2c85bfe27562b2a..46dd712e3139dad69d3db6db8b289d0f2424811a 100644 --- a/packaging/rpm/taosd +++ b/packaging/rpm/taosd @@ -26,7 +26,7 @@ MAX_OPEN_FILES=65535 # Default program options NAME=taosd -PROG=/usr/local/bin/taos/taosd +PROG=/usr/local/taos/bin/taosd USER=root GROUP=root diff --git a/packaging/tools/get_os.sh b/packaging/tools/get_os.sh new file mode 100755 index 0000000000000000000000000000000000000000..f74b63f9805e937933000d097c24bc6b85663288 --- /dev/null +++ b/packaging/tools/get_os.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# +# This file is used to install TAOS time-series database on linux systems. 
The operating system +# is required to use systemd to manage services at boot + +set -e +# set -x + +# -----------------------Variables definition--------------------- +OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2) +len=$(echo ${#OS}) +len=$((len-2)) +retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1) +echo -ne $retval diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh old mode 100755 new mode 100644 index e1bcce401d419ce7a97c4110161777cfeea4f0e5..c573a2086f05ace5b496bde9618f0bdc45026cf1 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -6,8 +6,11 @@ set -e #set -x +verMode=edge +pagMode=full + # -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -m "$0")) +script_dir=$(dirname $(readlink -f "$0")) # Dynamic directory data_dir="/var/lib/taos" log_dir="/var/log/taos" @@ -27,7 +30,12 @@ install_main_dir="/usr/local/taos" # old bin dir bin_dir="/usr/local/taos/bin" +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" + service_config_dir="/etc/systemd/system" +nginx_port=6060 +nginx_dir="/usr/local/nginxd" # Color setting RED='\033[0;31m' @@ -41,6 +49,8 @@ if command -v sudo > /dev/null; then csudo="sudo" fi +update_flag=0 + initd_mod=0 service_mod=2 if pidof systemd &> /dev/null; then @@ -69,23 +79,24 @@ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) #echo "osinfo: ${osinfo}" os_type=0 if echo $osinfo | grep -qwi "ubuntu" ; then - echo "this is ubuntu system" + echo "This is ubuntu system" os_type=1 elif echo $osinfo | grep -qwi "debian" ; then - echo "this is debian system" + echo "This is debian system" os_type=1 elif echo $osinfo | grep -qwi "Kylin" ; then - echo "this is Kylin system" + echo "This is Kylin system" os_type=1 elif echo $osinfo | grep -qwi "centos" ; then - echo "this is centos system" + echo "This is centos system" os_type=2 elif echo $osinfo | grep -qwi "fedora" ; then - echo "this is fedora system" + echo "This is fedora system" os_type=2 
else - echo "this is other linux system" - os_type=0 + echo "${osinfo}: This is an officially unverified linux system, If there are any problems with the installation and operation, " + echo "please feel free to contact taosdata.com for support." + os_type=1 fi function kill_taosd() { @@ -106,6 +117,9 @@ function install_main_path() { ${csudo} mkdir -p ${install_main_dir}/examples ${csudo} mkdir -p ${install_main_dir}/include ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$verMode" == "cluster" ]; then + ${csudo} mkdir -p ${nginx_dir} + fi } function install_bin() { @@ -124,16 +138,30 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + + if [ "$verMode" == "cluster" ]; then + ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/* + ${csudo} mkdir -p ${nginx_dir}/logs + ${csudo} chmod 777 ${nginx_dir}/sbin/nginx + fi } function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ "$verMode" == "cluster" ]; then + # Compatible with version 1.5 + ${csudo} mkdir -p ${v15_java_app_dir} + ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar + ${csudo} chmod 777 ${v15_java_app_dir} || : + fi } function install_header() { @@ -154,6 +182,57 @@ function install_config() { ${csudo} cp -f 
${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + + if [ "$verMode" == "cluster" ]; then + [ ! -z $1 ] && return 0 || : # only install client + + if ((${update_flag}==1)); then + return 0 + fi + + IP_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + IP_PATTERN="\b$IP_FORMAT\.$IP_FORMAT\.$IP_FORMAT\.$IP_FORMAT\b" + + echo + echo -e -n "${GREEN}Enter the IP address of an existing TDengine cluster node to join${NC} OR ${GREEN}leave it blank to build one${NC} :" + read masterIp + while true; do + if [ ! -z "$masterIp" ]; then + # check the format of the masterIp + if [[ $masterIp =~ $IP_PATTERN ]]; then + # Write the first IP to configuration file + sudo sed -i -r "s/#*\s*(masterIp\s*).*/\1$masterIp/" ${cfg_dir}/taos.cfg + + # Get the second IP address + + echo + echo -e -n "${GREEN}Enter the IP address of another node in cluster${NC} OR ${GREEN}leave it blank to skip${NC}: " + read secondIp + while true; do + + if [ ! 
-z "$secondIp" ]; then + if [[ $secondIp =~ $IP_PATTERN ]]; then + # Write the second IP to configuration file + sudo sed -i -r "s/#*\s*(secondIp\s*).*/\1$secondIp/" ${cfg_dir}/taos.cfg + break + else + read -p "Please enter the correct IP address: " secondIp + fi + else + break + fi + done + + break + else + read -p "Please enter the correct IP address: " masterIp + fi + else + break + fi + done + + fi } @@ -175,7 +254,9 @@ function install_connector() { } function install_examples() { - ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi } function clean_service_on_sysvinit() { @@ -240,7 +321,19 @@ function clean_service_on_systemd() { ${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} -} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + + if systemctl is-active --quiet nginxd; then + echo "Nginx for TDengine is running, stopping it..." 
+ ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi +} # taos:2345:respawn:/etc/init.d/taosd start @@ -269,6 +362,36 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" ${csudo} systemctl enable taosd + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> 
${nginx_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" + if ! ${csudo} systemctl enable nginxd &> /dev/null; then + ${csudo} systemctl daemon-reexec + ${csudo} systemctl enable nginxd + fi + ${csudo} systemctl start nginxd + fi } function install_service() { @@ -357,12 +480,29 @@ function update_TDengine() { install_log install_header install_lib - install_connector + if [ "$pagMode" != "lite" ]; then + install_connector + fi install_examples if [ -z $1 ]; then install_bin install_service install_config + + if [ "$verMode" == "cluster" ]; then + # Check if openresty is installed + openresty_work=false + + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for TDengine is updated successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for TDengine does not work! 
Please try again!\033[0m" + fi + fi + fi echo echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" @@ -376,7 +516,15 @@ function update_TDengine() { echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" fi - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + if [ "$verMode" == "cluster" ]; then + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi echo echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" else @@ -409,13 +557,29 @@ function install_TDengine() { install_log install_header install_lib - install_connector + if [ "$pagMode" != "lite" ]; then + install_connector + fi install_examples if [ -z $1 ]; then # install service and client # For installing new install_bin install_service + + if [ "$verMode" == "cluster" ]; then + openresty_work=false + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for TDengine is installed successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for TDengine does not work! 
Please try again!\033[0m" + fi + fi + fi + install_config # Ask if to start the service @@ -430,8 +594,17 @@ function install_TDengine() { else echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" fi - - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + + if [ "$verMode" == "cluster" ]; then + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi + echo echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" else # Only install client @@ -450,6 +623,7 @@ function install_TDengine() { if [ -z $1 ]; then # Install server and client if [ -x ${bin_dir}/taosd ]; then + update_flag=1 update_TDengine else install_TDengine @@ -457,6 +631,7 @@ if [ -z $1 ]; then else # Only install client if [ -x ${bin_dir}/taos ]; then + update_flag=1 update_TDengine client else install_TDengine client diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 7560ebca4140688dbbee461e58ddcfef9a4b2391..605944e9b3d3a296b9cd6ce252224d3702e57fc8 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -7,18 +7,36 @@ set -e #set -x # -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -m "$0")) -# Dynamic directory -data_dir="/var/lib/taos" -log_dir="/var/log/taos" + +osType=Linux +pagMode=full + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) + # Dynamic directory + data_dir="/var/lib/taos" + log_dir="/var/log/taos" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + data_dir="/var/lib/taos" + log_dir="~/TDengineLog" +fi 
log_link_dir="/usr/local/taos/log" cfg_install_dir="/etc/taos" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -inc_link_dir="/usr/include" +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi #install main path install_main_dir="/usr/local/taos" @@ -26,6 +44,8 @@ install_main_dir="/usr/local/taos" # old bin dir bin_dir="/usr/local/taos/bin" +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" # Color setting RED='\033[0;31m' @@ -51,9 +71,9 @@ function kill_client() { function install_main_path() { #create install main dir and all sub dir ${csudo} rm -rf ${install_main_dir} || : - ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir} ${csudo} mkdir -p ${install_main_dir}/cfg - ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/bin ${csudo} mkdir -p ${install_main_dir}/connector ${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/examples @@ -61,51 +81,60 @@ function install_main_path() { } function install_bin() { - # Remove links - ${csudo} rm -f ${bin_link_dir}/taos || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : - ${csudo} rm -f ${bin_link_dir}/rmtaos || : + # Remove links + ${csudo} rm -f ${bin_link_dir}/taos || : + if [ "$osType" == "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/taosdump || : + fi + ${csudo} rm -f ${bin_link_dir}/rmtaos || : - ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* #Make link - [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump 
|| : - [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : + [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : + if [ "$osType" == "Darwin" ]; then + [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + fi + [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : } function clean_lib() { - sudo rm -f /usr/lib/libtaos.so || : + sudo rm -f /usr/lib/libtaos.* || : sudo rm -rf ${lib_dir} || : } function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : - ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* - - ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + if [ "$osType" != "Darwin" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + else + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi } function install_header() { ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h 
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function install_config() { #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* - fi - + fi + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg } @@ -113,8 +142,12 @@ function install_config() { function install_log() { ${csudo} rm -rf ${log_dir} || : - ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} - + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + fi ${csudo} ln -s ${log_dir} ${install_main_dir}/log } @@ -142,17 +175,19 @@ function update_TDengine() { kill_client sleep 1 fi - + install_main_path install_log install_header install_lib - install_connector + if [ "$pagMode" != "lite" ]; then + install_connector + fi install_examples install_bin install_config - + echo echo -e "\033[44;32;1mTDengine client is updated successfully!${NC}" @@ -168,16 +203,18 @@ function install_TDengine() { tar -zxf taos.tar.gz echo -e "${GREEN}Start to install TDengine client...${NC}" - - install_main_path - install_log + + install_main_path + install_log install_header install_lib - install_connector + if [ "$pagMode" != "lite" ]; then + install_connector + fi install_examples install_bin install_config - + echo echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}" @@ -191,8 +228,8 @@ function install_TDengine() { if [ -e ${bin_dir}/taosd ]; then echo -e "\033[44;32;1mThere are already installed TDengine server, so don't need install client!${NC}" exit 0 - fi - + fi + if [ -x ${bin_dir}/taos ]; then update_flag=1 update_TDengine diff --git 
a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index c43f918e8d520a38a0184f0a2a970f6fd78b7cf5..2200c7f13d20ecd19b75c8fe39185a3c69558f22 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -9,19 +9,37 @@ set -e # -----------------------Variables definition--------------------- source_dir=$1 binary_dir=$2 -script_dir=$(dirname $(readlink -m "$0")) +osType=$3 + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) +else + script_dir=${source_dir}/packaging/tools +fi + # Dynamic directory data_dir="/var/lib/taos" -log_dir="/var/log/taos" + +if [ "$osType" != "Darwin" ]; then + log_dir="/var/log/taos" +else + log_dir="~/TDengineLog" +fi data_link_dir="/usr/local/taos/data" log_link_dir="/usr/local/taos/log" cfg_install_dir="/etc/taos" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -inc_link_dir="/usr/include" +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi #install main path install_main_dir="/usr/local/taos" @@ -43,25 +61,61 @@ if command -v sudo > /dev/null; then csudo="sudo" fi -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which insserv &> /dev/null); then - service_mod=1 - initd_mod=1 - service_config_dir="/etc/init.d" -elif $(which update-rc.d &> /dev/null); then - service_mod=1 - initd_mod=2 - service_config_dir="/etc/init.d" -else +if [ "$osType" != "Darwin" ]; then + + initd_mod=0 service_mod=2 + if pidof systemd &> /dev/null; then + service_mod=0 + elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi + else + service_mod=2 
+ fi + + # get the operating system type for using the corresponding init file + # ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification + #osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) + #echo "osinfo: ${osinfo}" + os_type=0 + if echo $osinfo | grep -qwi "ubuntu" ; then + echo "this is ubuntu system" + os_type=1 + elif echo $osinfo | grep -qwi "debian" ; then + echo "this is debian system" + os_type=1 + elif echo $osinfo | grep -qwi "Kylin" ; then + echo "this is Kylin system" + os_type=1 + elif echo $osinfo | grep -qwi "centos" ; then + echo "this is centos system" + os_type=2 + elif echo $osinfo | grep -qwi "fedora" ; then + echo "this is fedora system" + os_type=2 + else + echo "${osinfo}: This is an officially unverified linux system, If there are any problems with the installation and operation, " + echo "please feel free to contact taosdata.com for support." + os_type=1 + fi fi function kill_taosd() { - pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - ${csudo} kill -9 ${pid} || : + pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi } function install_main_path() { @@ -74,37 +128,62 @@ function install_main_path() { ${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/examples ${csudo} mkdir -p ${install_main_dir}/include - ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${install_main_dir}/init.d + fi } function install_bin() { # Remove links - ${csudo} rm -f ${bin_link_dir}/taos || : - ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : - ${csudo} rm -f ${bin_link_dir}/rmtaos || : - - ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin - ${csudo} cp -r ${script_dir}/remove.sh 
${install_main_dir}/bin + ${csudo} rm -f ${bin_link_dir}/taos || : + + if [ "$osType" != "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/taosd || : + ${csudo} rm -f ${bin_link_dir}/taosdemo || : + ${csudo} rm -f ${bin_link_dir}/taosdump || : + fi + + ${csudo} rm -f ${bin_link_dir}/rmtaos || : + + ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin + + if [ "$osType" != "Darwin" ]; then + ${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin + else + ${csudo} cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin + fi ${csudo} chmod 0555 ${install_main_dir}/bin/* #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : + [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + fi + + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + else + [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : + fi } function install_lib() { # Remove links - ${csudo} rm -f 
${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : versioninfo=$(${script_dir}/get_version.sh ${source_dir}/src/util/src/version.c) - ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* - ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1 - ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + if [ "$osType" != "Darwin" ]; then + ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + else + ${csudo} cp ${binary_dir}/build/lib/libtaos.${versioninfo}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} ln -sf ${install_main_dir}/driver/libtaos.${versioninfo}.dylib ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi } function install_header() { @@ -130,8 +209,13 @@ function install_config() { function install_log() { ${csudo} rm -rf ${log_dir} || : - ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} - + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && chmod 777 ${log_dir} + fi + ${csudo} ln -s ${log_dir} ${install_main_dir}/log } @@ -153,20 +237,26 @@ function install_examples() { } function clean_service_on_sysvinit() { - restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof taosd &> /dev/null; then ${csudo} service taosd stop || : fi - ${csudo} sed -i 
"\|${restart_config_str}|d" /etc/inittab || : - ${csudo} rm -f ${service_config_dir}/taosd || : if ((${initd_mod}==1)); then - ${csudo} grep -q -F "taos" /etc/inittab && ${csudo} insserv -r taosd || : + ${csudo} chkconfig --del taosd || : elif ((${initd_mod}==2)); then - ${csudo} grep -q -F "taos" /etc/inittab && ${csudo} update-rc.d -f taosd remove || : + ${csudo} insserv -r taosd || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d -f taosd remove || : + fi + + ${csudo} rm -f ${service_config_dir}/taosd || : + + if $(which init &> /dev/null); then + ${csudo} init q || : fi -# ${csudo} update-rc.d -f taosd remove || : - ${csudo} init q || : } function install_service_on_sysvinit() { @@ -175,19 +265,26 @@ function install_service_on_sysvinit() { sleep 1 # Install taosd service + if ((${os_type}==1)); then ${csudo} cp -f ${script_dir}/../deb/init.d/taosd ${install_main_dir}/init.d ${csudo} cp ${script_dir}/../deb/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd - restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - - ${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" - # TODO: for centos, change here + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/../rpm/init.d/taosd ${install_main_dir}/init.d + ${csudo} cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd + fi + + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" + if ((${initd_mod}==1)); then - ${csudo} insserv taosd || : + ${csudo} chkconfig --add taosd || : + ${csudo} chkconfig --level 2345 taosd on || : elif ((${initd_mod}==2)); then + ${csudo} insserv taosd || : + ${csudo} insserv -d taosd || : + elif ((${initd_mod}==3)); then ${csudo} update-rc.d taosd 
defaults || : fi -# ${csudo} update-rc.d taosd defaults - # chkconfig mysqld on } function clean_service_on_systemd() { @@ -237,7 +334,7 @@ function install_service() { elif ((${service_mod}==1)); then install_service_on_sysvinit else - # must manual start taosd + # must manual stop taosd kill_taosd fi } @@ -245,7 +342,9 @@ function install_service() { function update_TDengine() { echo -e "${GREEN}Start to update TDEngine...${NC}" # Stop the service if running - if pidof taosd &> /dev/null; then + + if [ "$osType" != "Darwin" ]; then + if pidof taosd &> /dev/null; then if ((${service_mod}==0)); then ${csudo} systemctl stop taosd || : elif ((${service_mod}==1)); then @@ -254,6 +353,7 @@ function update_TDengine() { kill_taosd fi sleep 1 + fi fi install_main_path @@ -264,32 +364,54 @@ function update_TDengine() { install_connector install_examples install_bin - install_service + + if [ "$osType" != "Darwin" ]; then + install_service + fi + install_config - echo - echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" - elif ((${service_mod}==1)); then + if [ "$osType" != "Darwin" ]; then + echo + echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + echo + + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" + elif ((${service_mod}==1)); then echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" - else - echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" - fi + else + echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine is 
updated successfully!${NC}" + else + echo + echo -e "\033[44;32;1mTDengine Client is updated successfully!${NC}" + echo - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - echo - echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + echo -e "${GREEN_DARK}To access TDengine Client ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine Client is updated successfully!${NC}" + fi } function install_TDengine() { # Start to install - echo -e "${GREEN}Start to install TDEngine...${NC}" - + if [ "$osType" != "Darwin" ]; then + echo -e "${GREEN}Start to install TDEngine...${NC}" + else + echo -e "${GREEN}Start to install TDEngine Client ...${NC}" + fi + install_main_path - install_data + + if [ "$osType" != "Darwin" ]; then + install_data + fi install_log install_header install_lib @@ -297,30 +419,41 @@ function install_TDengine() { install_examples install_bin - install_service + + if [ "$osType" != "Darwin" ]; then + install_service + fi + install_config - # Ask if to start the service - echo - echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" + + if [ "$osType" != "Darwin" ]; then + # Ask if to start the service + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" + else + echo -e "${GREEN_DARK}To start 
TDengine ${NC}: ./taosd${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" else - echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine Client is installed successfully!${NC}" fi - - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - echo - echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" } ## ==============================Main program starts from here============================ echo source directory: $1 echo binary directory: $2 -if [ -x ${bin_dir}/taosd ]; then +if [ -x ${bin_dir}/taos ]; then update_TDengine else install_TDengine diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index f8d21e202b8649c03d40d33d1c43c6338b12f790..6120f9fcc2bb676b716be5172967bd7dfe2f0090 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -1,17 +1,28 @@ #!/bin/bash # -# Generate tar.gz package for linux client +# Generate tar.gz package for linux client in all os system set -e -set -x +#set -x curr_dir=$(pwd) compile_dir=$1 version=$2 build_time=$3 -armver=$4 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -m ${script_dir}/../..)" +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/../..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/../.. +fi # create compressed install file. 
build_dir="${compile_dir}/build" @@ -19,13 +30,32 @@ code_dir="${top_dir}/src" release_dir="${top_dir}/release" #package_name='linux' -install_dir="${release_dir}/TDengine-client-${version}" + +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/TDengine-enterprise-client" +else + install_dir="${release_dir}/TDengine-client" +fi # Directories and files. -bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh" -lib_files="${build_dir}/lib/libtaos.so.${version}" + +if [ "$osType" != "Darwin" ]; then + if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taosd + strip ${build_dir}/bin/taos + bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" + else + bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh" + fi + lib_files="${build_dir}/lib/libtaos.so.${version}" +else + bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" + lib_files="${build_dir}/lib/libtaos.${version}.dylib" +fi + header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" cfg_dir="${top_dir}/packaging/cfg" + install_files="${script_dir}/install_client.sh" # make directories. @@ -35,20 +65,39 @@ mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cf mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* cd ${install_dir} -tar -zcv -f taos.tar.gz * --remove-files || : + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f taos.tar.gz * --remove-files || : +else + tar -zcv -f taos.tar.gz * || : + mv taos.tar.gz .. + rm -rf ./* + mv ../taos.tar.gz . 
+fi cd ${curr_dir} -cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install* +cp ${install_files} ${install_dir} +if [ "$osType" == "Darwin" ]; then + sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >> install_client_temp.sh + mv install_client_temp.sh ${install_dir}/install_client.sh +fi +if [ "$pagMode" == "lite" ]; then + sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >> install_client_temp.sh + mv install_client_temp.sh ${install_dir}/install_client.sh +fi +chmod a+x ${install_dir}/install_client.sh # Copy example code mkdir -p ${install_dir}/examples -cp -r ${top_dir}/tests/examples/c ${install_dir}/examples -cp -r ${top_dir}/tests/examples/JDBC ${install_dir}/examples -cp -r ${top_dir}/tests/examples/matlab ${install_dir}/examples -cp -r ${top_dir}/tests/examples/python ${install_dir}/examples -cp -r ${top_dir}/tests/examples/R ${install_dir}/examples -cp -r ${top_dir}/tests/examples/go ${install_dir}/examples - +examples_dir="${top_dir}/tests/examples" +cp -r ${examples_dir}/c ${install_dir}/examples +if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then + cp -r ${examples_dir}/JDBC ${install_dir}/examples + cp -r ${examples_dir}/matlab ${install_dir}/examples + cp -r ${examples_dir}/python ${install_dir}/examples + cp -r ${examples_dir}/R ${install_dir}/examples + cp -r ${examples_dir}/go ${install_dir}/examples +fi # Copy driver mkdir -p ${install_dir}/driver cp ${lib_files} ${install_dir}/driver @@ -56,23 +105,51 @@ cp ${lib_files} ${install_dir}/driver # Copy connector connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector -cp ${build_dir}/lib/*.jar ${install_dir}/connector -cp -r ${connector_dir}/grafana ${install_dir}/connector/ -cp -r ${connector_dir}/python ${install_dir}/connector/ -cp -r ${connector_dir}/go ${install_dir}/connector +if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then + if [ "$osType" != "Darwin" ]; then + cp 
${build_dir}/lib/*.jar ${install_dir}/connector + fi + cp -r ${connector_dir}/grafana ${install_dir}/connector/ + cp -r ${connector_dir}/python ${install_dir}/connector/ + cp -r ${connector_dir}/go ${install_dir}/connector +fi # Copy release note # cp ${script_dir}/release_note ${install_dir} # exit 1 -cd ${release_dir} -if [ -z "$armver" ]; then - tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files -elif [ "$armver" == "arm64" ]; then - tar -zcv -f "$(basename ${install_dir})-arm64.tar.gz" $(basename ${install_dir}) --remove-files -elif [ "$armver" == "arm32" ]; then - tar -zcv -f "$(basename ${install_dir})-arm32.tar.gz" $(basename ${install_dir}) --remove-files +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${version}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${version}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$pagMode" == "lite" ]; then + pkg_name=${pkg_name}-Lite +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stable or beta" + exit 1 +fi + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +else + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : + mv "$(basename ${pkg_name}).tar.gz" .. + rm -rf ./* + mv ../"$(basename ${pkg_name}).tar.gz" . 
fi cd ${curr_dir} diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 714b74dbe6952a5ef9387e9709551fb67cf441bf..d39cf418434dc75b90602428f07475f3c796067a 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -1,15 +1,22 @@ #!/bin/bash # -# Generate deb package for other os system (no unbutu or centos) +# Generate tar.gz package for all os system + +set -e +#set -x curr_dir=$(pwd) compile_dir=$1 version=$2 build_time=$3 -armver=$4 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -m ${script_dir}/../..)" +top_dir="$(readlink -f ${script_dir}/../..)" # create compressed install file. build_dir="${compile_dir}/build" @@ -17,14 +24,26 @@ code_dir="${top_dir}/src" release_dir="${top_dir}/release" #package_name='linux' -install_dir="${release_dir}/TDengine-${version}" +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/TDengine-enterprise-server" +else + install_dir="${release_dir}/TDengine-server" +fi # Directories and files. 
-bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdump ${script_dir}/remove.sh" +if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taosd + strip ${build_dir}/bin/taos + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" +else + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdump ${script_dir}/remove.sh" +fi + lib_files="${build_dir}/lib/libtaos.so.${version}" header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" cfg_dir="${top_dir}/packaging/cfg" install_files="${script_dir}/install.sh" +nginx_dir="${code_dir}/../../enterprise/src/modules/web" # Init file #init_dir=${script_dir}/deb @@ -44,22 +63,53 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x $ mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh + mv remove_temp.sh ${install_dir}/bin/remove.sh + + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd + cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png + rm -rf ${install_dir}/nginxd/png + + if [ "$cpuType" == "aarch64" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ + elif [ "$cpuType" == "aarch32" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ + fi + rm -rf ${install_dir}/nginxd/sbin/arm +fi + cd ${install_dir} -tar -zcv -f taos.tar.gz * --remove-files || : +tar -zcv -f taos.tar.gz * --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar taos.tar.gz error !!!" 
+ exit $exitcode +fi cd ${curr_dir} -cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install* +cp ${install_files} ${install_dir} +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >> install_temp.sh + mv install_temp.sh ${install_dir}/install.sh +fi +if [ "$pagMode" == "lite" ]; then + sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >> install_temp.sh + mv install_temp.sh ${install_dir}/install.sh +fi +chmod a+x ${install_dir}/install.sh # Copy example code mkdir -p ${install_dir}/examples examples_dir="${top_dir}/tests/examples" -cp -r ${examples_dir}/c ${install_dir}/examples -cp -r ${examples_dir}/JDBC ${install_dir}/examples -cp -r ${examples_dir}/matlab ${install_dir}/examples -cp -r ${examples_dir}/python ${install_dir}/examples -cp -r ${examples_dir}/R ${install_dir}/examples -cp -r ${examples_dir}/go ${install_dir}/examples - + cp -r ${examples_dir}/c ${install_dir}/examples +if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then + cp -r ${examples_dir}/JDBC ${install_dir}/examples + cp -r ${examples_dir}/matlab ${install_dir}/examples + cp -r ${examples_dir}/python ${install_dir}/examples + cp -r ${examples_dir}/R ${install_dir}/examples + cp -r ${examples_dir}/go ${install_dir}/examples +fi # Copy driver mkdir -p ${install_dir}/driver cp ${lib_files} ${install_dir}/driver @@ -67,23 +117,46 @@ cp ${lib_files} ${install_dir}/driver # Copy connector connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector -cp ${build_dir}/lib/*.jar ${install_dir}/connector -cp -r ${connector_dir}/grafana ${install_dir}/connector/ -cp -r ${connector_dir}/python ${install_dir}/connector/ -cp -r ${connector_dir}/go ${install_dir}/connector - +if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then + cp ${build_dir}/lib/*.jar ${install_dir}/connector + cp -r ${connector_dir}/grafana ${install_dir}/connector/ + cp -r ${connector_dir}/python 
${install_dir}/connector/ + cp -r ${connector_dir}/go ${install_dir}/connector +fi # Copy release note # cp ${script_dir}/release_note ${install_dir} # exit 1 -cd ${release_dir} -if [ -z "$armver" ]; then - tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files -elif [ "$armver" == "arm64" ]; then - tar -zcv -f "$(basename ${install_dir})-arm64.tar.gz" $(basename ${install_dir}) --remove-files -elif [ "$armver" == "arm32" ]; then - tar -zcv -f "$(basename ${install_dir})-arm32.tar.gz" $(basename ${install_dir}) --remove-files +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${version}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${version}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$pagMode" == "lite" ]; then + pkg_name=${pkg_name}-Lite +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stabel or beta" + exit 1 +fi + +tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar ${pkg_name}.tar.gz error !!!" 
+ exit $exitcode fi cd ${curr_dir} diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index a62f7a5eeb44730f4d68d5f018eeea49b807da3a..eea36f4484707e8ae1a0a18b5296f7aa1d899ee0 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -4,7 +4,7 @@ # is required to use systemd to manage services at boot #set -x # -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -m "$0")) +script_dir=$(dirname $(readlink -f "$0")) # Dynamic directory data_dir="/var/lib/taos" log_dir="/var/log/taos" diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 3c9fd6bf7ff7b3098d900535a52a3e81f1368a1e..28cc835f3057bf37bcd157aeb744fffa89771d0b 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -2,6 +2,11 @@ # # Script to stop the service and uninstall TDengine, but retain the config, data and log files. +set -e +#set -x + +verMode=edge + RED='\033[0;31m' GREEN='\033[1;32m' NC='\033[0m' @@ -14,10 +19,14 @@ cfg_link_dir="/usr/local/taos/cfg" bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" inc_link_dir="/usr/include" +install_nginxd_dir="/usr/local/nginxd" + +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" service_config_dir="/etc/systemd/system" taos_service_name="taosd" - +nginx_service_name="nginxd" csudo="" if command -v sudo > /dev/null; then csudo="sudo" @@ -62,6 +71,7 @@ function clean_bin() { function clean_lib() { # Remove link ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : } function clean_header() { @@ -90,6 +100,20 @@ function clean_service_on_systemd() { ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/${nginx_service_name}.service" + + if [ -d ${bin_dir}/web ]; then + if systemctl is-active --quiet ${nginx_service_name}; then + echo "Nginx for TDengine is running, 
stopping it..." + ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi + fi } function clean_service_on_sysvinit() { @@ -143,6 +167,7 @@ clean_config ${csudo} rm -rf ${data_link_dir} || : ${csudo} rm -rf ${install_main_dir} +${csudo} rm -rf ${install_nginxd_dir} osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) if echo $osinfo | grep -qwi "ubuntu" ; then diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 206de34c1f74325dd41773358bbe2c57690ca153..9210546a9f407fd821a176cadfcf88ae8023dc2f 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -17,6 +17,10 @@ bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" inc_link_dir="/usr/include" + +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" + csudo="" if command -v sudo > /dev/null; then csudo="sudo" @@ -39,6 +43,7 @@ function clean_bin() { function clean_lib() { # Remove link ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : } function clean_header() { diff --git a/packaging/tools/repair_link.sh b/packaging/tools/repair_link.sh index 42b1082a9e848cfbf3320e62fdab3b1d3b48c616..7fd503f27013a9fce7208ece4335a1f427e05c9d 100755 --- a/packaging/tools/repair_link.sh +++ b/packaging/tools/repair_link.sh @@ -17,7 +17,7 @@ done declare -A dirHash for linkFile in $(find -L $linkDir -xtype l); do - targetFile=$(readlink -m $linkFile) + targetFile=$(readlink -f $linkFile) echo "targetFile: ${targetFile}" # TODO : Extract directory part and basename part dirName=$(dirname $(dirname ${targetFile})) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index d3baf84d6754ad2edcf1ac193cd46020760553ea..92d6b61eb2473c790c967a4a0091e233de84b8fa 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -24,20 +24,10 @@ IF ((TD_LINUX_64) OR 
(TD_LINUX_32 AND TD_ARM)) #set version of .so #VERSION so version #SOVERSION api version - IF (TD_LITE) - execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) - execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c - OUTPUT_VARIABLE - VERSION_INFO) - MESSAGE(STATUS "build lite version ${VERSION_INFO}") - ELSE () - execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) - execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c - OUTPUT_VARIABLE - VERSION_INFO) - MESSAGE(STATUS "build cluster version ${VERSION_INFO}") - ENDIF () - + execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) + execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c + OUTPUT_VARIABLE + VERSION_INFO) MESSAGE(STATUS "build version ${VERSION_INFO}") SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1) @@ -51,10 +41,13 @@ ELSEIF (TD_WINDOWS_64) # generate dynamic library (*.dll) ADD_LIBRARY(taos SHARED ${SRC}) - SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def) + IF (NOT TD_GODLL) + SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def) + ENDIF () TARGET_LINK_LIBRARIES(taos trpc) ELSEIF (TD_DARWIN_64) + SET(CMAKE_MACOSX_RPATH 1) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux) ADD_LIBRARY(taos_static STATIC ${SRC}) @@ -64,6 +57,17 @@ ELSEIF (TD_DARWIN_64) # generate dynamic library (*.dylib) ADD_LIBRARY(taos SHARED ${SRC}) TARGET_LINK_LIBRARIES(taos trpc tutil pthread m) - + + SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) + + #set version of .so + #VERSION so version + #SOVERSION api version + execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) + 
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c + OUTPUT_VARIABLE + VERSION_INFO) + MESSAGE(STATUS "build version ${VERSION_INFO}") + SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1) ENDIF () diff --git a/src/client/inc/tscJoinProcess.h b/src/client/inc/tscJoinProcess.h index 89f29807ac900c1465eb6f506a7d9b2a50aaaef4..34764e4db62469af14592a026015c88b53a03fa5 100644 --- a/src/client/inc/tscJoinProcess.h +++ b/src/client/inc/tscJoinProcess.h @@ -27,7 +27,7 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql); void tscGetQualifiedTSList(SSqlObj* pSql, SJoinSubquerySupporter* p1, SJoinSubquerySupporter* p2, int32_t* num); void tscSetupOutputColumnIndex(SSqlObj* pSql); -int32_t tscLaunchSecondSubquery(SSqlObj* pSql); +int32_t tscLaunchSecondPhaseSubqueries(SSqlObj* pSql); void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code); SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, int32_t index); @@ -121,7 +121,7 @@ STSBuf* tsBufCreate(bool autoDelete); STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete); STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder); -void tsBufDestory(STSBuf* pTSBuf); +void* tsBufDestory(STSBuf* pTSBuf); void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData, int32_t len); int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeIdx); diff --git a/src/client/inc/tscSQLParser.h b/src/client/inc/tscSQLParser.h index 34faad525b6b5e8472403cb616c833cc57b78b0f..dd579d08c3c6b1465ce6b34e29f453c7b00827ec 100644 --- a/src/client/inc/tscSQLParser.h +++ b/src/client/inc/tscSQLParser.h @@ -21,17 +21,78 @@ extern "C" { #endif #include "taos.h" +#include "taosmsg.h" #include "tsqldef.h" #include "ttypes.h" -#include "taosmsg.h" + +enum _sql_cmd { + TSDB_SQL_SELECT = 1, + TSDB_SQL_FETCH, + TSDB_SQL_INSERT, + + TSDB_SQL_MGMT, 
// the SQL below is for mgmt node + TSDB_SQL_CREATE_DB, + TSDB_SQL_CREATE_TABLE, + TSDB_SQL_DROP_DB, + TSDB_SQL_DROP_TABLE, + TSDB_SQL_CREATE_ACCT, + TSDB_SQL_CREATE_USER, //10 + TSDB_SQL_DROP_ACCT, + TSDB_SQL_DROP_USER, + TSDB_SQL_ALTER_USER, + TSDB_SQL_ALTER_ACCT, + TSDB_SQL_ALTER_TABLE, + TSDB_SQL_ALTER_DB, + TSDB_SQL_CREATE_MNODE, + TSDB_SQL_DROP_MNODE, + TSDB_SQL_CREATE_DNODE, + TSDB_SQL_DROP_DNODE, // 20 + TSDB_SQL_CFG_DNODE, + TSDB_SQL_CFG_MNODE, + TSDB_SQL_SHOW, + TSDB_SQL_RETRIEVE, + TSDB_SQL_KILL_QUERY, + TSDB_SQL_KILL_STREAM, + TSDB_SQL_KILL_CONNECTION, + + TSDB_SQL_READ, // SQL below is for read operation + TSDB_SQL_CONNECT, + TSDB_SQL_USE_DB, // 30 + TSDB_SQL_META, + TSDB_SQL_METRIC, + TSDB_SQL_MULTI_META, + TSDB_SQL_HB, + + TSDB_SQL_LOCAL, // SQL below for client local + TSDB_SQL_DESCRIBE_TABLE, + TSDB_SQL_RETRIEVE_METRIC, + TSDB_SQL_METRIC_JOIN_RETRIEVE, + TSDB_SQL_RETRIEVE_TAGS, + + /* + * build empty result instead of accessing dnode to fetch result + * reset the client cache + */ + TSDB_SQL_RETRIEVE_EMPTY_RESULT, //40 + + TSDB_SQL_RESET_CACHE, + TSDB_SQL_SERV_STATUS, + TSDB_SQL_CURRENT_DB, + TSDB_SQL_SERV_VERSION, + TSDB_SQL_CLI_VERSION, + TSDB_SQL_CURRENT_USER, + TSDB_SQL_CFG_LOCAL, + + TSDB_SQL_MAX //48 +}; #define MAX_TOKEN_LEN 30 // token type enum { - TSQL_NODE_TYPE_EXPR = 0x1, - TSQL_NODE_TYPE_ID = 0x2, - TSQL_NODE_TYPE_VALUE = 0x4, + TSQL_NODE_TYPE_EXPR = 0x1, + TSQL_NODE_TYPE_ID = 0x2, + TSQL_NODE_TYPE_VALUE = 0x4, }; extern char tTokenTypeSwitcher[13]; @@ -72,72 +133,12 @@ typedef struct tFieldList { TAOS_FIELD *p; } tFieldList; -// sql operation type +// create table operation type enum TSQL_TYPE { - TSQL_CREATE_NORMAL_METER = 0x01, - TSQL_CREATE_NORMAL_METRIC = 0x02, - TSQL_CREATE_METER_FROM_METRIC = 0x04, - TSQL_CREATE_STREAM = 0x08, - TSQL_QUERY_METER = 0x10, - TSQL_INSERT = 0x20, - - DROP_DNODE = 0x40, - DROP_DATABASE = 0x41, - DROP_TABLE = 0x42, - DROP_USER = 0x43, - DROP_ACCOUNT = 0x44, - - USE_DATABASE = 0x50, - - // show 
operation - SHOW_DATABASES = 0x60, - SHOW_TABLES = 0x61, - SHOW_STABLES = 0x62, - SHOW_MNODES = 0x63, - SHOW_DNODES = 0x64, - SHOW_ACCOUNTS = 0x65, - SHOW_USERS = 0x66, - SHOW_VGROUPS = 0x67, - SHOW_QUERIES = 0x68, - SHOW_STREAMS = 0x69, - SHOW_CONFIGS = 0x6a, - SHOW_SCORES = 0x6b, - SHOW_MODULES = 0x6c, - SHOW_CONNECTIONS = 0x6d, - SHOW_GRANTS = 0x6e, - SHOW_VNODES = 0x6f, - - // create dnode - CREATE_DNODE = 0x80, - CREATE_DATABASE = 0x81, - CREATE_USER = 0x82, - CREATE_ACCOUNT = 0x83, - - DESCRIBE_TABLE = 0x90, - - ALTER_USER_PASSWD = 0xA0, - ALTER_USER_PRIVILEGES = 0xA1, - ALTER_DNODE = 0xA2, - ALTER_LOCAL = 0xA3, - ALTER_DATABASE = 0xA4, - ALTER_ACCT = 0xA5, - - // reset operation - RESET_QUERY_CACHE = 0xB0, - - // alter tags - ALTER_TABLE_TAGS_ADD = 0xC0, - ALTER_TABLE_TAGS_DROP = 0xC1, - ALTER_TABLE_TAGS_CHG = 0xC2, - ALTER_TABLE_TAGS_SET = 0xC4, - - // alter table column - ALTER_TABLE_ADD_COLUMN = 0xD0, - ALTER_TABLE_DROP_COLUMN = 0xD1, - - KILL_QUERY = 0xD2, - KILL_STREAM = 0xD3, - KILL_CONNECTION = 0xD4, + TSQL_CREATE_TABLE = 0x1, + TSQL_CREATE_STABLE = 0x2, + TSQL_CREATE_TABLE_FROM_STABLE = 0x3, + TSQL_CREATE_STREAM = 0x4, }; typedef struct SQuerySQL { @@ -157,33 +158,31 @@ typedef struct SQuerySQL { typedef struct SCreateTableSQL { struct SSQLToken name; // meter name, create table [meterName] xxx bool existCheck; - + + int8_t type; // create normal table/from super table/ stream struct { tFieldList *pTagColumns; // for normal table, pTagColumns = NULL; tFieldList *pColumns; } colInfo; struct { - SSQLToken metricName; // metric name, for using clause + SSQLToken stableName; // super table name, for using clause tVariantList *pTagVals; // create by using metric, tag value + STagData tagdata; } usingInfo; SQuerySQL *pSelect; - } SCreateTableSQL; typedef struct SAlterTableSQL { SSQLToken name; + int16_t type; + STagData tagData; + tFieldList * pAddColumns; - SSQLToken dropTagToken; tVariantList *varList; // set t=val or: change src dst } SAlterTableSQL; 
-typedef struct SInsertSQL { - SSQLToken name; - struct tSQLExprListList *pValue; -} SInsertSQL; - typedef struct SCreateDBInfo { SSQLToken dbname; int32_t replica; @@ -204,41 +203,68 @@ typedef struct SCreateDBInfo { } SCreateDBInfo; typedef struct SCreateAcctSQL { - int32_t users; - int32_t dbs; - int32_t tseries; - int32_t streams; - int32_t pps; - int64_t storage; - int64_t qtime; - int32_t conns; + int32_t maxUsers; + int32_t maxDbs; + int32_t maxTimeSeries; + int32_t maxStreams; + int32_t maxPointsPerSecond; + int64_t maxStorage; + int64_t maxQueryTime; + int32_t maxConnections; SSQLToken stat; } SCreateAcctSQL; +typedef struct SShowInfo { + uint8_t showType; + SSQLToken prefix; + SSQLToken pattern; +} SShowInfo; + +typedef struct SUserInfo { + SSQLToken user; + SSQLToken passwd; +// bool hasPasswd; + + SSQLToken privilege; +// bool hasPrivilege; + + int16_t type; +} SUserInfo; + typedef struct tDCLSQL { int32_t nTokens; /* Number of expressions on the list */ int32_t nAlloc; /* Number of entries allocated below */ SSQLToken *a; /* one entry for element */ + bool existsCheck; union { SCreateDBInfo dbOpt; SCreateAcctSQL acctOpt; + SShowInfo showOpt; + SSQLToken ip; }; + + SUserInfo user; + } tDCLSQL; +typedef struct SSubclauseInfo { // "UNION" multiple select sub-clause + SQuerySQL **pClause; + int32_t numOfClause; +} SSubclauseInfo; + typedef struct SSqlInfo { - int32_t sqlType; - bool validSql; + int32_t type; + bool valid; union { SCreateTableSQL *pCreateTableInfo; - SInsertSQL * pInsertInfo; SAlterTableSQL * pAlterInfo; - SQuerySQL * pQueryInfo; tDCLSQL * pDCLInfo; }; - char pzErrMsg[256]; + SSubclauseInfo subclauseInfo; + char pzErrMsg[256]; } SSqlInfo; typedef struct tSQLExpr { @@ -338,31 +364,39 @@ SQuerySQL *tSetQuerySQLElems(SSQLToken *pSelectToken, tSQLExprList *pSelection, SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pMetricName, tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type); -void 
tSQLExprDestroy(tSQLExpr *); -void tSQLExprNodeDestroy(tSQLExpr *pExpr); + +void tSQLExprNodeDestroy(tSQLExpr *pExpr); tSQLExpr *tSQLExprNodeClone(tSQLExpr *pExpr); SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type); tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExprList); -void tSetInsertSQLElems(SSqlInfo *pInfo, SSQLToken *pName, tSQLExprListList *pList); +void destroyAllSelectClause(SSubclauseInfo *pSql); +void doDestroyQuerySql(SQuerySQL *pSql); -void destroyQuerySql(SQuerySQL *pSql); +SSqlInfo * setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type); +SSubclauseInfo *setSubclause(SSubclauseInfo *pClause, void *pSqlExprInfo); -void setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type); +SSubclauseInfo *appendSelectClause(SSubclauseInfo *pInfo, void *pSubclause); void setCreatedMeterName(SSqlInfo *pInfo, SSQLToken *pMeterName, SSQLToken *pIfNotExists); void SQLInfoDestroy(SSqlInfo *pInfo); void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParams, ...); +void setDropDBTableInfo(SSqlInfo *pInfo, int32_t type, SSQLToken* pToken, SSQLToken* existsCheck); +void setShowOptions(SSqlInfo *pInfo, int32_t type, SSQLToken* prefix, SSQLToken* pPatterns); tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken); void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBInfo *pDB, SSQLToken *pIgExists); void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo); +void setCreateUserSQL(SSqlInfo *pInfo, SSQLToken *pName, SSQLToken *pPasswd); +void setKillSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *ip); +void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SSQLToken *pName, SSQLToken* pPwd, SSQLToken *pPrivilege); + void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo); // prefix show db.tables; diff --git 
a/src/client/inc/tscSecondaryMerge.h b/src/client/inc/tscSecondaryMerge.h index 4c95994dfab1ea1c5a259ed7f13ddd91b91c6e1e..08d995c9f3d789a82f5b8fa1331d8653a017181b 100644 --- a/src/client/inc/tscSecondaryMerge.h +++ b/src/client/inc/tscSecondaryMerge.h @@ -68,7 +68,7 @@ typedef struct SLocalReducer { bool hasPrevRow; // cannot be released bool hasUnprocessedRow; tOrderDescriptor * pDesc; - tColModel * resColModel; + SColumnModel * resColModel; tExtMemBuffer ** pExtMemBuffer; // disk-based buffer SInterpolationInfo interpolationInfo; // interpolation support structure char * pFinalRes; // result data after interpo @@ -90,21 +90,21 @@ typedef struct SSubqueryState { } SSubqueryState; typedef struct SRetrieveSupport { - tExtMemBuffer ** pExtMemBuffer; // for build loser tree + tExtMemBuffer ** pExtMemBuffer; // for build loser tree tOrderDescriptor *pOrderDescriptor; - tColModel * pFinalColModel; // colModel for final result + SColumnModel * pFinalColModel; // colModel for final result SSubqueryState * pState; - int32_t vnodeIdx; // index of current vnode in vnode list + int32_t subqueryIndex; // index of current vnode in vnode list SSqlObj * pParentSqlObj; - tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to - uint32_t numOfRetry; // record the number of retry times + tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to + uint32_t numOfRetry; // record the number of retry times pthread_mutex_t queryMutex; } SRetrieveSupport; int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pDesc, - tColModel **pFinalModel, uint32_t nBufferSize); + SColumnModel **pFinalModel, uint32_t nBufferSize); -void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, tColModel *pFinalModel, +void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, int32_t numOfVnodes); int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, 
tOrderDescriptor *pDesc, tFilePage *pPage, void *data, @@ -116,11 +116,11 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF * create local reducer to launch the second-stage reduce process at client site */ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, - tColModel *finalModel, SSqlCmd *pSqlCmd, SSqlRes *pRes); + SColumnModel *finalModel, SSqlCmd *pSqlCmd, SSqlRes *pRes); void tscDestroyLocalReducer(SSqlObj *pSql); -int32_t tscLocalDoReduce(SSqlObj *pSql); +int32_t tscDoLocalreduce(SSqlObj *pSql); #ifdef __cplusplus } diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 6666fa24a605c186678b2642b9735432b75a8df4..9868f703c37f10c1c15df576899d7f2724bdb196 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -23,15 +23,15 @@ extern "C" { /* * @date 2018/09/30 */ -#include -#include +#include "os.h" #include "textbuffer.h" +#include "tscSecondaryMerge.h" #include "tsclient.h" #include "tsdb.h" -#include "tscSecondaryMerge.h" -#define UTIL_METER_IS_METRIC(metaInfo) (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC)) -#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_METRIC(metaInfo))) +#define UTIL_METER_IS_SUPERTABLE(metaInfo) \ + (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC)) +#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_SUPERTABLE(metaInfo))) #define UTIL_METER_IS_CREATE_FROM_METRIC(metaInfo) \ (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_MTABLE)) @@ -52,7 +52,6 @@ typedef struct SParsedDataColInfo { typedef struct SJoinSubquerySupporter { SSubqueryState* pState; SSqlObj* pObj; // parent SqlObj - bool hasMore; // has data from vnode to fetch int32_t subqueryIndex; // index of sub query int64_t interval; // interval time SLimitVal limit; // limit info @@ -62,28 +61,30 @@ typedef struct 
SJoinSubquerySupporter { SFieldInfo fieldsInfo; STagCond tagCond; SSqlGroupbyExpr groupbyExpr; - - struct STSBuf* pTSBuf; - - FILE* f; - char path[PATH_MAX]; + struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array + FILE* f; // temporary file in order to create TSBuf + char path[PATH_MAX]; // temporary file path } SJoinSubquerySupporter; -void tscDestroyDataBlock(STableDataBlocks* pDataBlock); -STableDataBlocks* tscCreateDataBlock(int32_t size); -void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks); -SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, uint32_t offset); +int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name, + SMeterMeta* pMeterMeta, STableDataBlocks** dataBlocks); +void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks); +void tscDestroyDataBlock(STableDataBlocks* pDataBlock); + +SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, + uint32_t offset); SDataBlockList* tscCreateBlockArrayList(); -void* tscDestroyBlockArrayList(SDataBlockList* pList); + +void* tscDestroyBlockArrayList(SDataBlockList* pList); int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock); -void tscFreeUnusedDataBlocks(SDataBlockList* pList); +void tscFreeUnusedDataBlocks(SDataBlockList* pList); int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList); -STableDataBlocks* tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size, - int32_t startOffset, int32_t rowSize, char* tableId); -STableDataBlocks* tscCreateDataBlockEx(size_t size, int32_t rowSize, int32_t startOffset, char* name); +int32_t tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size, + int32_t startOffset, int32_t rowSize, const char* tableId, SMeterMeta* pMeterMeta, 
+ STableDataBlocks** dataBlocks); -SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx); +SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx); SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx); /** @@ -94,29 +95,27 @@ SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx); * @param pSql sql object * @return */ -bool tscIsPointInterpQuery(SSqlCmd* pCmd); -bool tscIsTWAQuery(SSqlCmd* pCmd); -bool tscProjectionQueryOnMetric(SSqlCmd* pCmd); -bool tscIsTwoStageMergeMetricQuery(SSqlCmd* pCmd); +bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo); +bool tscIsTWAQuery(SQueryInfo* pQueryInfo); + +bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex); +bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex); +bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex); + +bool tscProjectionQueryOnTable(SQueryInfo* pQueryInfo); + +bool tscIsTwoStageMergeMetricQuery(SQueryInfo* pQueryInfo, int32_t tableIndex); bool tscQueryOnMetric(SSqlCmd* pCmd); -bool tscQueryMetricTags(SSqlCmd* pCmd); +bool tscQueryMetricTags(SQueryInfo* pQueryInfo); bool tscIsSelectivityWithTagQuery(SSqlCmd* pCmd); -void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, +void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, SSchema* pColSchema, int16_t isTag); -void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex); +void addRequiredTagColumn(SQueryInfo* pQueryInfo, int32_t tagColIndex, int32_t tableIndex); -//TODO refactor, remove -void SStringFree(SString* str); -void SStringCopy(SString* pDest, const SString* pSrc); -SString SStringCreate(const char* str); - -int32_t SStringAlloc(SString* pStr, int32_t size); -int32_t SStringEnsureRemain(SString* pStr, int32_t size); - -int32_t 
setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex); -void tscClearInterpInfo(SSqlCmd* pCmd); +int32_t setMeterID(SMeterMetaInfo* pMeterMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql); +void tscClearInterpInfo(SQueryInfo* pQueryInfo); bool tscIsInsertOrImportData(char* sqlstr); @@ -130,35 +129,39 @@ void tscFieldInfoSetValFromField(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIE void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, const char* name, int16_t bytes); void tscFieldInfoUpdateVisible(SFieldInfo* pFieldInfo, int32_t index, bool visible); -void tscFieldInfoCalOffset(SSqlCmd* pCmd); -void tscFieldInfoUpdateOffset(SSqlCmd* pCmd); +void tscFieldInfoCalOffset(SQueryInfo* pQueryInfo); +void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo); void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList, int32_t size); -void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst); +void tscFieldInfoCopyAll(SFieldInfo* dst, SFieldInfo* src); -TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index); -int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index); -int32_t tscGetResRowLength(SSqlCmd* pCmd); -void tscClearFieldInfo(SFieldInfo* pFieldInfo); +TAOS_FIELD* tscFieldInfoGetField(SQueryInfo* pQueryInfo, int32_t index); +int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index); +int32_t tscGetResRowLength(SQueryInfo* pQueryInfo); +void tscClearFieldInfo(SFieldInfo* pFieldInfo); +int32_t tscNumOfFields(SQueryInfo* pQueryInfo); +int32_t tscFieldInfoCompare(SFieldInfo* pFieldInfo1, SFieldInfo* pFieldInfo2); void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex); -SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, +SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t 
interSize); -SSqlExpr* tscSqlExprInsertEmpty(SSqlCmd* pCmd, int32_t index, int16_t functionId); +SSqlExpr* tscSqlExprInsertEmpty(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId); -SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, +SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, int16_t size); -SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index); -void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid); +SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index); +void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid); +void* tscSqlExprDestroy(SSqlExpr* pExpr); +void tscSqlExprInfoDestroy(SSqlExprInfo* pExprInfo); -SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* colIndex); -void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src); -void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src); +SColumnBase* tscColumnBaseInfoInsert(SQueryInfo* pQueryInfo, SColumnIndex* colIndex); +void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src); +void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src); -void tscColumnBaseInfoCopy(SColumnBaseInfo* dst, const SColumnBaseInfo* src, int16_t tableIndex); +void tscColumnBaseInfoCopy(SColumnBaseInfo* dst, const SColumnBaseInfo* src, int16_t tableIndex); SColumnBase* tscColumnBaseInfoGet(SColumnBaseInfo* pColumnBaseInfo, int32_t index); -void tscColumnBaseInfoUpdateTableIndex(SColumnBaseInfo* pColList, int16_t tableIndex); +void tscColumnBaseInfoUpdateTableIndex(SColumnBaseInfo* pColList, int16_t tableIndex); void tscColumnBaseInfoReserve(SColumnBaseInfo* pColumnBaseInfo, int32_t size); void tscColumnBaseInfoDestroy(SColumnBaseInfo* pColumnBaseInfo); @@ -167,40 +170,47 @@ int32_t tscValidateName(SSQLToken* pToken); void tscIncStreamExecutionCount(void* 
pStream); -bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId); +bool tscValidateColumnId(SMeterMetaInfo* pMeterMetaInfo, int32_t colId); // get starter position of metric query condition (query on tags) in SSqlCmd.payload SCond* tsGetMetricQueryCondPos(STagCond* pCond, uint64_t tableIndex); -void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str); +void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str); void tscTagCondCopy(STagCond* dest, const STagCond* src); void tscTagCondRelease(STagCond* pCond); -void tscTagCondSetQueryCondType(STagCond* pCond, int16_t type); -void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd); +void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo); void tscSetFreeHeatBeat(STscObj* pObj); bool tscShouldFreeHeatBeat(SSqlObj* pHb); void tscCleanSqlCmd(SSqlCmd* pCmd); bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql); -void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache); -SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index); -SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* index); -void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache); +void tscRemoveAllMeterMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache); +SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd *pCmd, int32_t subClauseIndex, int32_t tableIndex); +SMeterMetaInfo* tscGetMeterMetaInfoFromQueryInfo(SQueryInfo *pQueryInfo, int32_t tableIndex); + +SQueryInfo *tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex); +int32_t tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo); + +SMeterMetaInfo* tscGetMeterMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index); +void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache); -SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* 
pMetricMeta, +SMeterMetaInfo* tscAddMeterMetaInfo(SQueryInfo* pQueryInfo, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta, int16_t numOfTags, int16_t* tags); -SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SSqlCmd* pCmd); +SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SQueryInfo *pQueryInfo); +int32_t tscAddSubqueryInfo(SSqlCmd *pCmd); +void tscFreeSubqueryInfo(SSqlCmd* pCmd); +void tscClearSubqueryInfo(SSqlCmd* pCmd); -void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* keyStr, uint64_t uid); -int tscGetMetricMeta(SSqlObj* pSql); -int tscGetMeterMeta(SSqlObj* pSql, char* meterId, int32_t tableIndex); -int tscGetMeterMetaEx(SSqlObj* pSql, char* meterId, bool createIfNotExists); +void tscGetMetricMetaCacheKey(SQueryInfo* pQueryInfo, char* keyStr, uint64_t uid); +int tscGetMetricMeta(SSqlObj* pSql, int32_t clauseIndex); +int tscGetMeterMeta(SSqlObj* pSql, SMeterMetaInfo* pMeterMetaInfo); +int tscGetMeterMetaEx(SSqlObj* pSql, SMeterMetaInfo* pMeterMetaInfo, bool createIfNotExists); void tscResetForNextRetrieve(SSqlRes* pRes); -void tscAddTimestampColumn(SSqlCmd* pCmd, int16_t functionId, int16_t tableIndex); +void tscAddTimestampColumn(SQueryInfo* pQueryInfo, int16_t functionId, int16_t tableIndex); void tscDoQuery(SSqlObj* pSql); /** @@ -220,18 +230,26 @@ void tscDoQuery(SSqlObj* pSql); * @param pPrevSql * @return */ -SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param, - SSqlObj* pPrevSql); -void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex); +SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql); +void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex); -void doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex); +void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex); -int16_t tscGetJoinTagColIndexByUid(SSqlCmd* pCmd, uint64_t uid); 
+int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid); TAOS* taos_connect_a(char* ip, char* user, char* pass, char* db, uint16_t port, void (*fp)(void*, TAOS_RES*, int), void* param, void** taos); void sortRemoveDuplicates(STableDataBlocks* dataBuf); + +void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex); + +bool hasMoreVnodesToTry(SSqlObj *pSql); +void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp); +void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows); +void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()); + + #ifdef __cplusplus } #endif diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 60ad60ab4a191f1c02b88b8470123a327c5e29a5..fcbd3dac1f27a2bfadc470fcbab7075c1fab6246 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -20,14 +20,6 @@ extern "C" { #endif -#include -#include -#include -#include -#include -#include -#include - #include "os.h" #include "taos.h" #include "taosmsg.h" @@ -39,88 +31,30 @@ extern "C" { #include "tsqlfunction.h" #include "tutil.h" -#define TSC_GET_RESPTR_BASE(res, cmd, col, ord) \ - ((res->data + tscFieldInfoGetOffset(cmd, col) * res->numOfRows) + \ - (1 - ord.order) * (res->numOfRows - 1) * tscFieldInfoGetField(cmd, col)->bytes) - -enum _sql_cmd { - TSDB_SQL_SELECT, - TSDB_SQL_FETCH, - TSDB_SQL_INSERT, - - TSDB_SQL_MGMT, // the SQL below is for mgmt node - TSDB_SQL_CREATE_DB, - TSDB_SQL_CREATE_TABLE, - TSDB_SQL_DROP_DB, - TSDB_SQL_DROP_TABLE, - TSDB_SQL_CREATE_ACCT, - TSDB_SQL_CREATE_USER, - TSDB_SQL_DROP_ACCT, // 10 - TSDB_SQL_DROP_USER, - TSDB_SQL_ALTER_USER, - TSDB_SQL_ALTER_ACCT, - TSDB_SQL_ALTER_TABLE, - TSDB_SQL_ALTER_DB, - TSDB_SQL_CREATE_MNODE, - TSDB_SQL_DROP_MNODE, - TSDB_SQL_CREATE_DNODE, - TSDB_SQL_DROP_DNODE, - TSDB_SQL_CFG_DNODE, // 20 - TSDB_SQL_CFG_MNODE, - TSDB_SQL_SHOW, - TSDB_SQL_RETRIEVE, - TSDB_SQL_KILL_QUERY, - TSDB_SQL_KILL_STREAM, - TSDB_SQL_KILL_CONNECTION, - - TSDB_SQL_READ, // SQL below is 
for read operation - TSDB_SQL_CONNECT, - TSDB_SQL_USE_DB, - TSDB_SQL_META, // 30 - TSDB_SQL_METRIC, - TSDB_SQL_MULTI_META, - TSDB_SQL_HB, - - TSDB_SQL_LOCAL, // SQL below for client local - TSDB_SQL_DESCRIBE_TABLE, - TSDB_SQL_RETRIEVE_METRIC, - TSDB_SQL_METRIC_JOIN_RETRIEVE, - TSDB_SQL_RETRIEVE_TAGS, - /* - * build empty result instead of accessing dnode to fetch result - * reset the client cache - */ - TSDB_SQL_RETRIEVE_EMPTY_RESULT, - - TSDB_SQL_RESET_CACHE, // 40 - TSDB_SQL_SERV_STATUS, - TSDB_SQL_CURRENT_DB, - TSDB_SQL_SERV_VERSION, - TSDB_SQL_CLI_VERSION, - TSDB_SQL_CURRENT_USER, - TSDB_SQL_CFG_LOCAL, - - TSDB_SQL_MAX -}; - +#define TSC_GET_RESPTR_BASE(res, _queryinfo, col, ord) \ + (res->data + tscFieldInfoGetOffset(_queryinfo, col) * res->numOfRows) + // forward declaration struct SSqlInfo; typedef struct SSqlGroupbyExpr { - int16_t tableIndex; - + int16_t tableIndex; int16_t numOfGroupCols; SColIndexEx columnInfo[TSDB_MAX_TAGS]; // group by columns information - - int16_t orderIndex; // order by column index - int16_t orderType; // order by type: asc/desc + int16_t orderIndex; // order by column index + int16_t orderType; // order by type: asc/desc } SSqlGroupbyExpr; typedef struct SMeterMetaInfo { SMeterMeta * pMeterMeta; // metermeta SMetricMeta *pMetricMeta; // metricmeta - char name[TSDB_METER_ID_LEN + 1]; + /* + * 1. keep the vnode index during the multi-vnode super table projection query + * 2. 
keep the vnode index for multi-vnode insertion + */ + int32_t vnodeIndex; + char name[TSDB_METER_ID_LEN + 1]; // table(super table) name int16_t numOfTags; // total required tags in query, including groupby tags int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection } SMeterMetaInfo; @@ -179,16 +113,9 @@ typedef struct SColumnBaseInfo { struct SLocalReducer; -// todo move to utility -typedef struct SString { - int32_t alloc; - int32_t n; - char * z; -} SString; - typedef struct SCond { uint64_t uid; - SString cond; + char * cond; } SCond; typedef struct SJoinNode { @@ -227,18 +154,24 @@ typedef struct SParamInfo { } SParamInfo; typedef struct STableDataBlocks { - char meterId[TSDB_METER_ID_LEN]; - int8_t tsSource; - bool ordered; - - int64_t vgid; - int64_t prevTS; - - int32_t numOfMeters; - - int32_t rowSize; + char meterId[TSDB_METER_ID_LEN]; + int8_t tsSource; // where does the UNIX timestamp come from, server or client + bool ordered; // if current rows are ordered or not + int64_t vgid; // virtual group id + int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending + int32_t numOfMeters; // number of tables in current submit block + + int32_t rowSize; // row size for current table uint32_t nAllocSize; + uint32_t headerSize; // header for metadata (submit metadata) uint32_t size; + + /* + * the metermeta for current table, the metermeta will be used during submit stage, keep a ref + * to avoid it to be removed from cache + */ + SMeterMeta *pMeterMeta; + union { char *filename; char *pData; @@ -252,60 +185,76 @@ typedef struct STableDataBlocks { typedef struct SDataBlockList { int32_t idx; - int32_t nSize; - int32_t nAlloc; + uint32_t nSize; + uint32_t nAlloc; char * userParam; /* user assigned parameters for async query */ void * udfp; /* user defined function pointer, used in async model */ STableDataBlocks **pData; } SDataBlockList; -typedef struct { - SOrderVal order; - int command; - int count;// TODO refactor 
+typedef struct SQueryInfo { + int16_t command; // the command may be different for each subclause, so keep it seperately. + uint16_t type; // query/insert/import type + char intervalTimeUnit; - union { - bool existsCheck; // check if the table exists - int8_t showType; // show command type - }; - - int8_t isInsertFromFile; // load data from file or not - bool import; // import/insert type - char msgType; - uint16_t type; // query type - char intervalTimeUnit; int64_t etime, stime; int64_t nAggTimeInterval; // aggregation time interval int64_t nSlidingTime; // sliding window in mseconds SSqlGroupbyExpr groupbyExpr; // group by tags info - /* - * use to keep short request msg and error msg, in such case, SSqlCmd->payload == SSqlCmd->ext; - * create table/query/insert operations will exceed the TSDB_SQLCMD_SIZE. - * - * In such cases, allocate the memory dynamically, and need to free the memory - */ - uint32_t allocSize; - char * payload; - int payloadLen; - short numOfCols; - SColumnBaseInfo colList; - SFieldInfo fieldsInfo; - SSqlExprInfo exprsInfo; - SLimitVal limit; - SLimitVal slimit; - int64_t globalLimit; - STagCond tagCond; - int16_t vnodeIdx; // vnode index in pMetricMeta for metric query - int16_t interpoType; // interpolate type - int16_t numOfTables; - - // submit data blocks branched according to vnode - SDataBlockList * pDataBlocks; + SColumnBaseInfo colList; + SFieldInfo fieldsInfo; + SSqlExprInfo exprsInfo; + SLimitVal limit; + SLimitVal slimit; + STagCond tagCond; + SOrderVal order; + int16_t interpoType; // interpolate type + int16_t numOfTables; SMeterMetaInfo **pMeterInfo; struct STSBuf * tsBuf; - // todo use dynamic allocated memory for defaultVal - int64_t defaultVal[TSDB_MAX_COLUMNS]; // default value for interpolation + int64_t * defaultVal; // default value for interpolation + char * msg; // pointer to the pCmd->payload to keep error message temporarily + int64_t clauseLimit; // limit for current sub clause + + // offset value in the original 
sql expression, NOT sent to virtual node, only applied at client side + int64_t prjOffset; +} SQueryInfo; + +// data source from sql string or from file +enum { + DATA_FROM_SQL_STRING = 1, + DATA_FROM_DATA_FILE = 2, +}; + +typedef struct { + int command; + uint8_t msgType; + + union { + bool existsCheck; // check if the table exists or not + bool inStream; // denote if current sql is executed in stream or not + bool createOnDemand; // if the table is missing, on-the-fly create it. during getmeterMeta + int8_t dataSourceType; // load data from file or not + }; + + union { + int32_t count; + int32_t numOfTablesInSubmit; + }; + + int32_t clauseIndex; // index of multiple subclause query + int8_t isParseFinish; + short numOfCols; + uint32_t allocSize; + char * payload; + int payloadLen; + + SQueryInfo **pQueryInfo; + int32_t numOfClause; + + // submit data blocks branched according to vnode + SDataBlockList *pDataBlocks; // for parameter ('?') binding and batch processing int32_t batchSize; @@ -321,12 +270,15 @@ struct STSBuf; typedef struct { uint8_t code; - int numOfRows; // num of results in current retrieved - int numOfTotal; // num of total results + int64_t numOfRows; // num of results in current retrieved + int64_t numOfTotal; // num of total results + int64_t numOfTotalInCurrentClause; // num of total result in current subclause + char * pRsp; int rspType; int rspLen; uint64_t qhandle; + int64_t uid; int64_t useconds; int64_t offset; // offset value from vnode during projection query of stable int row; @@ -366,28 +318,27 @@ typedef struct _sql_obj { STscObj *pTscObj; void (*fp)(); void (*fetchFp)(); - void * param; - uint32_t ip; - short vnode; - int64_t stime; - uint32_t queryId; - void * thandle; - void * pStream; - char * sqlstr; - char retry; - char maxRetry; - char index; - char freed : 4; - char listed : 4; - tsem_t rspSem; - tsem_t emptyRspSem; - - SSqlCmd cmd; - SSqlRes res; - - char numOfSubs; - char* asyncTblPos; - void* pTableHashList; + void * 
param; + uint32_t ip; + short vnode; + int64_t stime; + uint32_t queryId; + void * thandle; + void * pStream; + void * pSubscription; + char * sqlstr; + char retry; + char maxRetry; + uint8_t index; + char freed : 4; + char listed : 4; + tsem_t rspSem; + tsem_t emptyRspSem; + SSqlCmd cmd; + SSqlRes res; + uint8_t numOfSubs; + char * asyncTblPos; + void * pTableHashList; struct _sql_obj **pSubs; struct _sql_obj * prev, *next; } SSqlObj; @@ -427,9 +378,11 @@ typedef struct { } SIpStrList; // tscSql API -int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion); +int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion); + +void tscInitMsgs(); +extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo); -void tscInitMsgs(); void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle); int tscProcessSql(SSqlObj *pSql); @@ -448,15 +401,22 @@ int taos_retrieve(TAOS_RES *res); * transfer function for metric query in stream computing, the function need to be change * before send query message to vnode */ -int32_t tscTansformSQLFunctionForMetricQuery(SSqlCmd *pCmd); -void tscRestoreSQLFunctionForMetricQuery(SSqlCmd *pCmd); +int32_t tscTansformSQLFunctionForSTableQuery(SQueryInfo *pQueryInfo); +void tscRestoreSQLFunctionForMetricQuery(SQueryInfo *pQueryInfo); void tscClearSqlMetaInfoForce(SSqlCmd *pCmd); -int32_t tscCreateResPointerInfo(SSqlCmd *pCmd, SSqlRes *pRes); +int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo); void tscDestroyResPointerInfo(SSqlRes *pRes); void tscFreeSqlCmdData(SSqlCmd *pCmd); +void tscFreeResData(SSqlObj* pSql); + +/** + * free query result of the sql object + * @param pObj + */ +void tscFreeSqlResult(SSqlObj* pSql); /** * only free part of resources allocated during query. 
@@ -474,11 +434,15 @@ void tscFreeSqlObj(SSqlObj *pObj); void tscCloseTscObj(STscObj *pObj); -void tscProcessMultiVnodesInsert(SSqlObj *pSql); -void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql); -void tscKillMetricQuery(SSqlObj *pSql); -void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen); -bool tscIsUpdateQuery(STscObj *pObj); +void tscProcessMultiVnodesInsert(SSqlObj *pSql); +void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql); +void tscKillMetricQuery(SSqlObj *pSql); +void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen); +bool tscIsUpdateQuery(STscObj *pObj); +bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes); + +char *tscGetErrorMsgPayload(SSqlCmd *pCmd); + int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql); // transfer SSqlInfo to SqlCmd struct @@ -499,6 +463,8 @@ extern int tsInsertHeadSize; extern int tscNumOfThreads; extern SIpStrList tscMgmtIpList; +typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int numOfRows); + #ifdef __cplusplus } #endif diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index 958252b4deca7708c99e6b762613813c2f9d330b..8dbe63d75a73dd18a15bc1da8f99c7b8db774eea 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -135,7 +135,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;JI)J */ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp - (JNIEnv *, jobject, jstring, jstring, jstring, jstring, jstring, jlong, jint); + (JNIEnv *, jobject, jlong, jboolean, jstring, jstring, jint); /* * Class: com_taosdata_jdbc_TSDBJNIConnector @@ -143,7 +143,7 @@ JNIEXPORT jlong JNICALL 
Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp * Signature: (J)Lcom/taosdata/jdbc/TSDBResultSetRowData; */ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp - (JNIEnv *, jobject, jlong); + (JNIEnv *, jobject, jlong, jint); /* * Class: com_taosdata_jdbc_TSDBJNIConnector @@ -151,7 +151,7 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp * Signature: (J)V */ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp - (JNIEnv *, jobject, jlong); + (JNIEnv *, jobject, jlong, jboolean); /* * Class: com_taosdata_jdbc_TSDBJNIConnector diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 71f983dadbe243e78330ea7f8d361bb5712f449e..228403c79d318d922f5571a9663b3c97bbffbbc8 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -20,6 +20,7 @@ #include "tscJoinProcess.h" #include "tsclient.h" #include "tscUtil.h" +#include "ttime.h" int __init = 0; @@ -239,7 +240,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J jbyteArray jsql, jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); + jniError("jobj:%p, connection is already closed", jobj); return JNI_CONNECTION_NULL; } @@ -252,6 +253,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J char *dst = (char *)calloc(1, sizeof(char) * (len + 1)); if (dst == NULL) { + jniError("jobj:%p, conn:%p, can not alloc memory", jobj, tscon); return JNI_OUT_OF_MEMORY; } @@ -260,9 +262,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J //todo handle error } + jniTrace("jobj:%p, conn:%p, sql:%s", jobj, tscon, dst); + int code = taos_query(tscon, dst); if (code != 0) { - jniError("jobj:%p, conn:%p, code:%d, msg:%s, sql:%s", jobj, tscon, code, taos_errstr(tscon), dst); + jniError("jobj:%p, conn:%p, code:%d, msg:%s", jobj, tscon, code, 
taos_errstr(tscon)); free(dst); return JNI_TDENGINE_ERROR; } else { @@ -271,9 +275,9 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J if (pSql->cmd.command == TSDB_SQL_INSERT) { affectRows = taos_affected_rows(tscon); - jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d, sql:%s", jobj, tscon, code, affectRows, dst); + jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d", jobj, tscon, code, affectRows); } else { - jniTrace("jobj:%p, conn:%p, code:%d, sql:%s", jobj, tscon, code, dst); + jniTrace("jobj:%p, conn:%p, code:%d", jobj, tscon, code); } free(dst); @@ -307,7 +311,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp( if (tscIsUpdateQuery(tscon)) { ret = 0; // for update query, no result pointer - jniTrace("jobj:%p, conn:%p, no result", jobj, tscon); + jniTrace("jobj:%p, conn:%p, no resultset", jobj, tscon); } else { ret = (jlong) taos_use_result(tscon); jniTrace("jobj:%p, conn:%p, get resultset:%p", jobj, tscon, (void *) ret); @@ -463,11 +467,17 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn case TSDB_DATA_TYPE_BIGINT: (*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i])); break; - case TSDB_DATA_TYPE_FLOAT: - (*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat) * ((float *)row[i])); + case TSDB_DATA_TYPE_FLOAT: { + float fv = 0; + fv = GET_FLOAT_VAL(row[i]); + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat)fv); + } break; - case TSDB_DATA_TYPE_DOUBLE: - (*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble) * ((double *)row[i])); + case TSDB_DATA_TYPE_DOUBLE: { + double dv = 0; + dv = GET_DOUBLE_VAL(row[i]); + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv); + } break; case TSDB_DATA_TYPE_BINARY: { strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case that terminated does not exist @@ -496,7 +506,7 @@ JNIEXPORT jint JNICALL 
Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); + jniError("jobj:%p, connection is already closed", jobj); return JNI_CONNECTION_NULL; } else { jniTrace("jobj:%p, conn:%p, close connection success", jobj, tscon); @@ -505,92 +515,42 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm } } -JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jstring jhost, - jstring juser, jstring jpass, jstring jdb, - jstring jtable, jlong jtime, - jint jperiod) { - TAOS_SUB *tsub; - jlong sub = 0; - char * host = NULL; - char * user = NULL; - char * pass = NULL; - char * db = NULL; - char * table = NULL; - int64_t time = 0; - int period = 0; +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jlong con, + jboolean restart, jstring jtopic, jstring jsql, jint jinterval) { + jlong sub = 0; + TAOS *taos = (TAOS *)con; + char *topic = NULL; + char *sql = NULL; jniGetGlobalMethod(env); jniTrace("jobj:%p, in TSDBJNIConnector_subscribeImp", jobj); - if (jhost != NULL) { - host = (char *)(*env)->GetStringUTFChars(env, jhost, NULL); - } - if (juser != NULL) { - user = (char *)(*env)->GetStringUTFChars(env, juser, NULL); - } - if (jpass != NULL) { - pass = (char *)(*env)->GetStringUTFChars(env, jpass, NULL); - } - if (jdb != NULL) { - db = (char *)(*env)->GetStringUTFChars(env, jdb, NULL); - } - if (jtable != NULL) { - table = (char *)(*env)->GetStringUTFChars(env, jtable, NULL); - } - time = (int64_t)jtime; - period = (int)jperiod; - - if (user == NULL) { - jniTrace("jobj:%p, user is null, use tsDefaultUser", jobj); - user = tsDefaultUser; + if (jtopic != NULL) { + topic = (char *)(*env)->GetStringUTFChars(env, jtopic, NULL); } - if (pass == NULL) { - jniTrace("jobj:%p, pass is null, use tsDefaultPass", jobj); - pass = tsDefaultPass; + if 
(jsql != NULL) { + sql = (char *)(*env)->GetStringUTFChars(env, jsql, NULL); } - jniTrace("jobj:%p, host:%s, user:%s, pass:%s, db:%s, table:%s, time:%d, period:%d", jobj, host, user, pass, db, table, - time, period); - tsub = taos_subscribe(host, user, pass, db, table, time, period); + TAOS_SUB *tsub = taos_subscribe(taos, (int)restart, topic, sql, NULL, NULL, jinterval); sub = (jlong)tsub; if (sub == 0) { - jniTrace("jobj:%p, failed to subscribe to db:%s, table:%s", jobj, db, table); + jniTrace("jobj:%p, failed to subscribe: topic:%s", jobj, jtopic); } else { - jniTrace("jobj:%p, successfully subscribe to db:%s, table:%s, sub:%ld, tsub:%p", jobj, db, table, sub, tsub); + jniTrace("jobj:%p, successfully subscribe: topic: %s", jobj, jtopic); } - if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host); - if (user != NULL && user != tsDefaultUser) (*env)->ReleaseStringUTFChars(env, juser, user); - if (pass != NULL && pass != tsDefaultPass) (*env)->ReleaseStringUTFChars(env, jpass, pass); - if (db != NULL) (*env)->ReleaseStringUTFChars(env, jdb, db); - if (table != NULL) (*env)->ReleaseStringUTFChars(env, jtable, table); + if (topic != NULL) (*env)->ReleaseStringUTFChars(env, jtopic, topic); + if (sql != NULL) (*env)->ReleaseStringUTFChars(env, jsql, sql); return sub; } -JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) { - jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub); - - TAOS_SUB * tsub = (TAOS_SUB *)sub; - TAOS_ROW row = taos_consume(tsub); - TAOS_FIELD *fields = taos_fetch_subfields(tsub); - int num_fields = taos_subfields_count(tsub); - - jniGetGlobalMethod(env); - - jniTrace("jobj:%p, check fields:%p, num_fields=%d", jobj, fields, num_fields); - +static jobject convert_one_row(JNIEnv *env, TAOS_ROW row, TAOS_FIELD* fields, int num_fields) { jobject rowobj = (*env)->NewObject(env, g_rowdataClass, g_rowdataConstructor, num_fields); jniTrace("created a rowdata 
object, rowobj:%p", rowobj); - if (row == NULL) { - jniTrace("jobj:%p, tsub:%p, fields size is %d, fetch row to the end", jobj, tsub, num_fields); - return NULL; - } - - char tmp[TSDB_MAX_BYTES_PER_ROW] = {0}; - for (int i = 0; i < num_fields; i++) { if (row[i] == NULL) { continue; @@ -612,13 +572,20 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI case TSDB_DATA_TYPE_BIGINT: (*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i])); break; - case TSDB_DATA_TYPE_FLOAT: - (*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat) * ((float *)row[i])); + case TSDB_DATA_TYPE_FLOAT: { + float fv = 0; + fv = GET_FLOAT_VAL(row[i]); + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat)fv); + } break; - case TSDB_DATA_TYPE_DOUBLE: - (*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble) * ((double *)row[i])); + case TSDB_DATA_TYPE_DOUBLE:{ + double dv = 0; + dv = GET_DOUBLE_VAL(row[i]); + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv); + } break; case TSDB_DATA_TYPE_BINARY: { + char tmp[TSDB_MAX_BYTES_PER_ROW] = {0}; strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case that terminated does not exist (*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp)); @@ -627,7 +594,7 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI } case TSDB_DATA_TYPE_NCHAR: (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteArrayFp, i, - jniFromNCharToByteArray(env, (char*)row[i], fields[i].bytes)); + jniFromNCharToByteArray(env, (char*)row[i], fields[i].bytes)); break; case TSDB_DATA_TYPE_TIMESTAMP: (*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i])); @@ -636,13 +603,56 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI break; } } - jniTrace("jobj:%p, rowdata retrieved, rowobj:%p", jobj, rowobj); return 
rowobj; } -JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub) { +JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub, jint timeout) { + jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub); + jniGetGlobalMethod(env); + + TAOS_SUB *tsub = (TAOS_SUB *)sub; + jobject rows = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp); + + int64_t start = taosGetTimestampMs(); + int count = 0; + + while (true) { + TAOS_RES * res = taos_consume(tsub); + if (res == NULL) { + jniError("jobj:%p, tsub:%p, taos_consume returns NULL", jobj, tsub); + return NULL; + } + + TAOS_FIELD *fields = taos_fetch_fields(res); + int num_fields = taos_num_fields(res); + while (true) { + TAOS_ROW row = taos_fetch_row(res); + if (row == NULL) { + break; + } + jobject rowobj = convert_one_row(env, row, fields, num_fields); + (*env)->CallBooleanMethod(env, rows, g_arrayListAddFp, rowobj); + count++; + } + + if (count > 0) { + break; + } + if (timeout == -1) { + continue; + } + if (((int)(taosGetTimestampMs() - start)) >= timeout) { + jniTrace("jobj:%p, sub:%ld, timeout", jobj, sub); + break; + } + } + + return rows; +} + +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub, jboolean keepProgress) { TAOS_SUB *tsub = (TAOS_SUB *)sub; - taos_unsubscribe(tsub); + taos_unsubscribe(tsub, keepProgress); } JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp(JNIEnv *env, jobject jobj, @@ -675,4 +685,4 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(JNIEnv *env, jobject jobj) { return (*env)->NewStringUTF(env, (const char *)tsCharset); -} \ No newline at end of file +} diff --git a/src/client/src/sql.c b/src/client/src/sql.c index 
e0d96623e26dd29ab2c440e6678026f761ada67d..54df3e36696359b429aa97ad5f06c6d93b8e67bf 100644 --- a/src/client/src/sql.c +++ b/src/client/src/sql.c @@ -78,39 +78,41 @@ ** defined, then do no error processing. ** YYNSTATE the combined number of states. ** YYNRULE the number of rules in the grammar +** YYNTOKEN Number of terminal symbols ** YY_MAX_SHIFT Maximum value for shift actions ** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions ** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions -** YY_MIN_REDUCE Maximum value for reduce actions ** YY_ERROR_ACTION The yy_action[] code for syntax error ** YY_ACCEPT_ACTION The yy_action[] code for accept ** YY_NO_ACTION The yy_action[] code for no-op +** YY_MIN_REDUCE Minimum value for reduce actions +** YY_MAX_REDUCE Maximum value for reduce actions */ #ifndef INTERFACE # define INTERFACE 1 #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 262 +#define YYNOCODE 268 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SSQLToken typedef union { int yyinit; ParseTOKENTYPE yy0; - SQuerySQL* yy138; - SCreateAcctSQL yy155; - SLimitVal yy162; - int yy220; - tVariant yy236; - tSQLExprListList* yy237; - tSQLExpr* yy244; - SCreateDBInfo yy262; - tSQLExprList* yy284; - SCreateTableSQL* yy344; - int64_t yy369; - TAOS_FIELD yy397; - tFieldList* yy421; - tVariantList* yy480; + tVariantList* yy30; + SLimitVal yy150; + SCreateTableSQL* yy212; + SCreateAcctSQL yy239; + int yy250; + SSubclauseInfo* yy309; + tFieldList* yy325; + tVariant yy380; + tSQLExpr* yy388; + SQuerySQL* yy444; + int64_t yy489; + TAOS_FIELD yy505; + tSQLExprList* yy506; + SCreateDBInfo yy532; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -120,22 +122,19 @@ typedef union { #define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 252 +#define YYNSTATE 247 
#define YYNRULE 216 -#define YY_MAX_SHIFT 251 -#define YY_MIN_SHIFTREDUCE 403 -#define YY_MAX_SHIFTREDUCE 618 -#define YY_MIN_REDUCE 619 -#define YY_MAX_REDUCE 834 -#define YY_ERROR_ACTION 835 -#define YY_ACCEPT_ACTION 836 -#define YY_NO_ACTION 837 +#define YYNTOKEN 203 +#define YY_MAX_SHIFT 246 +#define YY_MIN_SHIFTREDUCE 399 +#define YY_MAX_SHIFTREDUCE 614 +#define YY_ERROR_ACTION 615 +#define YY_ACCEPT_ACTION 616 +#define YY_NO_ACTION 617 +#define YY_MIN_REDUCE 618 +#define YY_MAX_REDUCE 833 /************* End control #defines *******************************************/ -/* The yyzerominor constant is used to initialize instances of -** YYMINORTYPE objects to zero. */ -static const YYMINORTYPE yyzerominor = { 0 }; - /* Define the yytestcase() macro to be a no-op if is not already defined ** otherwise. ** @@ -163,9 +162,6 @@ static const YYMINORTYPE yyzerominor = { 0 }; ** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then ** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE. ** -** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE -** and YY_MAX_REDUCE - ** N == YY_ERROR_ACTION A syntax error has occurred. ** ** N == YY_ACCEPT_ACTION The parser accepts its input. @@ -173,21 +169,22 @@ static const YYMINORTYPE yyzerominor = { 0 }; ** N == YY_NO_ACTION No such action. Denotes unused ** slots in the yy_action[] table. ** +** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE +** and YY_MAX_REDUCE +** ** The action table is constructed as a single large table named yy_action[]. 
-** Given state S and lookahead X, the action is computed as +** Given state S and lookahead X, the action is computed as either: ** -** yy_action[ yy_shift_ofst[S] + X ] +** (A) N = yy_action[ yy_shift_ofst[S] + X ] +** (B) N = yy_default[S] ** -** If the index value yy_shift_ofst[S]+X is out of range or if the value -** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X or if yy_shift_ofst[S] -** is equal to YY_SHIFT_USE_DFLT, it means that the action is not in the table -** and that yy_default[S] should be used instead. +** The (A) formula is preferred. The B formula is used instead if +** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X. ** -** The formula above is for computing the action when the lookahead is +** The formulas above are for computing the action when the lookahead is ** a terminal symbol. If the lookahead is a non-terminal (as occurs after ** a reduce action) then the yy_reduce_ofst[] array is used in place of -** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of -** YY_SHIFT_USE_DFLT. +** the yy_shift_ofst[] array. ** ** The following are the tables generated in this section: ** @@ -201,198 +198,212 @@ static const YYMINORTYPE yyzerominor = { 0 }; ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (531) +#define YY_ACTTAB_COUNT (529) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 443, 74, 78, 244, 85, 77, 153, 249, 444, 836, - /* 10 */ 251, 80, 43, 45, 7, 37, 38, 62, 111, 171, - /* 20 */ 31, 443, 443, 205, 41, 39, 42, 40, 241, 444, - /* 30 */ 444, 135, 36, 35, 10, 101, 34, 33, 32, 43, - /* 40 */ 45, 600, 37, 38, 156, 524, 135, 31, 135, 133, - /* 50 */ 205, 41, 39, 42, 40, 159, 601, 158, 601, 36, - /* 60 */ 35, 154, 514, 34, 33, 32, 404, 405, 406, 407, - /* 70 */ 408, 409, 410, 411, 412, 413, 414, 415, 250, 21, - /* 80 */ 43, 45, 172, 37, 38, 227, 226, 202, 31, 59, - /* 90 */ 21, 205, 41, 39, 42, 40, 34, 33, 32, 57, - /* 100 */ 36, 35, 550, 551, 34, 33, 32, 45, 232, 37, - /* 110 */ 38, 167, 132, 511, 31, 21, 21, 205, 41, 39, - /* 120 */ 42, 40, 168, 569, 511, 502, 36, 35, 134, 178, - /* 130 */ 34, 33, 32, 243, 37, 38, 186, 512, 183, 31, - /* 140 */ 532, 101, 205, 41, 39, 42, 40, 228, 233, 511, - /* 150 */ 511, 36, 35, 230, 229, 34, 33, 32, 17, 219, - /* 160 */ 242, 218, 217, 216, 215, 214, 213, 212, 211, 496, - /* 170 */ 139, 485, 486, 487, 488, 489, 490, 491, 492, 493, - /* 180 */ 494, 495, 163, 582, 11, 97, 573, 133, 576, 529, - /* 190 */ 579, 597, 163, 582, 166, 556, 573, 200, 576, 155, - /* 200 */ 579, 36, 35, 148, 220, 34, 33, 32, 21, 87, - /* 210 */ 86, 142, 514, 243, 160, 161, 101, 147, 204, 248, - /* 220 */ 247, 426, 514, 76, 160, 161, 163, 582, 530, 241, - /* 230 */ 573, 101, 576, 513, 579, 193, 41, 39, 42, 40, - /* 240 */ 242, 596, 510, 27, 36, 35, 49, 571, 34, 33, - /* 250 */ 32, 114, 115, 224, 65, 68, 505, 441, 160, 161, - /* 260 */ 124, 192, 518, 50, 188, 515, 499, 516, 498, 517, - /* 270 */ 555, 150, 128, 126, 245, 89, 88, 44, 450, 442, - /* 280 */ 61, 124, 124, 572, 595, 60, 581, 44, 575, 527, - /* 290 */ 578, 28, 18, 169, 170, 605, 581, 162, 606, 29, - /* 300 */ 541, 580, 29, 542, 47, 52, 599, 15, 151, 583, - /* 310 */ 14, 580, 
574, 14, 577, 508, 73, 72, 507, 47, - /* 320 */ 53, 44, 22, 209, 522, 152, 523, 22, 140, 520, - /* 330 */ 581, 521, 9, 8, 2, 84, 83, 141, 143, 144, - /* 340 */ 145, 615, 146, 137, 131, 580, 138, 136, 531, 566, - /* 350 */ 98, 565, 164, 562, 561, 165, 231, 548, 547, 189, - /* 360 */ 112, 113, 519, 452, 110, 210, 129, 25, 191, 223, - /* 370 */ 225, 614, 70, 613, 611, 116, 470, 26, 23, 130, - /* 380 */ 439, 91, 79, 437, 81, 435, 434, 537, 194, 198, - /* 390 */ 173, 54, 125, 432, 431, 430, 428, 421, 525, 127, - /* 400 */ 425, 51, 423, 102, 46, 203, 103, 104, 95, 199, - /* 410 */ 201, 535, 197, 30, 536, 549, 195, 27, 222, 75, - /* 420 */ 234, 235, 236, 237, 207, 55, 238, 239, 240, 246, - /* 430 */ 149, 618, 63, 66, 175, 433, 174, 176, 177, 617, - /* 440 */ 180, 427, 119, 90, 118, 471, 117, 120, 122, 121, - /* 450 */ 123, 92, 509, 1, 24, 182, 107, 105, 106, 108, - /* 460 */ 109, 179, 181, 616, 184, 185, 12, 609, 190, 187, - /* 470 */ 13, 157, 96, 538, 99, 196, 58, 4, 19, 543, - /* 480 */ 100, 5, 584, 3, 20, 16, 206, 6, 208, 64, - /* 490 */ 483, 482, 481, 480, 479, 478, 477, 476, 474, 47, - /* 500 */ 447, 449, 67, 22, 504, 221, 503, 501, 56, 468, - /* 510 */ 466, 48, 458, 464, 69, 460, 462, 456, 454, 475, - /* 520 */ 71, 473, 82, 429, 445, 419, 417, 93, 619, 621, - /* 530 */ 94, + /* 0 */ 752, 440, 132, 150, 244, 10, 616, 246, 132, 441, + /* 10 */ 132, 155, 821, 41, 43, 20, 35, 36, 820, 154, + /* 20 */ 821, 29, 741, 440, 200, 39, 37, 40, 38, 131, + /* 30 */ 499, 441, 96, 34, 33, 100, 151, 32, 31, 30, + /* 40 */ 41, 43, 741, 35, 36, 152, 136, 163, 29, 727, + /* 50 */ 749, 200, 39, 37, 40, 38, 185, 100, 225, 224, + /* 60 */ 34, 33, 162, 730, 32, 31, 30, 400, 401, 402, + /* 70 */ 403, 404, 405, 406, 407, 408, 409, 410, 411, 245, + /* 80 */ 730, 41, 43, 188, 35, 36, 215, 236, 197, 29, + /* 90 */ 58, 20, 200, 39, 37, 40, 38, 32, 31, 30, + /* 100 */ 56, 34, 33, 75, 730, 32, 31, 30, 43, 236, + /* 110 */ 35, 36, 776, 817, 195, 29, 20, 20, 200, 39, + /* 120 */ 37, 40, 38, 164, 
570, 727, 227, 34, 33, 440, + /* 130 */ 167, 32, 31, 30, 238, 35, 36, 441, 7, 816, + /* 140 */ 29, 61, 110, 200, 39, 37, 40, 38, 223, 228, + /* 150 */ 727, 727, 34, 33, 50, 728, 32, 31, 30, 15, + /* 160 */ 214, 237, 213, 212, 211, 210, 209, 208, 207, 206, + /* 170 */ 712, 51, 701, 702, 703, 704, 705, 706, 707, 708, + /* 180 */ 709, 710, 711, 159, 583, 11, 815, 574, 100, 577, + /* 190 */ 100, 580, 168, 159, 583, 222, 221, 574, 16, 577, + /* 200 */ 20, 580, 34, 33, 145, 26, 32, 31, 30, 238, + /* 210 */ 86, 85, 139, 174, 657, 156, 157, 123, 144, 199, + /* 220 */ 182, 715, 179, 714, 148, 156, 157, 159, 583, 531, + /* 230 */ 60, 574, 149, 577, 726, 580, 237, 16, 39, 37, + /* 240 */ 40, 38, 27, 775, 26, 59, 34, 33, 551, 552, + /* 250 */ 32, 31, 30, 137, 113, 114, 219, 64, 67, 156, + /* 260 */ 157, 95, 515, 666, 184, 512, 123, 513, 26, 514, + /* 270 */ 523, 147, 127, 125, 240, 88, 87, 187, 42, 158, + /* 280 */ 73, 77, 239, 84, 76, 572, 528, 729, 42, 582, + /* 290 */ 79, 17, 658, 165, 166, 123, 243, 242, 92, 582, + /* 300 */ 47, 542, 543, 600, 581, 45, 13, 12, 584, 576, + /* 310 */ 138, 579, 12, 575, 581, 578, 2, 72, 71, 48, + /* 320 */ 505, 573, 42, 743, 45, 504, 204, 9, 8, 21, + /* 330 */ 21, 140, 519, 582, 520, 517, 141, 518, 83, 82, + /* 340 */ 142, 143, 134, 130, 135, 830, 133, 786, 581, 785, + /* 350 */ 160, 782, 781, 161, 751, 721, 768, 226, 97, 767, + /* 360 */ 111, 112, 516, 668, 205, 109, 128, 24, 218, 220, + /* 370 */ 829, 69, 26, 828, 826, 115, 186, 686, 25, 22, + /* 380 */ 90, 129, 655, 78, 653, 80, 651, 650, 169, 538, + /* 390 */ 124, 648, 189, 647, 646, 644, 636, 193, 52, 740, + /* 400 */ 126, 642, 640, 638, 49, 755, 756, 101, 769, 44, + /* 410 */ 198, 196, 194, 28, 192, 190, 217, 74, 229, 230, + /* 420 */ 202, 232, 231, 614, 233, 234, 53, 235, 241, 170, + /* 430 */ 146, 62, 171, 65, 173, 172, 613, 176, 175, 178, + /* 440 */ 649, 177, 612, 89, 91, 117, 687, 118, 116, 119, + /* 450 */ 120, 643, 104, 102, 122, 725, 106, 103, 105, 121, + /* 460 */ 107, 1, 108, 
23, 180, 181, 605, 183, 187, 525, + /* 470 */ 55, 539, 153, 98, 57, 191, 18, 63, 4, 544, + /* 480 */ 99, 5, 585, 3, 19, 14, 201, 6, 203, 480, + /* 490 */ 479, 478, 477, 476, 475, 474, 473, 471, 45, 444, + /* 500 */ 66, 446, 21, 501, 216, 68, 500, 498, 54, 465, + /* 510 */ 46, 463, 455, 70, 461, 457, 459, 453, 451, 472, + /* 520 */ 470, 81, 426, 442, 93, 415, 94, 413, 618, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 1, 64, 65, 66, 67, 68, 199, 200, 9, 197, - /* 10 */ 198, 74, 13, 14, 96, 16, 17, 99, 100, 63, - /* 20 */ 21, 1, 1, 24, 25, 26, 27, 28, 78, 9, - /* 30 */ 9, 248, 33, 34, 248, 200, 37, 38, 39, 13, - /* 40 */ 14, 258, 16, 17, 217, 233, 248, 21, 248, 248, - /* 50 */ 24, 25, 26, 27, 28, 257, 258, 257, 258, 33, - /* 60 */ 34, 260, 235, 37, 38, 39, 45, 46, 47, 48, - /* 70 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 200, - /* 80 */ 13, 14, 126, 16, 17, 129, 130, 252, 21, 254, - /* 90 */ 200, 24, 25, 26, 27, 28, 37, 38, 39, 100, - /* 100 */ 33, 34, 111, 112, 37, 38, 39, 14, 200, 16, - /* 110 */ 17, 232, 248, 234, 21, 200, 200, 24, 25, 26, - /* 120 */ 27, 28, 232, 97, 234, 5, 33, 34, 248, 125, - /* 130 */ 37, 38, 39, 60, 16, 17, 132, 229, 134, 21, - /* 140 */ 200, 200, 24, 25, 26, 27, 28, 232, 232, 234, - /* 150 */ 234, 33, 34, 33, 34, 37, 38, 39, 85, 86, - /* 160 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 216, - /* 170 */ 248, 218, 219, 220, 221, 222, 223, 224, 225, 226, - /* 180 */ 227, 228, 1, 2, 44, 200, 5, 248, 7, 249, - /* 190 */ 9, 248, 1, 2, 217, 254, 5, 256, 7, 260, - /* 200 */ 9, 33, 34, 63, 217, 37, 38, 39, 200, 69, - /* 210 */ 70, 71, 235, 60, 33, 34, 200, 77, 37, 60, - /* 220 */ 61, 62, 235, 72, 33, 34, 1, 2, 37, 78, - /* 230 */ 5, 200, 7, 235, 9, 250, 25, 26, 27, 28, - /* 240 */ 87, 248, 234, 103, 33, 34, 101, 1, 37, 38, - /* 250 */ 39, 64, 65, 66, 67, 68, 231, 204, 33, 34, - /* 260 */ 207, 121, 2, 118, 124, 5, 218, 7, 220, 9, - /* 270 */ 254, 131, 64, 65, 66, 67, 68, 96, 204, 204, - /* 280 */ 236, 207, 207, 37, 248, 254, 105, 96, 5, 101, - /* 290 
*/ 7, 247, 104, 33, 34, 97, 105, 59, 97, 101, - /* 300 */ 97, 120, 101, 97, 101, 101, 97, 101, 248, 97, - /* 310 */ 101, 120, 5, 101, 7, 97, 127, 128, 97, 101, - /* 320 */ 116, 96, 101, 97, 5, 248, 7, 101, 248, 5, - /* 330 */ 105, 7, 127, 128, 96, 72, 73, 248, 248, 248, - /* 340 */ 248, 235, 248, 248, 248, 120, 248, 248, 200, 230, - /* 350 */ 200, 230, 230, 230, 230, 230, 230, 255, 255, 123, - /* 360 */ 200, 200, 102, 200, 237, 200, 200, 200, 259, 200, - /* 370 */ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - /* 380 */ 200, 59, 200, 200, 200, 200, 200, 105, 251, 251, - /* 390 */ 200, 115, 200, 200, 200, 200, 200, 200, 246, 200, - /* 400 */ 200, 117, 200, 245, 114, 109, 244, 243, 201, 108, - /* 410 */ 113, 201, 107, 119, 201, 201, 106, 103, 75, 84, - /* 420 */ 83, 49, 80, 82, 201, 201, 53, 81, 79, 75, - /* 430 */ 201, 5, 205, 205, 5, 201, 133, 133, 58, 5, - /* 440 */ 5, 201, 209, 202, 213, 215, 214, 212, 211, 210, - /* 450 */ 208, 202, 233, 206, 203, 58, 240, 242, 241, 239, - /* 460 */ 238, 133, 133, 5, 133, 58, 96, 86, 123, 125, - /* 470 */ 96, 1, 122, 97, 96, 96, 101, 110, 101, 97, - /* 480 */ 96, 110, 97, 96, 101, 96, 98, 96, 98, 72, - /* 490 */ 9, 5, 5, 5, 5, 1, 5, 5, 5, 101, - /* 500 */ 76, 58, 72, 101, 5, 15, 5, 97, 96, 5, - /* 510 */ 5, 16, 5, 5, 128, 5, 5, 5, 5, 5, - /* 520 */ 128, 5, 58, 58, 76, 59, 58, 21, 0, 261, - /* 530 */ 21, + /* 0 */ 207, 1, 256, 206, 207, 256, 204, 205, 256, 9, + /* 10 */ 256, 265, 266, 13, 14, 207, 16, 17, 266, 265, + /* 20 */ 266, 21, 240, 1, 24, 25, 26, 27, 28, 256, + /* 30 */ 5, 9, 207, 33, 34, 207, 254, 37, 38, 39, + /* 40 */ 13, 14, 240, 16, 17, 224, 256, 239, 21, 241, + /* 50 */ 257, 24, 25, 26, 27, 28, 254, 207, 33, 34, + /* 60 */ 33, 34, 224, 242, 37, 38, 39, 45, 46, 47, + /* 70 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 80 */ 242, 13, 14, 258, 16, 17, 224, 78, 260, 21, + /* 90 */ 262, 207, 24, 25, 26, 27, 28, 37, 38, 39, + /* 100 */ 100, 33, 34, 72, 242, 37, 38, 39, 14, 78, + /* 110 */ 16, 17, 262, 256, 264, 21, 
207, 207, 24, 25, + /* 120 */ 26, 27, 28, 239, 97, 241, 207, 33, 34, 1, + /* 130 */ 63, 37, 38, 39, 60, 16, 17, 9, 96, 256, + /* 140 */ 21, 99, 100, 24, 25, 26, 27, 28, 239, 239, + /* 150 */ 241, 241, 33, 34, 101, 236, 37, 38, 39, 85, + /* 160 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + /* 170 */ 223, 118, 225, 226, 227, 228, 229, 230, 231, 232, + /* 180 */ 233, 234, 235, 1, 2, 44, 256, 5, 207, 7, + /* 190 */ 207, 9, 125, 1, 2, 128, 129, 5, 96, 7, + /* 200 */ 207, 9, 33, 34, 63, 103, 37, 38, 39, 60, + /* 210 */ 69, 70, 71, 124, 211, 33, 34, 214, 77, 37, + /* 220 */ 131, 225, 133, 227, 256, 33, 34, 1, 2, 37, + /* 230 */ 243, 5, 256, 7, 241, 9, 87, 96, 25, 26, + /* 240 */ 27, 28, 255, 262, 103, 262, 33, 34, 113, 114, + /* 250 */ 37, 38, 39, 256, 64, 65, 66, 67, 68, 33, + /* 260 */ 34, 96, 2, 211, 123, 5, 214, 7, 103, 9, + /* 270 */ 97, 130, 64, 65, 66, 67, 68, 104, 96, 59, + /* 280 */ 64, 65, 66, 67, 68, 1, 101, 242, 96, 107, + /* 290 */ 74, 106, 211, 33, 34, 214, 60, 61, 62, 107, + /* 300 */ 101, 97, 97, 97, 122, 101, 101, 101, 97, 5, + /* 310 */ 256, 7, 101, 5, 122, 7, 96, 126, 127, 120, + /* 320 */ 97, 37, 96, 240, 101, 97, 97, 126, 127, 101, + /* 330 */ 101, 256, 5, 107, 7, 5, 256, 7, 72, 73, + /* 340 */ 256, 256, 256, 256, 256, 242, 256, 237, 122, 237, + /* 350 */ 237, 237, 237, 237, 207, 238, 263, 237, 207, 263, + /* 360 */ 207, 207, 102, 207, 207, 244, 207, 207, 207, 207, + /* 370 */ 207, 207, 103, 207, 207, 207, 240, 207, 207, 207, + /* 380 */ 59, 207, 207, 207, 207, 207, 207, 207, 207, 107, + /* 390 */ 207, 207, 259, 207, 207, 207, 207, 259, 117, 253, + /* 400 */ 207, 207, 207, 207, 119, 208, 208, 252, 208, 116, + /* 410 */ 111, 115, 110, 121, 109, 108, 75, 84, 83, 49, + /* 420 */ 208, 82, 80, 5, 53, 81, 208, 79, 75, 132, + /* 430 */ 208, 212, 5, 212, 58, 132, 5, 5, 132, 58, + /* 440 */ 208, 132, 5, 209, 209, 220, 222, 216, 221, 219, + /* 450 */ 217, 208, 249, 251, 215, 240, 247, 250, 248, 218, + /* 460 */ 246, 213, 245, 210, 132, 58, 86, 124, 104, 97, + /* 
470 */ 105, 97, 1, 96, 101, 96, 101, 72, 112, 97, + /* 480 */ 96, 112, 97, 96, 101, 96, 98, 96, 98, 9, + /* 490 */ 5, 5, 5, 5, 1, 5, 5, 5, 101, 76, + /* 500 */ 72, 58, 101, 5, 15, 127, 5, 97, 96, 5, + /* 510 */ 16, 5, 5, 127, 5, 5, 5, 5, 5, 5, + /* 520 */ 5, 58, 58, 76, 21, 59, 21, 58, 0, 267, + /* 530 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 540 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 550 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 560 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 570 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 580 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 590 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 600 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 610 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 620 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 630 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 640 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 650 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 660 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 670 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 680 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 690 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 700 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 710 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 720 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, + /* 730 */ 267, 267, }; -#define YY_SHIFT_USE_DFLT (-83) -#define YY_SHIFT_COUNT (251) -#define YY_SHIFT_MIN (-82) -#define YY_SHIFT_MAX (528) -static const short yy_shift_ofst[] = { - /* 0 */ 140, 73, 181, 225, 20, 20, 20, 20, 20, 20, - /* 10 */ -1, 21, 225, 225, 225, 260, 260, 260, 20, 20, - /* 20 */ 20, 20, 20, 151, 153, -50, -50, -83, 191, 225, - /* 30 */ 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, - /* 40 */ 225, 225, 225, 225, 225, 225, 225, 260, 
260, 120, - /* 50 */ 120, 120, 120, 120, 120, -82, 120, 20, 20, -9, - /* 60 */ -9, 188, 20, 20, 20, 20, 20, 20, 20, 20, - /* 70 */ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, - /* 80 */ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, - /* 90 */ 20, 20, 20, 20, 20, 236, 322, 322, 322, 282, - /* 100 */ 282, 322, 276, 284, 290, 296, 297, 301, 305, 310, - /* 110 */ 294, 314, 322, 322, 343, 343, 322, 335, 337, 372, - /* 120 */ 342, 341, 373, 346, 349, 322, 354, 322, 354, -83, - /* 130 */ -83, 26, 67, 67, 67, 67, 67, 93, 118, 211, - /* 140 */ 211, 211, -63, 168, 168, 168, 168, 187, 208, -44, - /* 150 */ 4, 59, 59, 159, 198, 201, 203, 206, 209, 212, - /* 160 */ 283, 307, 246, 238, 145, 204, 218, 221, 226, 319, - /* 170 */ 324, 189, 205, 263, 426, 303, 429, 304, 380, 434, - /* 180 */ 328, 435, 329, 397, 458, 331, 407, 381, 344, 370, - /* 190 */ 374, 345, 350, 375, 376, 378, 470, 379, 382, 384, - /* 200 */ 377, 367, 383, 371, 385, 387, 389, 388, 391, 390, - /* 210 */ 417, 481, 486, 487, 488, 489, 494, 491, 492, 493, - /* 220 */ 398, 424, 490, 430, 443, 495, 386, 392, 402, 499, - /* 230 */ 501, 410, 412, 402, 504, 505, 507, 508, 510, 511, - /* 240 */ 512, 513, 514, 516, 464, 465, 448, 506, 509, 466, - /* 250 */ 468, 528, +#define YY_SHIFT_COUNT (246) +#define YY_SHIFT_MIN (0) +#define YY_SHIFT_MAX (528) +static const unsigned short int yy_shift_ofst[] = { + /* 0 */ 141, 74, 182, 226, 128, 128, 128, 128, 128, 128, + /* 10 */ 0, 22, 226, 260, 260, 260, 102, 128, 128, 128, + /* 20 */ 128, 128, 31, 149, 9, 9, 529, 192, 226, 226, + /* 30 */ 226, 226, 226, 226, 226, 226, 226, 226, 226, 226, + /* 40 */ 226, 226, 226, 226, 226, 260, 260, 25, 25, 25, + /* 50 */ 25, 25, 25, 42, 25, 165, 128, 128, 135, 135, + /* 60 */ 185, 128, 128, 128, 128, 128, 128, 128, 128, 128, + /* 70 */ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + /* 80 */ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + /* 90 */ 128, 128, 128, 128, 128, 269, 321, 321, 282, 282, + /* 100 */ 321, 281, 285, 293, 299, 296, 302, 305, 
307, 292, + /* 110 */ 269, 321, 321, 341, 341, 321, 333, 335, 370, 342, + /* 120 */ 339, 371, 344, 348, 321, 353, 321, 353, 529, 529, + /* 130 */ 27, 68, 68, 68, 94, 119, 213, 213, 213, 216, + /* 140 */ 169, 169, 169, 169, 190, 208, 67, 89, 60, 60, + /* 150 */ 236, 173, 204, 205, 206, 211, 304, 308, 284, 220, + /* 160 */ 199, 53, 223, 228, 229, 327, 330, 191, 201, 266, + /* 170 */ 418, 297, 427, 303, 376, 431, 306, 432, 309, 381, + /* 180 */ 437, 332, 407, 380, 343, 364, 372, 365, 373, 374, + /* 190 */ 377, 471, 379, 382, 384, 375, 366, 383, 369, 385, + /* 200 */ 387, 389, 388, 391, 390, 405, 480, 485, 486, 487, + /* 210 */ 488, 493, 490, 491, 492, 397, 423, 489, 428, 443, + /* 220 */ 494, 378, 386, 401, 498, 501, 410, 412, 401, 504, + /* 230 */ 506, 507, 509, 510, 511, 512, 513, 514, 515, 463, + /* 240 */ 464, 447, 503, 505, 466, 469, 528, }; -#define YY_REDUCE_USE_DFLT (-218) -#define YY_REDUCE_COUNT (130) -#define YY_REDUCE_MIN (-217) -#define YY_REDUCE_MAX (251) +#define YY_REDUCE_COUNT (129) +#define YY_REDUCE_MIN (-254) +#define YY_REDUCE_MAX (253) static const short yy_reduce_ofst[] = { - /* 0 */ -188, -47, -202, -200, -59, -165, -121, -110, -85, -84, - /* 10 */ -60, -193, -199, -61, -217, -173, -23, -13, -15, 16, - /* 20 */ 31, -92, 8, 53, 48, 74, 75, 44, -214, -136, - /* 30 */ -120, -78, -57, -7, 36, 60, 77, 80, 89, 90, - /* 40 */ 91, 92, 94, 95, 96, 98, 99, -2, 106, 119, - /* 50 */ 121, 122, 123, 124, 125, 25, 126, 148, 150, 102, - /* 60 */ 103, 127, 160, 161, 163, 165, 166, 167, 169, 170, - /* 70 */ 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, - /* 80 */ 182, 183, 184, 185, 186, 190, 192, 193, 194, 195, - /* 90 */ 196, 197, 199, 200, 202, 109, 207, 210, 213, 137, - /* 100 */ 138, 214, 152, 158, 162, 164, 215, 217, 216, 220, - /* 110 */ 222, 219, 223, 224, 227, 228, 229, 230, 232, 231, - /* 120 */ 233, 235, 239, 237, 242, 234, 241, 240, 249, 247, - /* 130 */ 251, + /* 0 */ -198, -53, -254, -246, -150, -172, -192, -116, -91, -90, + /* 10 */ -207, 
-203, -248, -179, -162, -138, -218, -175, -19, -17, + /* 20 */ -81, -7, 3, -4, 52, 81, -13, -251, -227, -210, + /* 30 */ -143, -117, -70, -32, -24, -3, 54, 75, 80, 84, + /* 40 */ 85, 86, 87, 88, 90, 45, 103, 110, 112, 113, + /* 50 */ 114, 115, 116, 117, 120, 83, 147, 151, 93, 96, + /* 60 */ 121, 153, 154, 156, 157, 159, 160, 161, 162, 163, + /* 70 */ 164, 166, 167, 168, 170, 171, 172, 174, 175, 176, + /* 80 */ 177, 178, 179, 180, 181, 183, 184, 186, 187, 188, + /* 90 */ 189, 193, 194, 195, 196, 136, 197, 198, 133, 138, + /* 100 */ 200, 146, 155, 202, 207, 203, 210, 209, 214, 217, + /* 110 */ 215, 212, 218, 219, 221, 222, 224, 227, 225, 231, + /* 120 */ 230, 233, 241, 239, 232, 234, 243, 235, 248, 253, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 835, 667, 819, 819, 835, 835, 835, 835, 835, 835, - /* 10 */ 749, 634, 835, 835, 819, 835, 835, 835, 835, 835, - /* 20 */ 835, 835, 835, 669, 656, 669, 669, 744, 835, 835, - /* 30 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, - /* 40 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, - /* 50 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 768, - /* 60 */ 768, 742, 835, 835, 835, 835, 835, 835, 835, 835, - /* 70 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 654, - /* 80 */ 835, 652, 835, 835, 835, 835, 835, 835, 835, 835, - /* 90 */ 835, 835, 835, 835, 835, 835, 636, 636, 636, 835, - /* 100 */ 835, 636, 775, 779, 773, 761, 769, 760, 756, 755, - /* 110 */ 783, 835, 636, 636, 664, 664, 636, 685, 683, 681, - /* 120 */ 673, 679, 675, 677, 671, 636, 662, 636, 662, 700, - /* 130 */ 713, 835, 823, 824, 784, 818, 774, 802, 801, 814, - /* 140 */ 808, 807, 835, 806, 805, 804, 803, 835, 835, 835, - /* 150 */ 835, 810, 809, 835, 835, 835, 835, 835, 835, 835, - /* 160 */ 835, 835, 835, 786, 780, 776, 835, 835, 835, 835, - /* 170 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, - /* 180 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, - /* 190 */ 835, 820, 835, 750, 835, 835, 835, 835, 835, 835, - /* 200 */ 
770, 835, 762, 835, 835, 835, 835, 835, 835, 722, - /* 210 */ 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, - /* 220 */ 688, 835, 835, 835, 835, 835, 835, 835, 828, 835, - /* 230 */ 835, 835, 716, 826, 835, 835, 835, 835, 835, 835, - /* 240 */ 835, 835, 835, 835, 835, 835, 835, 640, 638, 835, - /* 250 */ 632, 835, + /* 0 */ 615, 667, 823, 823, 615, 615, 615, 615, 615, 615, + /* 10 */ 753, 633, 823, 615, 615, 615, 615, 615, 615, 615, + /* 20 */ 615, 615, 669, 656, 669, 669, 748, 615, 615, 615, + /* 30 */ 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 40 */ 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 50 */ 615, 615, 615, 615, 615, 615, 615, 615, 772, 772, + /* 60 */ 746, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 70 */ 615, 615, 615, 615, 615, 615, 615, 615, 654, 615, + /* 80 */ 652, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 90 */ 615, 615, 641, 615, 615, 615, 635, 635, 615, 615, + /* 100 */ 635, 779, 783, 777, 765, 773, 764, 760, 759, 787, + /* 110 */ 615, 635, 635, 664, 664, 635, 685, 683, 681, 673, + /* 120 */ 679, 675, 677, 671, 635, 662, 635, 662, 700, 713, + /* 130 */ 615, 788, 822, 778, 806, 805, 818, 812, 811, 615, + /* 140 */ 810, 809, 808, 807, 615, 615, 615, 615, 814, 813, + /* 150 */ 615, 615, 615, 615, 615, 615, 615, 615, 615, 790, + /* 160 */ 784, 780, 615, 615, 615, 615, 615, 615, 615, 615, + /* 170 */ 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 180 */ 615, 615, 615, 615, 615, 745, 615, 615, 754, 615, + /* 190 */ 615, 615, 615, 615, 615, 774, 615, 766, 615, 615, + /* 200 */ 615, 615, 615, 615, 722, 615, 615, 615, 615, 615, + /* 210 */ 615, 615, 615, 615, 615, 688, 615, 615, 615, 615, + /* 220 */ 615, 615, 615, 827, 615, 615, 615, 716, 825, 615, + /* 230 */ 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, + /* 240 */ 615, 615, 639, 637, 615, 631, 615, }; /********** End of lemon-generated parsing tables *****************************/ @@ -516,6 +527,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* 
COMMA => nothing */ 1, /* NULL => ID */ 0, /* SELECT => nothing */ + 0, /* UNION => nothing */ + 1, /* ALL => ID */ 0, /* FROM => nothing */ 0, /* VARIABLE => nothing */ 0, /* INTERVAL => nothing */ @@ -533,9 +546,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* SOFFSET => nothing */ 0, /* WHERE => nothing */ 1, /* NOW => ID */ - 0, /* INSERT => nothing */ - 0, /* INTO => nothing */ - 0, /* VALUES => nothing */ 0, /* RESET => nothing */ 0, /* QUERY => nothing */ 0, /* ADD => nothing */ @@ -578,7 +588,6 @@ static const YYCODETYPE yyFallback[] = { 1, /* STATEMENT => ID */ 1, /* TRIGGER => ID */ 1, /* VIEW => ID */ - 1, /* ALL => ID */ 1, /* COUNT => ID */ 1, /* SUM => ID */ 1, /* AVG => ID */ @@ -598,6 +607,12 @@ static const YYCODETYPE yyFallback[] = { 1, /* TWA => ID */ 1, /* INTERP => ID */ 1, /* LAST_ROW => ID */ + 1, /* RATE => ID */ + 1, /* IRATE => ID */ + 1, /* SUM_RATE => ID */ + 1, /* SUM_IRATE => ID */ + 1, /* AVG_RATE => ID */ + 1, /* AVG_IRATE => ID */ 1, /* SEMI => ID */ 1, /* NONE => ID */ 1, /* PREV => ID */ @@ -608,6 +623,9 @@ static const YYCODETYPE yyFallback[] = { 1, /* JOIN => ID */ 1, /* METRICS => ID */ 1, /* STABLE => ID */ + 1, /* INSERT => ID */ + 1, /* INTO => ID */ + 1, /* VALUES => ID */ }; #endif /* YYFALLBACK */ @@ -639,17 +657,21 @@ typedef struct yyStackEntry yyStackEntry; /* The state of the parser is completely contained in an instance of ** the following structure */ struct yyParser { - int yyidx; /* Index of top element in stack */ + yyStackEntry *yytos; /* Pointer to top element of the stack */ #ifdef YYTRACKMAXSTACKDEPTH - int yyidxMax; /* Maximum value of yyidx */ + int yyhwm; /* High-water mark of the stack */ #endif +#ifndef YYNOERRORRECOVERY int yyerrcnt; /* Shifts left before out of the error */ +#endif ParseARG_SDECL /* A place to hold %extra_argument */ #if YYSTACKDEPTH<=0 int yystksz; /* Current side of the stack */ yyStackEntry *yystack; /* The parser's stack */ + yyStackEntry yystk0; /* First stack entry */ #else 
yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */ + yyStackEntry *yystackEnd; /* Last entry in the stack */ #endif }; typedef struct yyParser yyParser; @@ -686,78 +708,279 @@ void ParseTrace(FILE *TraceFILE, char *zTracePrompt){ } #endif /* NDEBUG */ -#ifndef NDEBUG +#if defined(YYCOVERAGE) || !defined(NDEBUG) /* For tracing shifts, the names of all terminals and nonterminals ** are required. The following table supplies these names */ static const char *const yyTokenName[] = { - "$", "ID", "BOOL", "TINYINT", - "SMALLINT", "INTEGER", "BIGINT", "FLOAT", - "DOUBLE", "STRING", "TIMESTAMP", "BINARY", - "NCHAR", "OR", "AND", "NOT", - "EQ", "NE", "ISNULL", "NOTNULL", - "IS", "LIKE", "GLOB", "BETWEEN", - "IN", "GT", "GE", "LT", - "LE", "BITAND", "BITOR", "LSHIFT", - "RSHIFT", "PLUS", "MINUS", "DIVIDE", - "TIMES", "STAR", "SLASH", "REM", - "CONCAT", "UMINUS", "UPLUS", "BITNOT", - "SHOW", "DATABASES", "MNODES", "DNODES", - "ACCOUNTS", "USERS", "MODULES", "QUERIES", - "CONNECTIONS", "STREAMS", "CONFIGS", "SCORES", - "GRANTS", "VNODES", "IPTOKEN", "DOT", - "TABLES", "STABLES", "VGROUPS", "DROP", - "TABLE", "DATABASE", "DNODE", "USER", - "ACCOUNT", "USE", "DESCRIBE", "ALTER", - "PASS", "PRIVILEGE", "LOCAL", "IF", - "EXISTS", "CREATE", "PPS", "TSERIES", - "DBS", "STORAGE", "QTIME", "CONNS", - "STATE", "KEEP", "CACHE", "REPLICA", - "DAYS", "ROWS", "ABLOCKS", "TBLOCKS", - "CTIME", "CLOG", "COMP", "PRECISION", - "LP", "RP", "TAGS", "USING", - "AS", "COMMA", "NULL", "SELECT", - "FROM", "VARIABLE", "INTERVAL", "FILL", - "SLIDING", "ORDER", "BY", "ASC", - "DESC", "GROUP", "HAVING", "LIMIT", - "OFFSET", "SLIMIT", "SOFFSET", "WHERE", - "NOW", "INSERT", "INTO", "VALUES", - "RESET", "QUERY", "ADD", "COLUMN", - "TAG", "CHANGE", "SET", "KILL", - "CONNECTION", "COLON", "STREAM", "ABORT", - "AFTER", "ATTACH", "BEFORE", "BEGIN", - "CASCADE", "CLUSTER", "CONFLICT", "COPY", - "DEFERRED", "DELIMITERS", "DETACH", "EACH", - "END", "EXPLAIN", "FAIL", "FOR", - "IGNORE", "IMMEDIATE", 
"INITIALLY", "INSTEAD", - "MATCH", "KEY", "OF", "RAISE", - "REPLACE", "RESTRICT", "ROW", "STATEMENT", - "TRIGGER", "VIEW", "ALL", "COUNT", - "SUM", "AVG", "MIN", "MAX", - "FIRST", "LAST", "TOP", "BOTTOM", - "STDDEV", "PERCENTILE", "APERCENTILE", "LEASTSQUARES", - "HISTOGRAM", "DIFF", "SPREAD", "TWA", - "INTERP", "LAST_ROW", "SEMI", "NONE", - "PREV", "LINEAR", "IMPORT", "METRIC", - "TBNAME", "JOIN", "METRICS", "STABLE", - "error", "program", "cmd", "dbPrefix", - "ids", "cpxName", "ifexists", "alter_db_optr", - "acct_optr", "ifnotexists", "db_optr", "pps", - "tseries", "dbs", "streams", "storage", - "qtime", "users", "conns", "state", - "keep", "tagitemlist", "tables", "cache", - "replica", "days", "rows", "ablocks", - "tblocks", "ctime", "clog", "comp", - "prec", "typename", "signed", "create_table_args", - "columnlist", "select", "column", "tagitem", - "selcollist", "from", "where_opt", "interval_opt", - "fill_opt", "sliding_opt", "groupby_opt", "orderby_opt", - "having_opt", "slimit_opt", "limit_opt", "sclp", - "expr", "as", "tablelist", "tmvar", - "sortlist", "sortitem", "item", "sortorder", - "grouplist", "exprlist", "expritem", "insert_value_list", - "itemlist", + /* 0 */ "$", + /* 1 */ "ID", + /* 2 */ "BOOL", + /* 3 */ "TINYINT", + /* 4 */ "SMALLINT", + /* 5 */ "INTEGER", + /* 6 */ "BIGINT", + /* 7 */ "FLOAT", + /* 8 */ "DOUBLE", + /* 9 */ "STRING", + /* 10 */ "TIMESTAMP", + /* 11 */ "BINARY", + /* 12 */ "NCHAR", + /* 13 */ "OR", + /* 14 */ "AND", + /* 15 */ "NOT", + /* 16 */ "EQ", + /* 17 */ "NE", + /* 18 */ "ISNULL", + /* 19 */ "NOTNULL", + /* 20 */ "IS", + /* 21 */ "LIKE", + /* 22 */ "GLOB", + /* 23 */ "BETWEEN", + /* 24 */ "IN", + /* 25 */ "GT", + /* 26 */ "GE", + /* 27 */ "LT", + /* 28 */ "LE", + /* 29 */ "BITAND", + /* 30 */ "BITOR", + /* 31 */ "LSHIFT", + /* 32 */ "RSHIFT", + /* 33 */ "PLUS", + /* 34 */ "MINUS", + /* 35 */ "DIVIDE", + /* 36 */ "TIMES", + /* 37 */ "STAR", + /* 38 */ "SLASH", + /* 39 */ "REM", + /* 40 */ "CONCAT", + /* 41 */ "UMINUS", + 
/* 42 */ "UPLUS", + /* 43 */ "BITNOT", + /* 44 */ "SHOW", + /* 45 */ "DATABASES", + /* 46 */ "MNODES", + /* 47 */ "DNODES", + /* 48 */ "ACCOUNTS", + /* 49 */ "USERS", + /* 50 */ "MODULES", + /* 51 */ "QUERIES", + /* 52 */ "CONNECTIONS", + /* 53 */ "STREAMS", + /* 54 */ "CONFIGS", + /* 55 */ "SCORES", + /* 56 */ "GRANTS", + /* 57 */ "VNODES", + /* 58 */ "IPTOKEN", + /* 59 */ "DOT", + /* 60 */ "TABLES", + /* 61 */ "STABLES", + /* 62 */ "VGROUPS", + /* 63 */ "DROP", + /* 64 */ "TABLE", + /* 65 */ "DATABASE", + /* 66 */ "DNODE", + /* 67 */ "USER", + /* 68 */ "ACCOUNT", + /* 69 */ "USE", + /* 70 */ "DESCRIBE", + /* 71 */ "ALTER", + /* 72 */ "PASS", + /* 73 */ "PRIVILEGE", + /* 74 */ "LOCAL", + /* 75 */ "IF", + /* 76 */ "EXISTS", + /* 77 */ "CREATE", + /* 78 */ "PPS", + /* 79 */ "TSERIES", + /* 80 */ "DBS", + /* 81 */ "STORAGE", + /* 82 */ "QTIME", + /* 83 */ "CONNS", + /* 84 */ "STATE", + /* 85 */ "KEEP", + /* 86 */ "CACHE", + /* 87 */ "REPLICA", + /* 88 */ "DAYS", + /* 89 */ "ROWS", + /* 90 */ "ABLOCKS", + /* 91 */ "TBLOCKS", + /* 92 */ "CTIME", + /* 93 */ "CLOG", + /* 94 */ "COMP", + /* 95 */ "PRECISION", + /* 96 */ "LP", + /* 97 */ "RP", + /* 98 */ "TAGS", + /* 99 */ "USING", + /* 100 */ "AS", + /* 101 */ "COMMA", + /* 102 */ "NULL", + /* 103 */ "SELECT", + /* 104 */ "UNION", + /* 105 */ "ALL", + /* 106 */ "FROM", + /* 107 */ "VARIABLE", + /* 108 */ "INTERVAL", + /* 109 */ "FILL", + /* 110 */ "SLIDING", + /* 111 */ "ORDER", + /* 112 */ "BY", + /* 113 */ "ASC", + /* 114 */ "DESC", + /* 115 */ "GROUP", + /* 116 */ "HAVING", + /* 117 */ "LIMIT", + /* 118 */ "OFFSET", + /* 119 */ "SLIMIT", + /* 120 */ "SOFFSET", + /* 121 */ "WHERE", + /* 122 */ "NOW", + /* 123 */ "RESET", + /* 124 */ "QUERY", + /* 125 */ "ADD", + /* 126 */ "COLUMN", + /* 127 */ "TAG", + /* 128 */ "CHANGE", + /* 129 */ "SET", + /* 130 */ "KILL", + /* 131 */ "CONNECTION", + /* 132 */ "COLON", + /* 133 */ "STREAM", + /* 134 */ "ABORT", + /* 135 */ "AFTER", + /* 136 */ "ATTACH", + /* 137 */ "BEFORE", + /* 
138 */ "BEGIN", + /* 139 */ "CASCADE", + /* 140 */ "CLUSTER", + /* 141 */ "CONFLICT", + /* 142 */ "COPY", + /* 143 */ "DEFERRED", + /* 144 */ "DELIMITERS", + /* 145 */ "DETACH", + /* 146 */ "EACH", + /* 147 */ "END", + /* 148 */ "EXPLAIN", + /* 149 */ "FAIL", + /* 150 */ "FOR", + /* 151 */ "IGNORE", + /* 152 */ "IMMEDIATE", + /* 153 */ "INITIALLY", + /* 154 */ "INSTEAD", + /* 155 */ "MATCH", + /* 156 */ "KEY", + /* 157 */ "OF", + /* 158 */ "RAISE", + /* 159 */ "REPLACE", + /* 160 */ "RESTRICT", + /* 161 */ "ROW", + /* 162 */ "STATEMENT", + /* 163 */ "TRIGGER", + /* 164 */ "VIEW", + /* 165 */ "COUNT", + /* 166 */ "SUM", + /* 167 */ "AVG", + /* 168 */ "MIN", + /* 169 */ "MAX", + /* 170 */ "FIRST", + /* 171 */ "LAST", + /* 172 */ "TOP", + /* 173 */ "BOTTOM", + /* 174 */ "STDDEV", + /* 175 */ "PERCENTILE", + /* 176 */ "APERCENTILE", + /* 177 */ "LEASTSQUARES", + /* 178 */ "HISTOGRAM", + /* 179 */ "DIFF", + /* 180 */ "SPREAD", + /* 181 */ "TWA", + /* 182 */ "INTERP", + /* 183 */ "LAST_ROW", + /* 184 */ "RATE", + /* 185 */ "IRATE", + /* 186 */ "SUM_RATE", + /* 187 */ "SUM_IRATE", + /* 188 */ "AVG_RATE", + /* 189 */ "AVG_IRATE", + /* 190 */ "SEMI", + /* 191 */ "NONE", + /* 192 */ "PREV", + /* 193 */ "LINEAR", + /* 194 */ "IMPORT", + /* 195 */ "METRIC", + /* 196 */ "TBNAME", + /* 197 */ "JOIN", + /* 198 */ "METRICS", + /* 199 */ "STABLE", + /* 200 */ "INSERT", + /* 201 */ "INTO", + /* 202 */ "VALUES", + /* 203 */ "error", + /* 204 */ "program", + /* 205 */ "cmd", + /* 206 */ "dbPrefix", + /* 207 */ "ids", + /* 208 */ "cpxName", + /* 209 */ "ifexists", + /* 210 */ "alter_db_optr", + /* 211 */ "acct_optr", + /* 212 */ "ifnotexists", + /* 213 */ "db_optr", + /* 214 */ "pps", + /* 215 */ "tseries", + /* 216 */ "dbs", + /* 217 */ "streams", + /* 218 */ "storage", + /* 219 */ "qtime", + /* 220 */ "users", + /* 221 */ "conns", + /* 222 */ "state", + /* 223 */ "keep", + /* 224 */ "tagitemlist", + /* 225 */ "tables", + /* 226 */ "cache", + /* 227 */ "replica", + /* 228 */ "days", + 
/* 229 */ "rows", + /* 230 */ "ablocks", + /* 231 */ "tblocks", + /* 232 */ "ctime", + /* 233 */ "clog", + /* 234 */ "comp", + /* 235 */ "prec", + /* 236 */ "typename", + /* 237 */ "signed", + /* 238 */ "create_table_args", + /* 239 */ "columnlist", + /* 240 */ "select", + /* 241 */ "column", + /* 242 */ "tagitem", + /* 243 */ "selcollist", + /* 244 */ "from", + /* 245 */ "where_opt", + /* 246 */ "interval_opt", + /* 247 */ "fill_opt", + /* 248 */ "sliding_opt", + /* 249 */ "groupby_opt", + /* 250 */ "orderby_opt", + /* 251 */ "having_opt", + /* 252 */ "slimit_opt", + /* 253 */ "limit_opt", + /* 254 */ "union", + /* 255 */ "sclp", + /* 256 */ "expr", + /* 257 */ "as", + /* 258 */ "tablelist", + /* 259 */ "tmvar", + /* 260 */ "sortlist", + /* 261 */ "sortitem", + /* 262 */ "item", + /* 263 */ "sortorder", + /* 264 */ "grouplist", + /* 265 */ "exprlist", + /* 266 */ "expritem", }; -#endif /* NDEBUG */ +#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ #ifndef NDEBUG /* For tracing reduce actions, the names of all rules are required. 
@@ -787,188 +1010,188 @@ static const char *const yyRuleName[] = { /* 21 */ "cmd ::= SHOW dbPrefix STABLES", /* 22 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids", /* 23 */ "cmd ::= SHOW dbPrefix VGROUPS", - /* 24 */ "cmd ::= DROP TABLE ifexists ids cpxName", - /* 25 */ "cmd ::= DROP DATABASE ifexists ids", - /* 26 */ "cmd ::= DROP DNODE IPTOKEN", - /* 27 */ "cmd ::= DROP USER ids", - /* 28 */ "cmd ::= DROP ACCOUNT ids", - /* 29 */ "cmd ::= USE ids", - /* 30 */ "cmd ::= DESCRIBE ids cpxName", - /* 31 */ "cmd ::= ALTER USER ids PASS ids", - /* 32 */ "cmd ::= ALTER USER ids PRIVILEGE ids", - /* 33 */ "cmd ::= ALTER DNODE IPTOKEN ids", - /* 34 */ "cmd ::= ALTER DNODE IPTOKEN ids ids", - /* 35 */ "cmd ::= ALTER LOCAL ids", - /* 36 */ "cmd ::= ALTER LOCAL ids ids", - /* 37 */ "cmd ::= ALTER DATABASE ids alter_db_optr", - /* 38 */ "cmd ::= ALTER ACCOUNT ids acct_optr", - /* 39 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr", - /* 40 */ "ids ::= ID", - /* 41 */ "ids ::= STRING", - /* 42 */ "ifexists ::= IF EXISTS", - /* 43 */ "ifexists ::=", - /* 44 */ "ifnotexists ::= IF NOT EXISTS", - /* 45 */ "ifnotexists ::=", - /* 46 */ "cmd ::= CREATE DNODE IPTOKEN", - /* 47 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr", - /* 48 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", - /* 49 */ "cmd ::= CREATE USER ids PASS ids", - /* 50 */ "pps ::=", - /* 51 */ "pps ::= PPS INTEGER", - /* 52 */ "tseries ::=", - /* 53 */ "tseries ::= TSERIES INTEGER", - /* 54 */ "dbs ::=", - /* 55 */ "dbs ::= DBS INTEGER", - /* 56 */ "streams ::=", - /* 57 */ "streams ::= STREAMS INTEGER", - /* 58 */ "storage ::=", - /* 59 */ "storage ::= STORAGE INTEGER", - /* 60 */ "qtime ::=", - /* 61 */ "qtime ::= QTIME INTEGER", - /* 62 */ "users ::=", - /* 63 */ "users ::= USERS INTEGER", - /* 64 */ "conns ::=", - /* 65 */ "conns ::= CONNS INTEGER", - /* 66 */ "state ::=", - /* 67 */ "state ::= STATE ids", - /* 68 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state", - /* 69 */ "keep ::= 
KEEP tagitemlist", - /* 70 */ "tables ::= TABLES INTEGER", - /* 71 */ "cache ::= CACHE INTEGER", - /* 72 */ "replica ::= REPLICA INTEGER", - /* 73 */ "days ::= DAYS INTEGER", - /* 74 */ "rows ::= ROWS INTEGER", - /* 75 */ "ablocks ::= ABLOCKS ID", - /* 76 */ "tblocks ::= TBLOCKS INTEGER", - /* 77 */ "ctime ::= CTIME INTEGER", - /* 78 */ "clog ::= CLOG INTEGER", - /* 79 */ "comp ::= COMP INTEGER", - /* 80 */ "prec ::= PRECISION STRING", - /* 81 */ "db_optr ::=", - /* 82 */ "db_optr ::= db_optr tables", - /* 83 */ "db_optr ::= db_optr cache", - /* 84 */ "db_optr ::= db_optr replica", - /* 85 */ "db_optr ::= db_optr days", - /* 86 */ "db_optr ::= db_optr rows", - /* 87 */ "db_optr ::= db_optr ablocks", - /* 88 */ "db_optr ::= db_optr tblocks", - /* 89 */ "db_optr ::= db_optr ctime", - /* 90 */ "db_optr ::= db_optr clog", - /* 91 */ "db_optr ::= db_optr comp", - /* 92 */ "db_optr ::= db_optr prec", - /* 93 */ "db_optr ::= db_optr keep", - /* 94 */ "alter_db_optr ::=", - /* 95 */ "alter_db_optr ::= alter_db_optr replica", - /* 96 */ "alter_db_optr ::= alter_db_optr tables", - /* 97 */ "typename ::= ids", - /* 98 */ "typename ::= ids LP signed RP", - /* 99 */ "signed ::= INTEGER", - /* 100 */ "signed ::= PLUS INTEGER", - /* 101 */ "signed ::= MINUS INTEGER", - /* 102 */ "cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args", - /* 103 */ "create_table_args ::= LP columnlist RP", - /* 104 */ "create_table_args ::= LP columnlist RP TAGS LP columnlist RP", - /* 105 */ "create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP", - /* 106 */ "create_table_args ::= AS select", - /* 107 */ "columnlist ::= columnlist COMMA column", - /* 108 */ "columnlist ::= column", - /* 109 */ "column ::= ids typename", - /* 110 */ "tagitemlist ::= tagitemlist COMMA tagitem", - /* 111 */ "tagitemlist ::= tagitem", - /* 112 */ "tagitem ::= INTEGER", - /* 113 */ "tagitem ::= FLOAT", - /* 114 */ "tagitem ::= STRING", - /* 115 */ "tagitem ::= BOOL", - /* 116 */ "tagitem ::= NULL", - 
/* 117 */ "tagitem ::= MINUS INTEGER", - /* 118 */ "tagitem ::= MINUS FLOAT", - /* 119 */ "tagitem ::= PLUS INTEGER", - /* 120 */ "tagitem ::= PLUS FLOAT", - /* 121 */ "cmd ::= select", + /* 24 */ "cmd ::= SHOW dbPrefix VGROUPS ids", + /* 25 */ "cmd ::= DROP TABLE ifexists ids cpxName", + /* 26 */ "cmd ::= DROP DATABASE ifexists ids", + /* 27 */ "cmd ::= DROP DNODE IPTOKEN", + /* 28 */ "cmd ::= DROP USER ids", + /* 29 */ "cmd ::= DROP ACCOUNT ids", + /* 30 */ "cmd ::= USE ids", + /* 31 */ "cmd ::= DESCRIBE ids cpxName", + /* 32 */ "cmd ::= ALTER USER ids PASS ids", + /* 33 */ "cmd ::= ALTER USER ids PRIVILEGE ids", + /* 34 */ "cmd ::= ALTER DNODE IPTOKEN ids", + /* 35 */ "cmd ::= ALTER DNODE IPTOKEN ids ids", + /* 36 */ "cmd ::= ALTER LOCAL ids", + /* 37 */ "cmd ::= ALTER LOCAL ids ids", + /* 38 */ "cmd ::= ALTER DATABASE ids alter_db_optr", + /* 39 */ "cmd ::= ALTER ACCOUNT ids acct_optr", + /* 40 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr", + /* 41 */ "ids ::= ID", + /* 42 */ "ids ::= STRING", + /* 43 */ "ifexists ::= IF EXISTS", + /* 44 */ "ifexists ::=", + /* 45 */ "ifnotexists ::= IF NOT EXISTS", + /* 46 */ "ifnotexists ::=", + /* 47 */ "cmd ::= CREATE DNODE IPTOKEN", + /* 48 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr", + /* 49 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", + /* 50 */ "cmd ::= CREATE USER ids PASS ids", + /* 51 */ "pps ::=", + /* 52 */ "pps ::= PPS INTEGER", + /* 53 */ "tseries ::=", + /* 54 */ "tseries ::= TSERIES INTEGER", + /* 55 */ "dbs ::=", + /* 56 */ "dbs ::= DBS INTEGER", + /* 57 */ "streams ::=", + /* 58 */ "streams ::= STREAMS INTEGER", + /* 59 */ "storage ::=", + /* 60 */ "storage ::= STORAGE INTEGER", + /* 61 */ "qtime ::=", + /* 62 */ "qtime ::= QTIME INTEGER", + /* 63 */ "users ::=", + /* 64 */ "users ::= USERS INTEGER", + /* 65 */ "conns ::=", + /* 66 */ "conns ::= CONNS INTEGER", + /* 67 */ "state ::=", + /* 68 */ "state ::= STATE ids", + /* 69 */ "acct_optr ::= pps tseries storage streams qtime dbs users 
conns state", + /* 70 */ "keep ::= KEEP tagitemlist", + /* 71 */ "tables ::= TABLES INTEGER", + /* 72 */ "cache ::= CACHE INTEGER", + /* 73 */ "replica ::= REPLICA INTEGER", + /* 74 */ "days ::= DAYS INTEGER", + /* 75 */ "rows ::= ROWS INTEGER", + /* 76 */ "ablocks ::= ABLOCKS ID", + /* 77 */ "tblocks ::= TBLOCKS INTEGER", + /* 78 */ "ctime ::= CTIME INTEGER", + /* 79 */ "clog ::= CLOG INTEGER", + /* 80 */ "comp ::= COMP INTEGER", + /* 81 */ "prec ::= PRECISION STRING", + /* 82 */ "db_optr ::=", + /* 83 */ "db_optr ::= db_optr tables", + /* 84 */ "db_optr ::= db_optr cache", + /* 85 */ "db_optr ::= db_optr replica", + /* 86 */ "db_optr ::= db_optr days", + /* 87 */ "db_optr ::= db_optr rows", + /* 88 */ "db_optr ::= db_optr ablocks", + /* 89 */ "db_optr ::= db_optr tblocks", + /* 90 */ "db_optr ::= db_optr ctime", + /* 91 */ "db_optr ::= db_optr clog", + /* 92 */ "db_optr ::= db_optr comp", + /* 93 */ "db_optr ::= db_optr prec", + /* 94 */ "db_optr ::= db_optr keep", + /* 95 */ "alter_db_optr ::=", + /* 96 */ "alter_db_optr ::= alter_db_optr replica", + /* 97 */ "alter_db_optr ::= alter_db_optr tables", + /* 98 */ "typename ::= ids", + /* 99 */ "typename ::= ids LP signed RP", + /* 100 */ "signed ::= INTEGER", + /* 101 */ "signed ::= PLUS INTEGER", + /* 102 */ "signed ::= MINUS INTEGER", + /* 103 */ "cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args", + /* 104 */ "create_table_args ::= LP columnlist RP", + /* 105 */ "create_table_args ::= LP columnlist RP TAGS LP columnlist RP", + /* 106 */ "create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP", + /* 107 */ "create_table_args ::= AS select", + /* 108 */ "columnlist ::= columnlist COMMA column", + /* 109 */ "columnlist ::= column", + /* 110 */ "column ::= ids typename", + /* 111 */ "tagitemlist ::= tagitemlist COMMA tagitem", + /* 112 */ "tagitemlist ::= tagitem", + /* 113 */ "tagitem ::= INTEGER", + /* 114 */ "tagitem ::= FLOAT", + /* 115 */ "tagitem ::= STRING", + /* 116 */ "tagitem ::= 
BOOL", + /* 117 */ "tagitem ::= NULL", + /* 118 */ "tagitem ::= MINUS INTEGER", + /* 119 */ "tagitem ::= MINUS FLOAT", + /* 120 */ "tagitem ::= PLUS INTEGER", + /* 121 */ "tagitem ::= PLUS FLOAT", /* 122 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", - /* 123 */ "select ::= SELECT selcollist", - /* 124 */ "sclp ::= selcollist COMMA", - /* 125 */ "sclp ::=", - /* 126 */ "selcollist ::= sclp expr as", - /* 127 */ "selcollist ::= sclp STAR", - /* 128 */ "as ::= AS ids", - /* 129 */ "as ::= ids", - /* 130 */ "as ::=", - /* 131 */ "from ::= FROM tablelist", - /* 132 */ "tablelist ::= ids cpxName", - /* 133 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 134 */ "tmvar ::= VARIABLE", - /* 135 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 136 */ "interval_opt ::=", - /* 137 */ "fill_opt ::=", - /* 138 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 139 */ "fill_opt ::= FILL LP ID RP", - /* 140 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 141 */ "sliding_opt ::=", - /* 142 */ "orderby_opt ::=", - /* 143 */ "orderby_opt ::= ORDER BY sortlist", - /* 144 */ "sortlist ::= sortlist COMMA item sortorder", - /* 145 */ "sortlist ::= item sortorder", - /* 146 */ "item ::= ids cpxName", - /* 147 */ "sortorder ::= ASC", - /* 148 */ "sortorder ::= DESC", - /* 149 */ "sortorder ::=", - /* 150 */ "groupby_opt ::=", - /* 151 */ "groupby_opt ::= GROUP BY grouplist", - /* 152 */ "grouplist ::= grouplist COMMA item", - /* 153 */ "grouplist ::= item", - /* 154 */ "having_opt ::=", - /* 155 */ "having_opt ::= HAVING expr", - /* 156 */ "limit_opt ::=", - /* 157 */ "limit_opt ::= LIMIT signed", - /* 158 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 159 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 160 */ "slimit_opt ::=", - /* 161 */ "slimit_opt ::= SLIMIT signed", - /* 162 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 163 */ "slimit_opt ::= SLIMIT signed COMMA 
signed", - /* 164 */ "where_opt ::=", - /* 165 */ "where_opt ::= WHERE expr", - /* 166 */ "expr ::= LP expr RP", - /* 167 */ "expr ::= ID", - /* 168 */ "expr ::= ID DOT ID", - /* 169 */ "expr ::= ID DOT STAR", - /* 170 */ "expr ::= INTEGER", - /* 171 */ "expr ::= MINUS INTEGER", - /* 172 */ "expr ::= PLUS INTEGER", - /* 173 */ "expr ::= FLOAT", - /* 174 */ "expr ::= MINUS FLOAT", - /* 175 */ "expr ::= PLUS FLOAT", - /* 176 */ "expr ::= STRING", - /* 177 */ "expr ::= NOW", - /* 178 */ "expr ::= VARIABLE", - /* 179 */ "expr ::= BOOL", - /* 180 */ "expr ::= ID LP exprlist RP", - /* 181 */ "expr ::= ID LP STAR RP", - /* 182 */ "expr ::= expr AND expr", - /* 183 */ "expr ::= expr OR expr", - /* 184 */ "expr ::= expr LT expr", - /* 185 */ "expr ::= expr GT expr", - /* 186 */ "expr ::= expr LE expr", - /* 187 */ "expr ::= expr GE expr", - /* 188 */ "expr ::= expr NE expr", - /* 189 */ "expr ::= expr EQ expr", - /* 190 */ "expr ::= expr PLUS expr", - /* 191 */ "expr ::= expr MINUS expr", - /* 192 */ "expr ::= expr STAR expr", - /* 193 */ "expr ::= expr SLASH expr", - /* 194 */ "expr ::= expr REM expr", - /* 195 */ "expr ::= expr LIKE expr", - /* 196 */ "expr ::= expr IN LP exprlist RP", - /* 197 */ "exprlist ::= exprlist COMMA expritem", - /* 198 */ "exprlist ::= expritem", - /* 199 */ "expritem ::= expr", - /* 200 */ "expritem ::=", - /* 201 */ "cmd ::= INSERT INTO cpxName insert_value_list", - /* 202 */ "insert_value_list ::= VALUES LP itemlist RP", - /* 203 */ "insert_value_list ::= insert_value_list VALUES LP itemlist RP", - /* 204 */ "itemlist ::= itemlist COMMA expr", - /* 205 */ "itemlist ::= expr", + /* 123 */ "union ::= select", + /* 124 */ "union ::= LP union RP", + /* 125 */ "union ::= union UNION ALL select", + /* 126 */ "union ::= union UNION ALL LP select RP", + /* 127 */ "cmd ::= union", + /* 128 */ "select ::= SELECT selcollist", + /* 129 */ "sclp ::= selcollist COMMA", + /* 130 */ "sclp ::=", + /* 131 */ "selcollist ::= sclp expr as", + /* 132 */ 
"selcollist ::= sclp STAR", + /* 133 */ "as ::= AS ids", + /* 134 */ "as ::= ids", + /* 135 */ "as ::=", + /* 136 */ "from ::= FROM tablelist", + /* 137 */ "tablelist ::= ids cpxName", + /* 138 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 139 */ "tmvar ::= VARIABLE", + /* 140 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 141 */ "interval_opt ::=", + /* 142 */ "fill_opt ::=", + /* 143 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 144 */ "fill_opt ::= FILL LP ID RP", + /* 145 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 146 */ "sliding_opt ::=", + /* 147 */ "orderby_opt ::=", + /* 148 */ "orderby_opt ::= ORDER BY sortlist", + /* 149 */ "sortlist ::= sortlist COMMA item sortorder", + /* 150 */ "sortlist ::= item sortorder", + /* 151 */ "item ::= ids cpxName", + /* 152 */ "sortorder ::= ASC", + /* 153 */ "sortorder ::= DESC", + /* 154 */ "sortorder ::=", + /* 155 */ "groupby_opt ::=", + /* 156 */ "groupby_opt ::= GROUP BY grouplist", + /* 157 */ "grouplist ::= grouplist COMMA item", + /* 158 */ "grouplist ::= item", + /* 159 */ "having_opt ::=", + /* 160 */ "having_opt ::= HAVING expr", + /* 161 */ "limit_opt ::=", + /* 162 */ "limit_opt ::= LIMIT signed", + /* 163 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 164 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 165 */ "slimit_opt ::=", + /* 166 */ "slimit_opt ::= SLIMIT signed", + /* 167 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 168 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 169 */ "where_opt ::=", + /* 170 */ "where_opt ::= WHERE expr", + /* 171 */ "expr ::= LP expr RP", + /* 172 */ "expr ::= ID", + /* 173 */ "expr ::= ID DOT ID", + /* 174 */ "expr ::= ID DOT STAR", + /* 175 */ "expr ::= INTEGER", + /* 176 */ "expr ::= MINUS INTEGER", + /* 177 */ "expr ::= PLUS INTEGER", + /* 178 */ "expr ::= FLOAT", + /* 179 */ "expr ::= MINUS FLOAT", + /* 180 */ "expr ::= PLUS FLOAT", + /* 181 */ "expr ::= STRING", + /* 182 */ "expr ::= NOW", + /* 183 */ "expr ::= VARIABLE", + 
/* 184 */ "expr ::= BOOL", + /* 185 */ "expr ::= ID LP exprlist RP", + /* 186 */ "expr ::= ID LP STAR RP", + /* 187 */ "expr ::= expr AND expr", + /* 188 */ "expr ::= expr OR expr", + /* 189 */ "expr ::= expr LT expr", + /* 190 */ "expr ::= expr GT expr", + /* 191 */ "expr ::= expr LE expr", + /* 192 */ "expr ::= expr GE expr", + /* 193 */ "expr ::= expr NE expr", + /* 194 */ "expr ::= expr EQ expr", + /* 195 */ "expr ::= expr PLUS expr", + /* 196 */ "expr ::= expr MINUS expr", + /* 197 */ "expr ::= expr STAR expr", + /* 198 */ "expr ::= expr SLASH expr", + /* 199 */ "expr ::= expr REM expr", + /* 200 */ "expr ::= expr LIKE expr", + /* 201 */ "expr ::= expr IN LP exprlist RP", + /* 202 */ "exprlist ::= exprlist COMMA expritem", + /* 203 */ "exprlist ::= expritem", + /* 204 */ "expritem ::= expr", + /* 205 */ "expritem ::=", /* 206 */ "cmd ::= RESET QUERY CACHE", /* 207 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", /* 208 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", @@ -985,24 +1208,34 @@ static const char *const yyRuleName[] = { #if YYSTACKDEPTH<=0 /* -** Try to increase the size of the parser stack. +** Try to increase the size of the parser stack. Return the number +** of errors. Return 0 on success. */ -static void yyGrowStack(yyParser *p){ +static int yyGrowStack(yyParser *p){ int newSize; + int idx; yyStackEntry *pNew; newSize = p->yystksz*2 + 100; - pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + idx = p->yytos ? 
(int)(p->yytos - p->yystack) : 0; + if( p->yystack==&p->yystk0 ){ + pNew = malloc(newSize*sizeof(pNew[0])); + if( pNew ) pNew[0] = p->yystk0; + }else{ + pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + } if( pNew ){ p->yystack = pNew; - p->yystksz = newSize; + p->yytos = &p->yystack[idx]; #ifndef NDEBUG if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sStack grows to %d entries!\n", - yyTracePrompt, p->yystksz); + fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", + yyTracePrompt, p->yystksz, newSize); } #endif + p->yystksz = newSize; } + return pNew==0; } #endif @@ -1015,6 +1248,34 @@ static void yyGrowStack(yyParser *p){ # define YYMALLOCARGTYPE size_t #endif +/* Initialize a new parser that has already been allocated. +*/ +void ParseInit(void *yypParser){ + yyParser *pParser = (yyParser*)yypParser; +#ifdef YYTRACKMAXSTACKDEPTH + pParser->yyhwm = 0; +#endif +#if YYSTACKDEPTH<=0 + pParser->yytos = NULL; + pParser->yystack = NULL; + pParser->yystksz = 0; + if( yyGrowStack(pParser) ){ + pParser->yystack = &pParser->yystk0; + pParser->yystksz = 1; + } +#endif +#ifndef YYNOERRORRECOVERY + pParser->yyerrcnt = -1; +#endif + pParser->yytos = pParser->yystack; + pParser->yystack[0].stateno = 0; + pParser->yystack[0].major = 0; +#if YYSTACKDEPTH>0 + pParser->yystackEnd = &pParser->yystack[YYSTACKDEPTH-1]; +#endif +} + +#ifndef Parse_ENGINEALWAYSONSTACK /* ** This function allocates a new parser. 
** The only argument is a pointer to a function which works like @@ -1030,19 +1291,11 @@ static void yyGrowStack(yyParser *p){ void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){ yyParser *pParser; pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); - if( pParser ){ - pParser->yyidx = -1; -#ifdef YYTRACKMAXSTACKDEPTH - pParser->yyidxMax = 0; -#endif -#if YYSTACKDEPTH<=0 - pParser->yystack = NULL; - pParser->yystksz = 0; - yyGrowStack(pParser); -#endif - } + if( pParser ) ParseInit(pParser); return pParser; } +#endif /* Parse_ENGINEALWAYSONSTACK */ + /* The following function deletes the "minor type" or semantic value ** associated with a symbol. The symbol can be either a terminal @@ -1069,46 +1322,50 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 216: /* keep */ - case 217: /* tagitemlist */ - case 240: /* fill_opt */ - case 242: /* groupby_opt */ - case 243: /* orderby_opt */ - case 252: /* sortlist */ - case 256: /* grouplist */ + case 223: /* keep */ + case 224: /* tagitemlist */ + case 247: /* fill_opt */ + case 249: /* groupby_opt */ + case 250: /* orderby_opt */ + case 260: /* sortlist */ + case 264: /* grouplist */ +{ +tVariantListDestroy((yypminor->yy30)); +} + break; + case 239: /* columnlist */ { -tVariantListDestroy((yypminor->yy480)); +tFieldListDestroy((yypminor->yy325)); } break; - case 232: /* columnlist */ + case 240: /* select */ { -tFieldListDestroy((yypminor->yy421)); +doDestroyQuerySql((yypminor->yy444)); } break; - case 233: /* select */ + case 243: /* selcollist */ + case 255: /* sclp */ + case 265: /* exprlist */ { -destroyQuerySql((yypminor->yy138)); +tSQLExprListDestroy((yypminor->yy506)); } break; - case 236: /* selcollist */ - case 247: /* sclp */ - case 257: /* exprlist */ - case 260: /* itemlist */ + case 245: /* where_opt */ + case 251: /* having_opt */ + case 256: /* expr */ + case 266: /* expritem */ { 
-tSQLExprListDestroy((yypminor->yy284)); +tSQLExprDestroy((yypminor->yy388)); } break; - case 238: /* where_opt */ - case 244: /* having_opt */ - case 248: /* expr */ - case 258: /* expritem */ + case 254: /* union */ { -tSQLExprDestroy((yypminor->yy244)); +destroyAllSelectClause((yypminor->yy309)); } break; - case 253: /* sortitem */ + case 261: /* sortitem */ { -tVariantDestroy(&(yypminor->yy236)); +tVariantDestroy(&(yypminor->yy380)); } break; /********* End destructor definitions *****************************************/ @@ -1124,8 +1381,9 @@ tVariantDestroy(&(yypminor->yy236)); */ static void yy_pop_parser_stack(yyParser *pParser){ yyStackEntry *yytos; - assert( pParser->yyidx>=0 ); - yytos = &pParser->yystack[pParser->yyidx--]; + assert( pParser->yytos!=0 ); + assert( pParser->yytos > pParser->yystack ); + yytos = pParser->yytos--; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sPopping %s\n", @@ -1136,6 +1394,18 @@ static void yy_pop_parser_stack(yyParser *pParser){ yy_destructor(pParser, yytos->major, &yytos->minor); } +/* +** Clear all secondary memory allocations from the parser +*/ +void ParseFinalize(void *p){ + yyParser *pParser = (yyParser*)p; + while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser); +#if YYSTACKDEPTH<=0 + if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack); +#endif +} + +#ifndef Parse_ENGINEALWAYSONSTACK /* ** Deallocate and destroy a parser. Destructors are called for ** all stack elements before shutting the parser down. 
@@ -1148,16 +1418,13 @@ void ParseFree( void *p, /* The parser to be deleted */ void (*freeProc)(void*) /* Function used to reclaim memory */ ){ - yyParser *pParser = (yyParser*)p; #ifndef YYPARSEFREENEVERNULL - if( pParser==0 ) return; -#endif - while( pParser->yyidx>=0 ) yy_pop_parser_stack(pParser); -#if YYSTACKDEPTH<=0 - free(pParser->yystack); + if( p==0 ) return; #endif - (*freeProc)((void*)pParser); + ParseFinalize(p); + (*freeProc)(p); } +#endif /* Parse_ENGINEALWAYSONSTACK */ /* ** Return the peak depth of the stack for a parser. @@ -1165,7 +1432,44 @@ void ParseFree( #ifdef YYTRACKMAXSTACKDEPTH int ParseStackPeak(void *p){ yyParser *pParser = (yyParser*)p; - return pParser->yyidxMax; + return pParser->yyhwm; +} +#endif + +/* This array of booleans keeps track of the parser statement +** coverage. The element yycoverage[X][Y] is set when the parser +** is in state X and has a lookahead token Y. In a well-tested +** systems, every element of this matrix should end up being set. +*/ +#if defined(YYCOVERAGE) +static unsigned char yycoverage[YYNSTATE][YYNTOKEN]; +#endif + +/* +** Write into out a description of every state/lookahead combination that +** +** (1) has not been used by the parser, and +** (2) is not a syntax error. +** +** Return the number of missed state/lookahead combinations. 
+*/ +#if defined(YYCOVERAGE) +int ParseCoverage(FILE *out){ + int stateno, iLookAhead, i; + int nMissed = 0; + for(stateno=0; statenoyystack[pParser->yyidx].stateno; + int stateno = pParser->yytos->stateno; - if( stateno>=YY_MIN_REDUCE ) return stateno; + if( stateno>YY_MAX_SHIFT ) return stateno; assert( stateno <= YY_SHIFT_COUNT ); +#if defined(YYCOVERAGE) + yycoverage[stateno][iLookAhead] = 1; +#endif do{ i = yy_shift_ofst[stateno]; - if( i==YY_SHIFT_USE_DFLT ) return yy_default[stateno]; + assert( i>=0 && i+YYNTOKEN<=sizeof(yy_lookahead)/sizeof(yy_lookahead[0]) ); assert( iLookAhead!=YYNOCODE ); + assert( iLookAhead < YYNTOKEN ); i += iLookAhead; - if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){ - if( iLookAhead>0 ){ + if( yy_lookahead[i]!=iLookAhead ){ #ifdef YYFALLBACK - YYCODETYPE iFallback; /* Fallback token */ - if( iLookAhead %s\n", - yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]); - } -#endif - assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */ - iLookAhead = iFallback; - continue; + if( yyTraceFILE ){ + fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n", + yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]); } +#endif + assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */ + iLookAhead = iFallback; + continue; + } #endif #ifdef YYWILDCARD - { - int j = i - iLookAhead + YYWILDCARD; - if( + { + int j = i - iLookAhead + YYWILDCARD; + if( #if YY_SHIFT_MIN+YYWILDCARD<0 - j>=0 && + j>=0 && #endif #if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT - j0 + ){ #ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n", - yyTracePrompt, yyTokenName[iLookAhead], - yyTokenName[YYWILDCARD]); - } -#endif /* NDEBUG */ - return yy_action[j]; + if( yyTraceFILE ){ + fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n", + yyTracePrompt, yyTokenName[iLookAhead], + yyTokenName[YYWILDCARD]); } +#endif /* NDEBUG */ + return yy_action[j]; } -#endif /* YYWILDCARD */ } +#endif /* YYWILDCARD */ 
return yy_default[stateno]; }else{ return yy_action[i]; @@ -1252,7 +1558,6 @@ static int yy_find_reduce_action( assert( stateno<=YY_REDUCE_COUNT ); #endif i = yy_reduce_ofst[stateno]; - assert( i!=YY_REDUCE_USE_DFLT ); assert( iLookAhead!=YYNOCODE ); i += iLookAhead; #ifdef YYERRORSYMBOL @@ -1269,15 +1574,14 @@ static int yy_find_reduce_action( /* ** The following routine is called if the stack overflows. */ -static void yyStackOverflow(yyParser *yypParser, YYMINORTYPE *yypMinor){ +static void yyStackOverflow(yyParser *yypParser){ ParseARG_FETCH; - yypParser->yyidx--; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt); } #endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); + while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser); /* Here code is inserted which will execute if the parser ** stack every overflows */ /******** Begin %stack_overflow code ******************************************/ @@ -1289,20 +1593,21 @@ static void yyStackOverflow(yyParser *yypParser, YYMINORTYPE *yypMinor){ ** Print tracing information for a SHIFT action */ #ifndef NDEBUG -static void yyTraceShift(yyParser *yypParser, int yyNewState){ +static void yyTraceShift(yyParser *yypParser, int yyNewState, const char *zTag){ if( yyTraceFILE ){ if( yyNewStateyystack[yypParser->yyidx].major], + fprintf(yyTraceFILE,"%s%s '%s', go to state %d\n", + yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major], yyNewState); }else{ - fprintf(yyTraceFILE,"%sShift '%s'\n", - yyTracePrompt,yyTokenName[yypParser->yystack[yypParser->yyidx].major]); + fprintf(yyTraceFILE,"%s%s '%s', pending reduce %d\n", + yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major], + yyNewState - YY_MIN_REDUCE); } } } #else -# define yyTraceShift(X,Y) +# define yyTraceShift(X,Y,Z) #endif /* @@ -1312,259 +1617,264 @@ static void yy_shift( yyParser *yypParser, /* The parser to be shifted */ int yyNewState, /* The new state to shift in */ int yyMajor, 
/* The major token to shift in */ - YYMINORTYPE *yypMinor /* Pointer to the minor token to shift in */ + ParseTOKENTYPE yyMinor /* The minor token to shift in */ ){ yyStackEntry *yytos; - yypParser->yyidx++; + yypParser->yytos++; #ifdef YYTRACKMAXSTACKDEPTH - if( yypParser->yyidx>yypParser->yyidxMax ){ - yypParser->yyidxMax = yypParser->yyidx; + if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) ); } #endif #if YYSTACKDEPTH>0 - if( yypParser->yyidx>=YYSTACKDEPTH ){ - yyStackOverflow(yypParser, yypMinor); + if( yypParser->yytos>yypParser->yystackEnd ){ + yypParser->yytos--; + yyStackOverflow(yypParser); return; } #else - if( yypParser->yyidx>=yypParser->yystksz ){ - yyGrowStack(yypParser); - if( yypParser->yyidx>=yypParser->yystksz ){ - yyStackOverflow(yypParser, yypMinor); + if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){ + if( yyGrowStack(yypParser) ){ + yypParser->yytos--; + yyStackOverflow(yypParser); return; } } #endif - yytos = &yypParser->yystack[yypParser->yyidx]; + if( yyNewState > YY_MAX_SHIFT ){ + yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; + } + yytos = yypParser->yytos; yytos->stateno = (YYACTIONTYPE)yyNewState; yytos->major = (YYCODETYPE)yyMajor; - yytos->minor = *yypMinor; - yyTraceShift(yypParser, yyNewState); + yytos->minor.yy0 = yyMinor; + yyTraceShift(yypParser, yyNewState, "Shift"); } /* The following table contains information about every rule that ** is used during the reduce. 
*/ static const struct { - YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ - unsigned char nrhs; /* Number of right-hand side symbols in the rule */ + YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ + signed char nrhs; /* Negative of the number of RHS symbols in the rule */ } yyRuleInfo[] = { - { 197, 1 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 2 }, - { 198, 3 }, - { 199, 0 }, - { 199, 2 }, - { 201, 0 }, - { 201, 2 }, - { 198, 3 }, - { 198, 5 }, - { 198, 3 }, - { 198, 5 }, - { 198, 3 }, - { 198, 5 }, - { 198, 4 }, - { 198, 3 }, - { 198, 3 }, - { 198, 3 }, - { 198, 2 }, - { 198, 3 }, - { 198, 5 }, - { 198, 5 }, - { 198, 4 }, - { 198, 5 }, - { 198, 3 }, - { 198, 4 }, - { 198, 4 }, - { 198, 4 }, - { 198, 6 }, - { 200, 1 }, - { 200, 1 }, - { 202, 2 }, - { 202, 0 }, - { 205, 3 }, - { 205, 0 }, - { 198, 3 }, - { 198, 6 }, - { 198, 5 }, - { 198, 5 }, - { 207, 0 }, - { 207, 2 }, - { 208, 0 }, - { 208, 2 }, - { 209, 0 }, - { 209, 2 }, - { 210, 0 }, - { 210, 2 }, - { 211, 0 }, - { 211, 2 }, - { 212, 0 }, - { 212, 2 }, - { 213, 0 }, - { 213, 2 }, - { 214, 0 }, - { 214, 2 }, - { 215, 0 }, - { 215, 2 }, - { 204, 9 }, - { 216, 2 }, - { 218, 2 }, - { 219, 2 }, - { 220, 2 }, - { 221, 2 }, - { 222, 2 }, - { 223, 2 }, - { 224, 2 }, - { 225, 2 }, - { 226, 2 }, - { 227, 2 }, - { 228, 2 }, - { 206, 0 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 206, 2 }, - { 203, 0 }, - { 203, 2 }, - { 203, 2 }, - { 229, 1 }, - { 229, 4 }, - { 230, 1 }, - { 230, 2 }, - { 230, 2 }, - { 198, 6 }, - { 231, 3 }, - { 231, 7 }, - { 231, 7 }, - { 231, 2 }, - { 232, 3 }, - { 232, 1 }, - { 234, 2 }, - { 217, 3 }, - { 217, 1 }, - { 235, 1 }, - { 235, 1 }, - { 235, 1 }, - { 235, 1 }, - { 235, 1 }, - { 235, 2 }, - { 235, 2 }, - { 235, 
2 }, - { 235, 2 }, - { 198, 1 }, - { 233, 12 }, - { 233, 2 }, - { 247, 2 }, - { 247, 0 }, - { 236, 3 }, - { 236, 2 }, - { 249, 2 }, - { 249, 1 }, - { 249, 0 }, - { 237, 2 }, - { 250, 2 }, - { 250, 4 }, - { 251, 1 }, - { 239, 4 }, - { 239, 0 }, - { 240, 0 }, - { 240, 6 }, - { 240, 4 }, - { 241, 4 }, - { 241, 0 }, - { 243, 0 }, - { 243, 3 }, - { 252, 4 }, - { 252, 2 }, - { 254, 2 }, - { 255, 1 }, - { 255, 1 }, - { 255, 0 }, - { 242, 0 }, - { 242, 3 }, - { 256, 3 }, - { 256, 1 }, - { 244, 0 }, - { 244, 2 }, - { 246, 0 }, - { 246, 2 }, - { 246, 4 }, - { 246, 4 }, - { 245, 0 }, - { 245, 2 }, - { 245, 4 }, - { 245, 4 }, - { 238, 0 }, - { 238, 2 }, - { 248, 3 }, - { 248, 1 }, - { 248, 3 }, - { 248, 3 }, - { 248, 1 }, - { 248, 2 }, - { 248, 2 }, - { 248, 1 }, - { 248, 2 }, - { 248, 2 }, - { 248, 1 }, - { 248, 1 }, - { 248, 1 }, - { 248, 1 }, - { 248, 4 }, - { 248, 4 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 3 }, - { 248, 5 }, - { 257, 3 }, - { 257, 1 }, - { 258, 1 }, - { 258, 0 }, - { 198, 4 }, - { 259, 4 }, - { 259, 5 }, - { 260, 3 }, - { 260, 1 }, - { 198, 3 }, - { 198, 7 }, - { 198, 7 }, - { 198, 7 }, - { 198, 7 }, - { 198, 8 }, - { 198, 9 }, - { 198, 5 }, - { 198, 7 }, - { 198, 7 }, + { 204, -1 }, /* (0) program ::= cmd */ + { 205, -2 }, /* (1) cmd ::= SHOW DATABASES */ + { 205, -2 }, /* (2) cmd ::= SHOW MNODES */ + { 205, -2 }, /* (3) cmd ::= SHOW DNODES */ + { 205, -2 }, /* (4) cmd ::= SHOW ACCOUNTS */ + { 205, -2 }, /* (5) cmd ::= SHOW USERS */ + { 205, -2 }, /* (6) cmd ::= SHOW MODULES */ + { 205, -2 }, /* (7) cmd ::= SHOW QUERIES */ + { 205, -2 }, /* (8) cmd ::= SHOW CONNECTIONS */ + { 205, -2 }, /* (9) cmd ::= SHOW STREAMS */ + { 205, -2 }, /* (10) cmd ::= SHOW CONFIGS */ + { 205, -2 }, /* (11) cmd ::= SHOW SCORES */ + { 205, -2 }, /* (12) cmd ::= SHOW GRANTS */ + { 205, -2 }, /* (13) cmd ::= SHOW VNODES */ + { 
205, -3 }, /* (14) cmd ::= SHOW VNODES IPTOKEN */ + { 206, 0 }, /* (15) dbPrefix ::= */ + { 206, -2 }, /* (16) dbPrefix ::= ids DOT */ + { 208, 0 }, /* (17) cpxName ::= */ + { 208, -2 }, /* (18) cpxName ::= DOT ids */ + { 205, -3 }, /* (19) cmd ::= SHOW dbPrefix TABLES */ + { 205, -5 }, /* (20) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + { 205, -3 }, /* (21) cmd ::= SHOW dbPrefix STABLES */ + { 205, -5 }, /* (22) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + { 205, -3 }, /* (23) cmd ::= SHOW dbPrefix VGROUPS */ + { 205, -4 }, /* (24) cmd ::= SHOW dbPrefix VGROUPS ids */ + { 205, -5 }, /* (25) cmd ::= DROP TABLE ifexists ids cpxName */ + { 205, -4 }, /* (26) cmd ::= DROP DATABASE ifexists ids */ + { 205, -3 }, /* (27) cmd ::= DROP DNODE IPTOKEN */ + { 205, -3 }, /* (28) cmd ::= DROP USER ids */ + { 205, -3 }, /* (29) cmd ::= DROP ACCOUNT ids */ + { 205, -2 }, /* (30) cmd ::= USE ids */ + { 205, -3 }, /* (31) cmd ::= DESCRIBE ids cpxName */ + { 205, -5 }, /* (32) cmd ::= ALTER USER ids PASS ids */ + { 205, -5 }, /* (33) cmd ::= ALTER USER ids PRIVILEGE ids */ + { 205, -4 }, /* (34) cmd ::= ALTER DNODE IPTOKEN ids */ + { 205, -5 }, /* (35) cmd ::= ALTER DNODE IPTOKEN ids ids */ + { 205, -3 }, /* (36) cmd ::= ALTER LOCAL ids */ + { 205, -4 }, /* (37) cmd ::= ALTER LOCAL ids ids */ + { 205, -4 }, /* (38) cmd ::= ALTER DATABASE ids alter_db_optr */ + { 205, -4 }, /* (39) cmd ::= ALTER ACCOUNT ids acct_optr */ + { 205, -6 }, /* (40) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + { 207, -1 }, /* (41) ids ::= ID */ + { 207, -1 }, /* (42) ids ::= STRING */ + { 209, -2 }, /* (43) ifexists ::= IF EXISTS */ + { 209, 0 }, /* (44) ifexists ::= */ + { 212, -3 }, /* (45) ifnotexists ::= IF NOT EXISTS */ + { 212, 0 }, /* (46) ifnotexists ::= */ + { 205, -3 }, /* (47) cmd ::= CREATE DNODE IPTOKEN */ + { 205, -6 }, /* (48) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + { 205, -5 }, /* (49) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + { 205, -5 }, /* (50) cmd ::= CREATE 
USER ids PASS ids */ + { 214, 0 }, /* (51) pps ::= */ + { 214, -2 }, /* (52) pps ::= PPS INTEGER */ + { 215, 0 }, /* (53) tseries ::= */ + { 215, -2 }, /* (54) tseries ::= TSERIES INTEGER */ + { 216, 0 }, /* (55) dbs ::= */ + { 216, -2 }, /* (56) dbs ::= DBS INTEGER */ + { 217, 0 }, /* (57) streams ::= */ + { 217, -2 }, /* (58) streams ::= STREAMS INTEGER */ + { 218, 0 }, /* (59) storage ::= */ + { 218, -2 }, /* (60) storage ::= STORAGE INTEGER */ + { 219, 0 }, /* (61) qtime ::= */ + { 219, -2 }, /* (62) qtime ::= QTIME INTEGER */ + { 220, 0 }, /* (63) users ::= */ + { 220, -2 }, /* (64) users ::= USERS INTEGER */ + { 221, 0 }, /* (65) conns ::= */ + { 221, -2 }, /* (66) conns ::= CONNS INTEGER */ + { 222, 0 }, /* (67) state ::= */ + { 222, -2 }, /* (68) state ::= STATE ids */ + { 211, -9 }, /* (69) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + { 223, -2 }, /* (70) keep ::= KEEP tagitemlist */ + { 225, -2 }, /* (71) tables ::= TABLES INTEGER */ + { 226, -2 }, /* (72) cache ::= CACHE INTEGER */ + { 227, -2 }, /* (73) replica ::= REPLICA INTEGER */ + { 228, -2 }, /* (74) days ::= DAYS INTEGER */ + { 229, -2 }, /* (75) rows ::= ROWS INTEGER */ + { 230, -2 }, /* (76) ablocks ::= ABLOCKS ID */ + { 231, -2 }, /* (77) tblocks ::= TBLOCKS INTEGER */ + { 232, -2 }, /* (78) ctime ::= CTIME INTEGER */ + { 233, -2 }, /* (79) clog ::= CLOG INTEGER */ + { 234, -2 }, /* (80) comp ::= COMP INTEGER */ + { 235, -2 }, /* (81) prec ::= PRECISION STRING */ + { 213, 0 }, /* (82) db_optr ::= */ + { 213, -2 }, /* (83) db_optr ::= db_optr tables */ + { 213, -2 }, /* (84) db_optr ::= db_optr cache */ + { 213, -2 }, /* (85) db_optr ::= db_optr replica */ + { 213, -2 }, /* (86) db_optr ::= db_optr days */ + { 213, -2 }, /* (87) db_optr ::= db_optr rows */ + { 213, -2 }, /* (88) db_optr ::= db_optr ablocks */ + { 213, -2 }, /* (89) db_optr ::= db_optr tblocks */ + { 213, -2 }, /* (90) db_optr ::= db_optr ctime */ + { 213, -2 }, /* (91) db_optr ::= db_optr clog */ + 
{ 213, -2 }, /* (92) db_optr ::= db_optr comp */ + { 213, -2 }, /* (93) db_optr ::= db_optr prec */ + { 213, -2 }, /* (94) db_optr ::= db_optr keep */ + { 210, 0 }, /* (95) alter_db_optr ::= */ + { 210, -2 }, /* (96) alter_db_optr ::= alter_db_optr replica */ + { 210, -2 }, /* (97) alter_db_optr ::= alter_db_optr tables */ + { 236, -1 }, /* (98) typename ::= ids */ + { 236, -4 }, /* (99) typename ::= ids LP signed RP */ + { 237, -1 }, /* (100) signed ::= INTEGER */ + { 237, -2 }, /* (101) signed ::= PLUS INTEGER */ + { 237, -2 }, /* (102) signed ::= MINUS INTEGER */ + { 205, -6 }, /* (103) cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ + { 238, -3 }, /* (104) create_table_args ::= LP columnlist RP */ + { 238, -7 }, /* (105) create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ + { 238, -7 }, /* (106) create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ + { 238, -2 }, /* (107) create_table_args ::= AS select */ + { 239, -3 }, /* (108) columnlist ::= columnlist COMMA column */ + { 239, -1 }, /* (109) columnlist ::= column */ + { 241, -2 }, /* (110) column ::= ids typename */ + { 224, -3 }, /* (111) tagitemlist ::= tagitemlist COMMA tagitem */ + { 224, -1 }, /* (112) tagitemlist ::= tagitem */ + { 242, -1 }, /* (113) tagitem ::= INTEGER */ + { 242, -1 }, /* (114) tagitem ::= FLOAT */ + { 242, -1 }, /* (115) tagitem ::= STRING */ + { 242, -1 }, /* (116) tagitem ::= BOOL */ + { 242, -1 }, /* (117) tagitem ::= NULL */ + { 242, -2 }, /* (118) tagitem ::= MINUS INTEGER */ + { 242, -2 }, /* (119) tagitem ::= MINUS FLOAT */ + { 242, -2 }, /* (120) tagitem ::= PLUS INTEGER */ + { 242, -2 }, /* (121) tagitem ::= PLUS FLOAT */ + { 240, -12 }, /* (122) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + { 254, -1 }, /* (123) union ::= select */ + { 254, -3 }, /* (124) union ::= LP union RP */ + { 254, -4 }, /* (125) union ::= union UNION ALL select */ 
+ { 254, -6 }, /* (126) union ::= union UNION ALL LP select RP */ + { 205, -1 }, /* (127) cmd ::= union */ + { 240, -2 }, /* (128) select ::= SELECT selcollist */ + { 255, -2 }, /* (129) sclp ::= selcollist COMMA */ + { 255, 0 }, /* (130) sclp ::= */ + { 243, -3 }, /* (131) selcollist ::= sclp expr as */ + { 243, -2 }, /* (132) selcollist ::= sclp STAR */ + { 257, -2 }, /* (133) as ::= AS ids */ + { 257, -1 }, /* (134) as ::= ids */ + { 257, 0 }, /* (135) as ::= */ + { 244, -2 }, /* (136) from ::= FROM tablelist */ + { 258, -2 }, /* (137) tablelist ::= ids cpxName */ + { 258, -4 }, /* (138) tablelist ::= tablelist COMMA ids cpxName */ + { 259, -1 }, /* (139) tmvar ::= VARIABLE */ + { 246, -4 }, /* (140) interval_opt ::= INTERVAL LP tmvar RP */ + { 246, 0 }, /* (141) interval_opt ::= */ + { 247, 0 }, /* (142) fill_opt ::= */ + { 247, -6 }, /* (143) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + { 247, -4 }, /* (144) fill_opt ::= FILL LP ID RP */ + { 248, -4 }, /* (145) sliding_opt ::= SLIDING LP tmvar RP */ + { 248, 0 }, /* (146) sliding_opt ::= */ + { 250, 0 }, /* (147) orderby_opt ::= */ + { 250, -3 }, /* (148) orderby_opt ::= ORDER BY sortlist */ + { 260, -4 }, /* (149) sortlist ::= sortlist COMMA item sortorder */ + { 260, -2 }, /* (150) sortlist ::= item sortorder */ + { 262, -2 }, /* (151) item ::= ids cpxName */ + { 263, -1 }, /* (152) sortorder ::= ASC */ + { 263, -1 }, /* (153) sortorder ::= DESC */ + { 263, 0 }, /* (154) sortorder ::= */ + { 249, 0 }, /* (155) groupby_opt ::= */ + { 249, -3 }, /* (156) groupby_opt ::= GROUP BY grouplist */ + { 264, -3 }, /* (157) grouplist ::= grouplist COMMA item */ + { 264, -1 }, /* (158) grouplist ::= item */ + { 251, 0 }, /* (159) having_opt ::= */ + { 251, -2 }, /* (160) having_opt ::= HAVING expr */ + { 253, 0 }, /* (161) limit_opt ::= */ + { 253, -2 }, /* (162) limit_opt ::= LIMIT signed */ + { 253, -4 }, /* (163) limit_opt ::= LIMIT signed OFFSET signed */ + { 253, -4 }, /* (164) limit_opt ::= LIMIT signed COMMA 
signed */ + { 252, 0 }, /* (165) slimit_opt ::= */ + { 252, -2 }, /* (166) slimit_opt ::= SLIMIT signed */ + { 252, -4 }, /* (167) slimit_opt ::= SLIMIT signed SOFFSET signed */ + { 252, -4 }, /* (168) slimit_opt ::= SLIMIT signed COMMA signed */ + { 245, 0 }, /* (169) where_opt ::= */ + { 245, -2 }, /* (170) where_opt ::= WHERE expr */ + { 256, -3 }, /* (171) expr ::= LP expr RP */ + { 256, -1 }, /* (172) expr ::= ID */ + { 256, -3 }, /* (173) expr ::= ID DOT ID */ + { 256, -3 }, /* (174) expr ::= ID DOT STAR */ + { 256, -1 }, /* (175) expr ::= INTEGER */ + { 256, -2 }, /* (176) expr ::= MINUS INTEGER */ + { 256, -2 }, /* (177) expr ::= PLUS INTEGER */ + { 256, -1 }, /* (178) expr ::= FLOAT */ + { 256, -2 }, /* (179) expr ::= MINUS FLOAT */ + { 256, -2 }, /* (180) expr ::= PLUS FLOAT */ + { 256, -1 }, /* (181) expr ::= STRING */ + { 256, -1 }, /* (182) expr ::= NOW */ + { 256, -1 }, /* (183) expr ::= VARIABLE */ + { 256, -1 }, /* (184) expr ::= BOOL */ + { 256, -4 }, /* (185) expr ::= ID LP exprlist RP */ + { 256, -4 }, /* (186) expr ::= ID LP STAR RP */ + { 256, -3 }, /* (187) expr ::= expr AND expr */ + { 256, -3 }, /* (188) expr ::= expr OR expr */ + { 256, -3 }, /* (189) expr ::= expr LT expr */ + { 256, -3 }, /* (190) expr ::= expr GT expr */ + { 256, -3 }, /* (191) expr ::= expr LE expr */ + { 256, -3 }, /* (192) expr ::= expr GE expr */ + { 256, -3 }, /* (193) expr ::= expr NE expr */ + { 256, -3 }, /* (194) expr ::= expr EQ expr */ + { 256, -3 }, /* (195) expr ::= expr PLUS expr */ + { 256, -3 }, /* (196) expr ::= expr MINUS expr */ + { 256, -3 }, /* (197) expr ::= expr STAR expr */ + { 256, -3 }, /* (198) expr ::= expr SLASH expr */ + { 256, -3 }, /* (199) expr ::= expr REM expr */ + { 256, -3 }, /* (200) expr ::= expr LIKE expr */ + { 256, -5 }, /* (201) expr ::= expr IN LP exprlist RP */ + { 265, -3 }, /* (202) exprlist ::= exprlist COMMA expritem */ + { 265, -1 }, /* (203) exprlist ::= expritem */ + { 266, -1 }, /* (204) expritem ::= expr */ + { 266, 0 
}, /* (205) expritem ::= */ + { 205, -3 }, /* (206) cmd ::= RESET QUERY CACHE */ + { 205, -7 }, /* (207) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + { 205, -7 }, /* (208) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + { 205, -7 }, /* (209) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + { 205, -7 }, /* (210) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + { 205, -8 }, /* (211) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + { 205, -9 }, /* (212) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + { 205, -5 }, /* (213) cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER */ + { 205, -7 }, /* (214) cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER */ + { 205, -7 }, /* (215) cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -1572,27 +1882,66 @@ static void yy_accept(yyParser*); /* Forward Declaration */ /* ** Perform a reduce action and the shift that must immediately ** follow the reduce. +** +** The yyLookahead and yyLookaheadToken parameters provide reduce actions +** access to the lookahead token (if any). The yyLookahead will be YYNOCODE +** if the lookahead token has already been consumed. As this procedure is +** only called from one place, optimizing compilers will in-line it, which +** means that the extra parameters have no performance impact. 
*/ static void yy_reduce( yyParser *yypParser, /* The parser */ - int yyruleno /* Number of the rule by which to reduce */ + unsigned int yyruleno, /* Number of the rule by which to reduce */ + int yyLookahead, /* Lookahead token, or YYNOCODE if none */ + ParseTOKENTYPE yyLookaheadToken /* Value of the lookahead token */ ){ int yygoto; /* The next state */ int yyact; /* The next action */ - YYMINORTYPE yygotominor; /* The LHS of the rule reduced */ yyStackEntry *yymsp; /* The top of the parser's stack */ int yysize; /* Amount to pop the stack */ ParseARG_FETCH; - yymsp = &yypParser->yystack[yypParser->yyidx]; + (void)yyLookahead; + (void)yyLookaheadToken; + yymsp = yypParser->yytos; #ifndef NDEBUG - if( yyTraceFILE && yyruleno>=0 - && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ + if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ yysize = yyRuleInfo[yyruleno].nrhs; - fprintf(yyTraceFILE, "%sReduce [%s], go to state %d.\n", yyTracePrompt, - yyRuleName[yyruleno], yymsp[-yysize].stateno); + if( yysize ){ + fprintf(yyTraceFILE, "%sReduce %d [%s], go to state %d.\n", + yyTracePrompt, + yyruleno, yyRuleName[yyruleno], yymsp[yysize].stateno); + }else{ + fprintf(yyTraceFILE, "%sReduce %d [%s].\n", + yyTracePrompt, yyruleno, yyRuleName[yyruleno]); + } } #endif /* NDEBUG */ - yygotominor = yyzerominor; + + /* Check that the stack is large enough to grow by a single entry + ** if the RHS of the rule is empty. 
This ensures that there is room + ** enough on the stack to push the LHS value */ + if( yyRuleInfo[yyruleno].nrhs==0 ){ +#ifdef YYTRACKMAXSTACKDEPTH + if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack)); + } +#endif +#if YYSTACKDEPTH>0 + if( yypParser->yytos>=yypParser->yystackEnd ){ + yyStackOverflow(yypParser); + return; + } +#else + if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ + if( yyGrowStack(yypParser) ){ + yyStackOverflow(yypParser); + return; + } + yymsp = yypParser->yytos; + } +#endif + } switch( yyruleno ){ /* Beginning here are the reduction cases. A typical example @@ -1604,614 +1953,705 @@ static void yy_reduce( ** break; */ /********** Begin reduce actions **********************************************/ + YYMINORTYPE yylhsminor; case 0: /* program ::= cmd */ {} break; case 1: /* cmd ::= SHOW DATABASES */ -{ setDCLSQLElems(pInfo, SHOW_DATABASES, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_DB, 0, 0);} break; case 2: /* cmd ::= SHOW MNODES */ -{ setDCLSQLElems(pInfo, SHOW_MNODES, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);} break; case 3: /* cmd ::= SHOW DNODES */ -{ setDCLSQLElems(pInfo, SHOW_DNODES, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);} break; case 4: /* cmd ::= SHOW ACCOUNTS */ -{ setDCLSQLElems(pInfo, SHOW_ACCOUNTS, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);} break; case 5: /* cmd ::= SHOW USERS */ -{ setDCLSQLElems(pInfo, SHOW_USERS, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_USER, 0, 0);} break; case 6: /* cmd ::= SHOW MODULES */ -{ setDCLSQLElems(pInfo, SHOW_MODULES, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_MODULE, 0, 0); } break; case 7: /* cmd ::= SHOW QUERIES */ -{ setDCLSQLElems(pInfo, SHOW_QUERIES, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_QUERIES, 0, 0); } break; case 8: /* cmd ::= SHOW CONNECTIONS */ -{ setDCLSQLElems(pInfo, 
SHOW_CONNECTIONS, 0);} +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_CONNS, 0, 0);} break; case 9: /* cmd ::= SHOW STREAMS */ -{ setDCLSQLElems(pInfo, SHOW_STREAMS, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_STREAMS, 0, 0); } break; case 10: /* cmd ::= SHOW CONFIGS */ -{ setDCLSQLElems(pInfo, SHOW_CONFIGS, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_CONFIGS, 0, 0); } break; case 11: /* cmd ::= SHOW SCORES */ -{ setDCLSQLElems(pInfo, SHOW_SCORES, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_SCORES, 0, 0); } break; case 12: /* cmd ::= SHOW GRANTS */ -{ setDCLSQLElems(pInfo, SHOW_GRANTS, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0); } break; case 13: /* cmd ::= SHOW VNODES */ -{ setDCLSQLElems(pInfo, SHOW_VNODES, 0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); } break; case 14: /* cmd ::= SHOW VNODES IPTOKEN */ -{ setDCLSQLElems(pInfo, SHOW_VNODES, 1, &yymsp[0].minor.yy0); } +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &yymsp[0].minor.yy0, 0); } break; case 15: /* dbPrefix ::= */ - case 43: /* ifexists ::= */ yytestcase(yyruleno==43); - case 45: /* ifnotexists ::= */ yytestcase(yyruleno==45); -{yygotominor.yy0.n = 0;} +{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.type = 0;} break; case 16: /* dbPrefix ::= ids DOT */ -{yygotominor.yy0 = yymsp[-1].minor.yy0; } +{yylhsminor.yy0 = yymsp[-1].minor.yy0; } + yymsp[-1].minor.yy0 = yylhsminor.yy0; break; case 17: /* cpxName ::= */ -{yygotominor.yy0.n = 0; } +{yymsp[1].minor.yy0.n = 0; } break; case 18: /* cpxName ::= DOT ids */ -{yygotominor.yy0 = yymsp[0].minor.yy0; yygotominor.yy0.n += 1; } +{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n += 1; } break; case 19: /* cmd ::= SHOW dbPrefix TABLES */ { - setDCLSQLElems(pInfo, SHOW_TABLES, 1, &yymsp[-1].minor.yy0); + setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-1].minor.yy0, 0); } break; case 20: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ { - setDCLSQLElems(pInfo, SHOW_TABLES, 2, &yymsp[-3].minor.yy0, 
&yymsp[0].minor.yy0); + setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0); } break; case 21: /* cmd ::= SHOW dbPrefix STABLES */ { - setDCLSQLElems(pInfo, SHOW_STABLES, 1, &yymsp[-1].minor.yy0); + setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &yymsp[-1].minor.yy0, 0); } break; case 22: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ { SSQLToken token; setDBName(&token, &yymsp[-3].minor.yy0); - setDCLSQLElems(pInfo, SHOW_STABLES, 2, &token, &yymsp[0].minor.yy0); + setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &yymsp[0].minor.yy0); } break; case 23: /* cmd ::= SHOW dbPrefix VGROUPS */ { SSQLToken token; setDBName(&token, &yymsp[-1].minor.yy0); - setDCLSQLElems(pInfo, SHOW_VGROUPS, 1, &token); + setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); } break; - case 24: /* cmd ::= DROP TABLE ifexists ids cpxName */ + case 24: /* cmd ::= SHOW dbPrefix VGROUPS ids */ { - yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - setDCLSQLElems(pInfo, DROP_TABLE, 2, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0); + SSQLToken token; + setDBName(&token, &yymsp[-2].minor.yy0); + setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0); } break; - case 25: /* cmd ::= DROP DATABASE ifexists ids */ -{ setDCLSQLElems(pInfo, DROP_DATABASE, 2, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0); } - break; - case 26: /* cmd ::= DROP DNODE IPTOKEN */ -{ setDCLSQLElems(pInfo, DROP_DNODE, 1, &yymsp[0].minor.yy0); } - break; - case 27: /* cmd ::= DROP USER ids */ -{ setDCLSQLElems(pInfo, DROP_USER, 1, &yymsp[0].minor.yy0); } - break; - case 28: /* cmd ::= DROP ACCOUNT ids */ -{ setDCLSQLElems(pInfo, DROP_ACCOUNT, 1, &yymsp[0].minor.yy0); } - break; - case 29: /* cmd ::= USE ids */ -{ setDCLSQLElems(pInfo, USE_DATABASE, 1, &yymsp[0].minor.yy0);} - break; - case 30: /* cmd ::= DESCRIBE ids cpxName */ + case 25: /* cmd ::= DROP TABLE ifexists ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - setDCLSQLElems(pInfo, DESCRIBE_TABLE, 
1, &yymsp[-1].minor.yy0); + setDropDBTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0); } break; - case 31: /* cmd ::= ALTER USER ids PASS ids */ -{ setDCLSQLElems(pInfo, ALTER_USER_PASSWD, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } - break; - case 32: /* cmd ::= ALTER USER ids PRIVILEGE ids */ -{ setDCLSQLElems(pInfo, ALTER_USER_PRIVILEGES, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} - break; - case 33: /* cmd ::= ALTER DNODE IPTOKEN ids */ -{ setDCLSQLElems(pInfo, ALTER_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } - break; - case 34: /* cmd ::= ALTER DNODE IPTOKEN ids ids */ -{ setDCLSQLElems(pInfo, ALTER_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } - break; - case 35: /* cmd ::= ALTER LOCAL ids */ -{ setDCLSQLElems(pInfo, ALTER_LOCAL, 1, &yymsp[0].minor.yy0); } - break; - case 36: /* cmd ::= ALTER LOCAL ids ids */ -{ setDCLSQLElems(pInfo, ALTER_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } + case 26: /* cmd ::= DROP DATABASE ifexists ids */ +{ setDropDBTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0); } break; - case 37: /* cmd ::= ALTER DATABASE ids alter_db_optr */ -{ SSQLToken t = {0}; setCreateDBSQL(pInfo, ALTER_DATABASE, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy262, &t);} + case 27: /* cmd ::= DROP DNODE IPTOKEN */ +{ setDCLSQLElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); } break; - case 38: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -{ SSQLToken t = {0}; setCreateAcctSQL(pInfo, ALTER_ACCT, &yymsp[-1].minor.yy0, &t, &yymsp[0].minor.yy155);} + case 28: /* cmd ::= DROP USER ids */ +{ setDCLSQLElems(pInfo, TSDB_SQL_DROP_USER, 1, &yymsp[0].minor.yy0); } break; - case 39: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSQL(pInfo, ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy155);} + case 29: /* cmd ::= DROP ACCOUNT ids */ +{ setDCLSQLElems(pInfo, TSDB_SQL_DROP_ACCT, 1, 
&yymsp[0].minor.yy0); } break; - case 40: /* ids ::= ID */ - case 41: /* ids ::= STRING */ yytestcase(yyruleno==41); -{yygotominor.yy0 = yymsp[0].minor.yy0; } + case 30: /* cmd ::= USE ids */ +{ setDCLSQLElems(pInfo, TSDB_SQL_USE_DB, 1, &yymsp[0].minor.yy0);} break; - case 42: /* ifexists ::= IF EXISTS */ - case 44: /* ifnotexists ::= IF NOT EXISTS */ yytestcase(yyruleno==44); -{yygotominor.yy0.n = 1;} - break; - case 46: /* cmd ::= CREATE DNODE IPTOKEN */ -{ setDCLSQLElems(pInfo, CREATE_DNODE, 1, &yymsp[0].minor.yy0);} - break; - case 47: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSQL(pInfo, CREATE_ACCOUNT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy155);} - break; - case 48: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ -{ setCreateDBSQL(pInfo, CREATE_DATABASE, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy262, &yymsp[-2].minor.yy0);} - break; - case 49: /* cmd ::= CREATE USER ids PASS ids */ -{ setDCLSQLElems(pInfo, CREATE_USER, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} - break; - case 50: /* pps ::= */ - case 52: /* tseries ::= */ yytestcase(yyruleno==52); - case 54: /* dbs ::= */ yytestcase(yyruleno==54); - case 56: /* streams ::= */ yytestcase(yyruleno==56); - case 58: /* storage ::= */ yytestcase(yyruleno==58); - case 60: /* qtime ::= */ yytestcase(yyruleno==60); - case 62: /* users ::= */ yytestcase(yyruleno==62); - case 64: /* conns ::= */ yytestcase(yyruleno==64); - case 66: /* state ::= */ yytestcase(yyruleno==66); -{yygotominor.yy0.n = 0; } - break; - case 51: /* pps ::= PPS INTEGER */ - case 53: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==53); - case 55: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==55); - case 57: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==57); - case 59: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==59); - case 61: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==61); - case 63: /* users ::= USERS INTEGER */ yytestcase(yyruleno==63); - case 65: /* 
conns ::= CONNS INTEGER */ yytestcase(yyruleno==65); - case 67: /* state ::= STATE ids */ yytestcase(yyruleno==67); -{yygotominor.yy0 = yymsp[0].minor.yy0; } - break; - case 68: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + case 31: /* cmd ::= DESCRIBE ids cpxName */ { - yygotominor.yy155.users = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yygotominor.yy155.dbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yygotominor.yy155.tseries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yygotominor.yy155.streams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yygotominor.yy155.pps = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - yygotominor.yy155.storage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yygotominor.yy155.qtime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yygotominor.yy155.conns = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yygotominor.yy155.stat = yymsp[0].minor.yy0; + yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; + setDCLSQLElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); } break; - case 69: /* keep ::= KEEP tagitemlist */ -{ yygotominor.yy480 = yymsp[0].minor.yy480; } + case 32: /* cmd ::= ALTER USER ids PASS ids */ +{ setAlterUserSQL(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); } + break; + case 33: /* cmd ::= ALTER USER ids PRIVILEGE ids */ +{ setAlterUserSQL(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);} + break; + case 34: /* cmd ::= ALTER DNODE IPTOKEN ids */ +{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 70: /* tables ::= TABLES INTEGER */ - case 71: /* cache ::= CACHE INTEGER */ yytestcase(yyruleno==71); - case 72: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==72); - case 73: /* days ::= DAYS INTEGER */ 
yytestcase(yyruleno==73); - case 74: /* rows ::= ROWS INTEGER */ yytestcase(yyruleno==74); - case 75: /* ablocks ::= ABLOCKS ID */ yytestcase(yyruleno==75); - case 76: /* tblocks ::= TBLOCKS INTEGER */ yytestcase(yyruleno==76); - case 77: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==77); - case 78: /* clog ::= CLOG INTEGER */ yytestcase(yyruleno==78); - case 79: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==79); - case 80: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==80); -{ yygotominor.yy0 = yymsp[0].minor.yy0; } + case 35: /* cmd ::= ALTER DNODE IPTOKEN ids ids */ +{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 81: /* db_optr ::= */ -{setDefaultCreateDbOption(&yygotominor.yy262);} + case 36: /* cmd ::= ALTER LOCAL ids */ +{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &yymsp[0].minor.yy0); } break; - case 82: /* db_optr ::= db_optr tables */ - case 96: /* alter_db_optr ::= alter_db_optr tables */ yytestcase(yyruleno==96); -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.tablesPerVnode = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 37: /* cmd ::= ALTER LOCAL ids ids */ +{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 83: /* db_optr ::= db_optr cache */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 38: /* cmd ::= ALTER DATABASE ids alter_db_optr */ +{ SSQLToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy532, &t);} break; - case 84: /* db_optr ::= db_optr replica */ - case 95: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==95); -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 39: /* cmd ::= ALTER ACCOUNT ids acct_optr */ +{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, 
&yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy239);} break; - case 85: /* db_optr ::= db_optr days */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 40: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ +{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy239);} break; - case 86: /* db_optr ::= db_optr rows */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.rowPerFileBlock = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 41: /* ids ::= ID */ + case 42: /* ids ::= STRING */ yytestcase(yyruleno==42); +{yylhsminor.yy0 = yymsp[0].minor.yy0; } + yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 87: /* db_optr ::= db_optr ablocks */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.numOfAvgCacheBlocks = strtod(yymsp[0].minor.yy0.z, NULL); } + case 43: /* ifexists ::= IF EXISTS */ +{yymsp[-1].minor.yy0.n = 1;} break; - case 88: /* db_optr ::= db_optr tblocks */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.numOfBlocksPerTable = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 44: /* ifexists ::= */ + case 46: /* ifnotexists ::= */ yytestcase(yyruleno==46); +{yymsp[1].minor.yy0.n = 0;} break; - case 89: /* db_optr ::= db_optr ctime */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 45: /* ifnotexists ::= IF NOT EXISTS */ +{yymsp[-2].minor.yy0.n = 1;} break; - case 90: /* db_optr ::= db_optr clog */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.commitLog = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 47: /* cmd ::= CREATE DNODE IPTOKEN */ +{ setDCLSQLElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; - case 91: /* db_optr ::= db_optr comp */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 
48: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ +{ setCreateAcctSQL(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy239);} break; - case 92: /* db_optr ::= db_optr prec */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.precision = yymsp[0].minor.yy0; } + case 49: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ +{ setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy532, &yymsp[-2].minor.yy0);} break; - case 93: /* db_optr ::= db_optr keep */ -{ yygotominor.yy262 = yymsp[-1].minor.yy262; yygotominor.yy262.keep = yymsp[0].minor.yy480; } + case 50: /* cmd ::= CREATE USER ids PASS ids */ +{ setCreateUserSQL(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} break; - case 94: /* alter_db_optr ::= */ -{ setDefaultCreateDbOption(&yygotominor.yy262);} + case 51: /* pps ::= */ + case 53: /* tseries ::= */ yytestcase(yyruleno==53); + case 55: /* dbs ::= */ yytestcase(yyruleno==55); + case 57: /* streams ::= */ yytestcase(yyruleno==57); + case 59: /* storage ::= */ yytestcase(yyruleno==59); + case 61: /* qtime ::= */ yytestcase(yyruleno==61); + case 63: /* users ::= */ yytestcase(yyruleno==63); + case 65: /* conns ::= */ yytestcase(yyruleno==65); + case 67: /* state ::= */ yytestcase(yyruleno==67); +{yymsp[1].minor.yy0.n = 0; } break; - case 97: /* typename ::= ids */ -{ tSQLSetColumnType (&yygotominor.yy397, &yymsp[0].minor.yy0); } + case 52: /* pps ::= PPS INTEGER */ + case 54: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==54); + case 56: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==56); + case 58: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==58); + case 60: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==60); + case 62: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==62); + case 64: /* users ::= USERS INTEGER */ yytestcase(yyruleno==64); + case 66: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==66); + case 68: /* state ::= STATE ids */ 
yytestcase(yyruleno==68); +{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 98: /* typename ::= ids LP signed RP */ + case 69: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy369; // negative value of name length - tSQLSetColumnType(&yygotominor.yy397, &yymsp[-3].minor.yy0); + yylhsminor.yy239.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yylhsminor.yy239.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yylhsminor.yy239.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yylhsminor.yy239.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yylhsminor.yy239.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + yylhsminor.yy239.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy239.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy239.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + yylhsminor.yy239.stat = yymsp[0].minor.yy0; } + yymsp[-8].minor.yy239 = yylhsminor.yy239; + break; + case 70: /* keep ::= KEEP tagitemlist */ +{ yymsp[-1].minor.yy30 = yymsp[0].minor.yy30; } + break; + case 71: /* tables ::= TABLES INTEGER */ + case 72: /* cache ::= CACHE INTEGER */ yytestcase(yyruleno==72); + case 73: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==73); + case 74: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==74); + case 75: /* rows ::= ROWS INTEGER */ yytestcase(yyruleno==75); + case 76: /* ablocks ::= ABLOCKS ID */ yytestcase(yyruleno==76); + case 77: /* tblocks ::= TBLOCKS INTEGER */ yytestcase(yyruleno==77); + case 78: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==78); + case 79: /* clog ::= CLOG INTEGER */ yytestcase(yyruleno==79); + case 80: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==80); + case 81: /* prec ::= 
PRECISION STRING */ yytestcase(yyruleno==81); +{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } + break; + case 82: /* db_optr ::= */ +{setDefaultCreateDbOption(&yymsp[1].minor.yy532);} + break; + case 83: /* db_optr ::= db_optr tables */ + case 97: /* alter_db_optr ::= alter_db_optr tables */ yytestcase(yyruleno==97); +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.tablesPerVnode = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 84: /* db_optr ::= db_optr cache */ +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 85: /* db_optr ::= db_optr replica */ + case 96: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==96); +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 86: /* db_optr ::= db_optr days */ +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 87: /* db_optr ::= db_optr rows */ +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.rowPerFileBlock = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 88: /* db_optr ::= db_optr ablocks */ +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.numOfAvgCacheBlocks = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 89: /* db_optr ::= db_optr tblocks */ +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.numOfBlocksPerTable = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 90: /* db_optr ::= db_optr ctime */ +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.commitTime = 
strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 91: /* db_optr ::= db_optr clog */ +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.commitLog = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 92: /* db_optr ::= db_optr comp */ +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 93: /* db_optr ::= db_optr prec */ +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.precision = yymsp[0].minor.yy0; } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 94: /* db_optr ::= db_optr keep */ +{ yylhsminor.yy532 = yymsp[-1].minor.yy532; yylhsminor.yy532.keep = yymsp[0].minor.yy30; } + yymsp[-1].minor.yy532 = yylhsminor.yy532; + break; + case 95: /* alter_db_optr ::= */ +{ setDefaultCreateDbOption(&yymsp[1].minor.yy532);} + break; + case 98: /* typename ::= ids */ +{ tSQLSetColumnType (&yylhsminor.yy505, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy505 = yylhsminor.yy505; + break; + case 99: /* typename ::= ids LP signed RP */ +{ + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy489; // negative value of name length + tSQLSetColumnType(&yylhsminor.yy505, &yymsp[-3].minor.yy0); +} + yymsp[-3].minor.yy505 = yylhsminor.yy505; + break; + case 100: /* signed ::= INTEGER */ +{ yylhsminor.yy489 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[0].minor.yy489 = yylhsminor.yy489; break; - case 99: /* signed ::= INTEGER */ - case 100: /* signed ::= PLUS INTEGER */ yytestcase(yyruleno==100); -{ yygotominor.yy369 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 101: /* signed ::= PLUS INTEGER */ +{ yymsp[-1].minor.yy489 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 101: /* signed ::= MINUS INTEGER */ -{ yygotominor.yy369 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} + case 102: /* signed ::= MINUS INTEGER */ +{ 
yymsp[-1].minor.yy489 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; - case 102: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ + case 103: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; setCreatedMeterName(pInfo, &yymsp[-2].minor.yy0, &yymsp[-3].minor.yy0); } break; - case 103: /* create_table_args ::= LP columnlist RP */ + case 104: /* create_table_args ::= LP columnlist RP */ { - yygotominor.yy344 = tSetCreateSQLElems(yymsp[-1].minor.yy421, NULL, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METER); - setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_NORMAL_METER); + yymsp[-2].minor.yy212 = tSetCreateSQLElems(yymsp[-1].minor.yy325, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE); + setSQLInfo(pInfo, yymsp[-2].minor.yy212, NULL, TSDB_SQL_CREATE_TABLE); } break; - case 104: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ + case 105: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ { - yygotominor.yy344 = tSetCreateSQLElems(yymsp[-5].minor.yy421, yymsp[-1].minor.yy421, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METRIC); - setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_NORMAL_METRIC); + yymsp[-6].minor.yy212 = tSetCreateSQLElems(yymsp[-5].minor.yy325, yymsp[-1].minor.yy325, NULL, NULL, NULL, TSQL_CREATE_STABLE); + setSQLInfo(pInfo, yymsp[-6].minor.yy212, NULL, TSDB_SQL_CREATE_TABLE); } break; - case 105: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ + case 106: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; - yygotominor.yy344 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy480, NULL, TSQL_CREATE_METER_FROM_METRIC); - setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_METER_FROM_METRIC); + yymsp[-6].minor.yy212 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy30, NULL, 
TSQL_CREATE_TABLE_FROM_STABLE); + setSQLInfo(pInfo, yymsp[-6].minor.yy212, NULL, TSDB_SQL_CREATE_TABLE); } break; - case 106: /* create_table_args ::= AS select */ + case 107: /* create_table_args ::= AS select */ { - yygotominor.yy344 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy138, TSQL_CREATE_STREAM); - setSQLInfo(pInfo, yygotominor.yy344, NULL, TSQL_CREATE_STREAM); + yymsp[-1].minor.yy212 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy444, TSQL_CREATE_STREAM); + setSQLInfo(pInfo, yymsp[-1].minor.yy212, NULL, TSDB_SQL_CREATE_TABLE); } break; - case 107: /* columnlist ::= columnlist COMMA column */ -{yygotominor.yy421 = tFieldListAppend(yymsp[-2].minor.yy421, &yymsp[0].minor.yy397); } + case 108: /* columnlist ::= columnlist COMMA column */ +{yylhsminor.yy325 = tFieldListAppend(yymsp[-2].minor.yy325, &yymsp[0].minor.yy505); } + yymsp[-2].minor.yy325 = yylhsminor.yy325; break; - case 108: /* columnlist ::= column */ -{yygotominor.yy421 = tFieldListAppend(NULL, &yymsp[0].minor.yy397);} + case 109: /* columnlist ::= column */ +{yylhsminor.yy325 = tFieldListAppend(NULL, &yymsp[0].minor.yy505);} + yymsp[0].minor.yy325 = yylhsminor.yy325; break; - case 109: /* column ::= ids typename */ + case 110: /* column ::= ids typename */ { - tSQLSetColumnInfo(&yygotominor.yy397, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy397); + tSQLSetColumnInfo(&yylhsminor.yy505, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy505); } - break; - case 110: /* tagitemlist ::= tagitemlist COMMA tagitem */ -{ yygotominor.yy480 = tVariantListAppend(yymsp[-2].minor.yy480, &yymsp[0].minor.yy236, -1); } - break; - case 111: /* tagitemlist ::= tagitem */ -{ yygotominor.yy480 = tVariantListAppend(NULL, &yymsp[0].minor.yy236, -1); } - break; - case 112: /* tagitem ::= INTEGER */ - case 113: /* tagitem ::= FLOAT */ yytestcase(yyruleno==113); - case 114: /* tagitem ::= STRING */ yytestcase(yyruleno==114); - case 115: /* tagitem ::= BOOL */ yytestcase(yyruleno==115); 
-{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yygotominor.yy236, &yymsp[0].minor.yy0); } - break; - case 116: /* tagitem ::= NULL */ -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yygotominor.yy236, &yymsp[0].minor.yy0); } - break; - case 117: /* tagitem ::= MINUS INTEGER */ - case 118: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==118); - case 119: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==119); - case 120: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==120); + yymsp[-1].minor.yy505 = yylhsminor.yy505; + break; + case 111: /* tagitemlist ::= tagitemlist COMMA tagitem */ +{ yylhsminor.yy30 = tVariantListAppend(yymsp[-2].minor.yy30, &yymsp[0].minor.yy380, -1); } + yymsp[-2].minor.yy30 = yylhsminor.yy30; + break; + case 112: /* tagitemlist ::= tagitem */ +{ yylhsminor.yy30 = tVariantListAppend(NULL, &yymsp[0].minor.yy380, -1); } + yymsp[0].minor.yy30 = yylhsminor.yy30; + break; + case 113: /* tagitem ::= INTEGER */ + case 114: /* tagitem ::= FLOAT */ yytestcase(yyruleno==114); + case 115: /* tagitem ::= STRING */ yytestcase(yyruleno==115); + case 116: /* tagitem ::= BOOL */ yytestcase(yyruleno==116); +{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy380, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy380 = yylhsminor.yy380; + break; + case 117: /* tagitem ::= NULL */ +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy380, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy380 = yylhsminor.yy380; + break; + case 118: /* tagitem ::= MINUS INTEGER */ + case 119: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==119); + case 120: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==120); + case 121: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==121); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yygotominor.yy236, &yymsp[-1].minor.yy0); -} - break; - case 121: /* cmd ::= select */ -{ - setSQLInfo(pInfo, 
yymsp[0].minor.yy138, NULL, TSQL_QUERY_METER); + tVariantCreate(&yylhsminor.yy380, &yymsp[-1].minor.yy0); } + yymsp[-1].minor.yy380 = yylhsminor.yy380; break; case 122: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - yygotominor.yy138 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy284, yymsp[-9].minor.yy480, yymsp[-8].minor.yy244, yymsp[-4].minor.yy480, yymsp[-3].minor.yy480, &yymsp[-7].minor.yy0, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy480, &yymsp[0].minor.yy162, &yymsp[-1].minor.yy162); + yylhsminor.yy444 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy506, yymsp[-9].minor.yy30, yymsp[-8].minor.yy388, yymsp[-4].minor.yy30, yymsp[-3].minor.yy30, &yymsp[-7].minor.yy0, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy30, &yymsp[0].minor.yy150, &yymsp[-1].minor.yy150); } + yymsp[-11].minor.yy444 = yylhsminor.yy444; + break; + case 123: /* union ::= select */ +{ yylhsminor.yy309 = setSubclause(NULL, yymsp[0].minor.yy444); } + yymsp[0].minor.yy309 = yylhsminor.yy309; + break; + case 124: /* union ::= LP union RP */ +{ yymsp[-2].minor.yy309 = yymsp[-1].minor.yy309; } + break; + case 125: /* union ::= union UNION ALL select */ +{ yylhsminor.yy309 = appendSelectClause(yymsp[-3].minor.yy309, yymsp[0].minor.yy444); } + yymsp[-3].minor.yy309 = yylhsminor.yy309; + break; + case 126: /* union ::= union UNION ALL LP select RP */ +{ yylhsminor.yy309 = appendSelectClause(yymsp[-5].minor.yy309, yymsp[-1].minor.yy444); } + yymsp[-5].minor.yy309 = yylhsminor.yy309; + break; + case 127: /* cmd ::= union */ +{ setSQLInfo(pInfo, yymsp[0].minor.yy309, NULL, TSDB_SQL_SELECT); } break; - case 123: /* select ::= SELECT selcollist */ + case 128: /* select ::= SELECT selcollist */ { - yygotominor.yy138 = tSetQuerySQLElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy284, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy444 = 
tSetQuerySQLElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy506, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } + yymsp[-1].minor.yy444 = yylhsminor.yy444; break; - case 124: /* sclp ::= selcollist COMMA */ -{yygotominor.yy284 = yymsp[-1].minor.yy284;} + case 129: /* sclp ::= selcollist COMMA */ +{yylhsminor.yy506 = yymsp[-1].minor.yy506;} + yymsp[-1].minor.yy506 = yylhsminor.yy506; break; - case 125: /* sclp ::= */ -{yygotominor.yy284 = 0;} + case 130: /* sclp ::= */ +{yymsp[1].minor.yy506 = 0;} break; - case 126: /* selcollist ::= sclp expr as */ + case 131: /* selcollist ::= sclp expr as */ { - yygotominor.yy284 = tSQLExprListAppend(yymsp[-2].minor.yy284, yymsp[-1].minor.yy244, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yylhsminor.yy506 = tSQLExprListAppend(yymsp[-2].minor.yy506, yymsp[-1].minor.yy388, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } + yymsp[-2].minor.yy506 = yylhsminor.yy506; break; - case 127: /* selcollist ::= sclp STAR */ + case 132: /* selcollist ::= sclp STAR */ { tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL); - yygotominor.yy284 = tSQLExprListAppend(yymsp[-1].minor.yy284, pNode, 0); + yylhsminor.yy506 = tSQLExprListAppend(yymsp[-1].minor.yy506, pNode, 0); } + yymsp[-1].minor.yy506 = yylhsminor.yy506; break; - case 128: /* as ::= AS ids */ - case 129: /* as ::= ids */ yytestcase(yyruleno==129); -{ yygotominor.yy0 = yymsp[0].minor.yy0; } + case 133: /* as ::= AS ids */ +{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 130: /* as ::= */ -{ yygotominor.yy0.n = 0; } + case 134: /* as ::= ids */ +{ yylhsminor.yy0 = yymsp[0].minor.yy0; } + yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 131: /* from ::= FROM tablelist */ - case 143: /* orderby_opt ::= ORDER BY sortlist */ yytestcase(yyruleno==143); - case 151: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==151); -{yygotominor.yy480 = yymsp[0].minor.yy480;} + case 135: /* as ::= */ +{ yymsp[1].minor.yy0.n = 0; } break; - case 132: /* tablelist ::= ids 
cpxName */ -{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yygotominor.yy480 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);} + case 136: /* from ::= FROM tablelist */ +{yymsp[-1].minor.yy30 = yymsp[0].minor.yy30;} break; - case 133: /* tablelist ::= tablelist COMMA ids cpxName */ -{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yygotominor.yy480 = tVariantListAppendToken(yymsp[-3].minor.yy480, &yymsp[-1].minor.yy0, -1); } + case 137: /* tablelist ::= ids cpxName */ +{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy30 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);} + yymsp[-1].minor.yy30 = yylhsminor.yy30; break; - case 134: /* tmvar ::= VARIABLE */ -{yygotominor.yy0 = yymsp[0].minor.yy0;} + case 138: /* tablelist ::= tablelist COMMA ids cpxName */ +{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy30 = tVariantListAppendToken(yymsp[-3].minor.yy30, &yymsp[-1].minor.yy0, -1); } + yymsp[-3].minor.yy30 = yylhsminor.yy30; break; - case 135: /* interval_opt ::= INTERVAL LP tmvar RP */ - case 140: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==140); -{yygotominor.yy0 = yymsp[-1].minor.yy0; } + case 139: /* tmvar ::= VARIABLE */ +{yylhsminor.yy0 = yymsp[0].minor.yy0;} + yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 136: /* interval_opt ::= */ - case 141: /* sliding_opt ::= */ yytestcase(yyruleno==141); -{yygotominor.yy0.n = 0; yygotominor.yy0.z = NULL; yygotominor.yy0.type = 0; } + case 140: /* interval_opt ::= INTERVAL LP tmvar RP */ + case 145: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==145); +{yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 137: /* fill_opt ::= */ -{yygotominor.yy480 = 0; } + case 141: /* interval_opt ::= */ + case 146: /* sliding_opt ::= */ yytestcase(yyruleno==146); +{yymsp[1].minor.yy0.n = 0; 
yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 138: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 142: /* fill_opt ::= */ +{yymsp[1].minor.yy30 = 0; } + break; + case 143: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy480, &A, -1, 0); - yygotominor.yy480 = yymsp[-1].minor.yy480; + tVariantListInsert(yymsp[-1].minor.yy30, &A, -1, 0); + yymsp[-5].minor.yy30 = yymsp[-1].minor.yy30; } break; - case 139: /* fill_opt ::= FILL LP ID RP */ + case 144: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yygotominor.yy480 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy30 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 142: /* orderby_opt ::= */ - case 150: /* groupby_opt ::= */ yytestcase(yyruleno==150); -{yygotominor.yy480 = 0;} + case 147: /* orderby_opt ::= */ + case 155: /* groupby_opt ::= */ yytestcase(yyruleno==155); +{yymsp[1].minor.yy30 = 0;} + break; + case 148: /* orderby_opt ::= ORDER BY sortlist */ + case 156: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==156); +{yymsp[-2].minor.yy30 = yymsp[0].minor.yy30;} break; - case 144: /* sortlist ::= sortlist COMMA item sortorder */ + case 149: /* sortlist ::= sortlist COMMA item sortorder */ { - yygotominor.yy480 = tVariantListAppend(yymsp[-3].minor.yy480, &yymsp[-1].minor.yy236, yymsp[0].minor.yy220); + yylhsminor.yy30 = tVariantListAppend(yymsp[-3].minor.yy30, &yymsp[-1].minor.yy380, yymsp[0].minor.yy250); } + yymsp[-3].minor.yy30 = yylhsminor.yy30; break; - case 145: /* sortlist ::= item sortorder */ + case 150: /* sortlist ::= item sortorder */ { - yygotominor.yy480 = tVariantListAppend(NULL, &yymsp[-1].minor.yy236, yymsp[0].minor.yy220); + yylhsminor.yy30 = tVariantListAppend(NULL, &yymsp[-1].minor.yy380, yymsp[0].minor.yy250); } + 
yymsp[-1].minor.yy30 = yylhsminor.yy30; break; - case 146: /* item ::= ids cpxName */ + case 151: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - tVariantCreate(&yygotominor.yy236, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy380, &yymsp[-1].minor.yy0); } + yymsp[-1].minor.yy380 = yylhsminor.yy380; break; - case 147: /* sortorder ::= ASC */ -{yygotominor.yy220 = TSQL_SO_ASC; } + case 152: /* sortorder ::= ASC */ +{yymsp[0].minor.yy250 = TSQL_SO_ASC; } break; - case 148: /* sortorder ::= DESC */ -{yygotominor.yy220 = TSQL_SO_DESC;} + case 153: /* sortorder ::= DESC */ +{yymsp[0].minor.yy250 = TSQL_SO_DESC;} break; - case 149: /* sortorder ::= */ -{yygotominor.yy220 = TSQL_SO_ASC;} + case 154: /* sortorder ::= */ +{yymsp[1].minor.yy250 = TSQL_SO_ASC;} break; - case 152: /* grouplist ::= grouplist COMMA item */ + case 157: /* grouplist ::= grouplist COMMA item */ { - yygotominor.yy480 = tVariantListAppend(yymsp[-2].minor.yy480, &yymsp[0].minor.yy236, -1); + yylhsminor.yy30 = tVariantListAppend(yymsp[-2].minor.yy30, &yymsp[0].minor.yy380, -1); } + yymsp[-2].minor.yy30 = yylhsminor.yy30; break; - case 153: /* grouplist ::= item */ + case 158: /* grouplist ::= item */ { - yygotominor.yy480 = tVariantListAppend(NULL, &yymsp[0].minor.yy236, -1); + yylhsminor.yy30 = tVariantListAppend(NULL, &yymsp[0].minor.yy380, -1); } + yymsp[0].minor.yy30 = yylhsminor.yy30; break; - case 154: /* having_opt ::= */ - case 164: /* where_opt ::= */ yytestcase(yyruleno==164); - case 200: /* expritem ::= */ yytestcase(yyruleno==200); -{yygotominor.yy244 = 0;} + case 159: /* having_opt ::= */ + case 169: /* where_opt ::= */ yytestcase(yyruleno==169); + case 205: /* expritem ::= */ yytestcase(yyruleno==205); +{yymsp[1].minor.yy388 = 0;} break; - case 155: /* having_opt ::= HAVING expr */ - case 165: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==165); - case 199: /* expritem ::= expr */ yytestcase(yyruleno==199); 
-{yygotominor.yy244 = yymsp[0].minor.yy244;} + case 160: /* having_opt ::= HAVING expr */ + case 170: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==170); +{yymsp[-1].minor.yy388 = yymsp[0].minor.yy388;} break; - case 156: /* limit_opt ::= */ - case 160: /* slimit_opt ::= */ yytestcase(yyruleno==160); -{yygotominor.yy162.limit = -1; yygotominor.yy162.offset = 0;} + case 161: /* limit_opt ::= */ + case 165: /* slimit_opt ::= */ yytestcase(yyruleno==165); +{yymsp[1].minor.yy150.limit = -1; yymsp[1].minor.yy150.offset = 0;} break; - case 157: /* limit_opt ::= LIMIT signed */ - case 161: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==161); -{yygotominor.yy162.limit = yymsp[0].minor.yy369; yygotominor.yy162.offset = 0;} + case 162: /* limit_opt ::= LIMIT signed */ + case 166: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==166); +{yymsp[-1].minor.yy150.limit = yymsp[0].minor.yy489; yymsp[-1].minor.yy150.offset = 0;} break; - case 158: /* limit_opt ::= LIMIT signed OFFSET signed */ - case 162: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==162); -{yygotominor.yy162.limit = yymsp[-2].minor.yy369; yygotominor.yy162.offset = yymsp[0].minor.yy369;} + case 163: /* limit_opt ::= LIMIT signed OFFSET signed */ + case 167: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==167); +{yymsp[-3].minor.yy150.limit = yymsp[-2].minor.yy489; yymsp[-3].minor.yy150.offset = yymsp[0].minor.yy489;} break; - case 159: /* limit_opt ::= LIMIT signed COMMA signed */ - case 163: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==163); -{yygotominor.yy162.limit = yymsp[0].minor.yy369; yygotominor.yy162.offset = yymsp[-2].minor.yy369;} + case 164: /* limit_opt ::= LIMIT signed COMMA signed */ + case 168: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==168); +{yymsp[-3].minor.yy150.limit = yymsp[0].minor.yy489; yymsp[-3].minor.yy150.offset = yymsp[-2].minor.yy489;} break; - case 166: /* expr ::= 
LP expr RP */ -{yygotominor.yy244 = yymsp[-1].minor.yy244; } + case 171: /* expr ::= LP expr RP */ +{yymsp[-2].minor.yy388 = yymsp[-1].minor.yy388; } break; - case 167: /* expr ::= ID */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} + case 172: /* expr ::= ID */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 168: /* expr ::= ID DOT ID */ -{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} + case 173: /* expr ::= ID DOT ID */ +{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 169: /* expr ::= ID DOT STAR */ -{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} + case 174: /* expr ::= ID DOT STAR */ +{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 170: /* expr ::= INTEGER */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} + case 175: /* expr ::= INTEGER */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 171: /* expr ::= MINUS INTEGER */ - case 172: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==172); -{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} + case 176: /* expr ::= MINUS INTEGER */ + case 177: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==177); +{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} + 
yymsp[-1].minor.yy388 = yylhsminor.yy388; break; - case 173: /* expr ::= FLOAT */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} + case 178: /* expr ::= FLOAT */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 174: /* expr ::= MINUS FLOAT */ - case 175: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==175); -{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} + case 179: /* expr ::= MINUS FLOAT */ + case 180: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==180); +{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} + yymsp[-1].minor.yy388 = yylhsminor.yy388; break; - case 176: /* expr ::= STRING */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} + case 181: /* expr ::= STRING */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 177: /* expr ::= NOW */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } + case 182: /* expr ::= NOW */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 178: /* expr ::= VARIABLE */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} + case 183: /* expr ::= VARIABLE */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 179: /* expr ::= BOOL */ -{yygotominor.yy244 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} + case 184: /* expr ::= BOOL */ +{yylhsminor.yy388 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; - case 180: /* 
expr ::= ID LP exprlist RP */ + case 185: /* expr ::= ID LP exprlist RP */ { - yygotominor.yy244 = tSQLExprCreateFunction(yymsp[-1].minor.yy284, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); + yylhsminor.yy388 = tSQLExprCreateFunction(yymsp[-1].minor.yy506, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy388 = yylhsminor.yy388; break; - case 181: /* expr ::= ID LP STAR RP */ + case 186: /* expr ::= ID LP STAR RP */ { - yygotominor.yy244 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); + yylhsminor.yy388 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy388 = yylhsminor.yy388; break; - case 182: /* expr ::= expr AND expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_AND);} - break; - case 183: /* expr ::= expr OR expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_OR); } + case 187: /* expr ::= expr AND expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_AND);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 184: /* expr ::= expr LT expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_LT);} + case 188: /* expr ::= expr OR expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_OR); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 185: /* expr ::= expr GT expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_GT);} + case 189: /* expr ::= expr LT expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_LT);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 186: /* expr ::= expr LE expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_LE);} + 
case 190: /* expr ::= expr GT expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_GT);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 187: /* expr ::= expr GE expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_GE);} + case 191: /* expr ::= expr LE expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_LE);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 188: /* expr ::= expr NE expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_NE);} + case 192: /* expr ::= expr GE expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_GE);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 189: /* expr ::= expr EQ expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_EQ);} + case 193: /* expr ::= expr NE expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_NE);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 190: /* expr ::= expr PLUS expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_PLUS); } + case 194: /* expr ::= expr EQ expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_EQ);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 191: /* expr ::= expr MINUS expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_MINUS); } + case 195: /* expr ::= expr PLUS expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_PLUS); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 192: /* expr ::= expr STAR expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_STAR); } + case 196: /* expr ::= expr MINUS expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, 
yymsp[0].minor.yy388, TK_MINUS); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 193: /* expr ::= expr SLASH expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_DIVIDE);} + case 197: /* expr ::= expr STAR expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_STAR); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 194: /* expr ::= expr REM expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_REM); } + case 198: /* expr ::= expr SLASH expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_DIVIDE);} + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 195: /* expr ::= expr LIKE expr */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-2].minor.yy244, yymsp[0].minor.yy244, TK_LIKE); } + case 199: /* expr ::= expr REM expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_REM); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 196: /* expr ::= expr IN LP exprlist RP */ -{yygotominor.yy244 = tSQLExprCreate(yymsp[-4].minor.yy244, (tSQLExpr*)yymsp[-1].minor.yy284, TK_IN); } + case 200: /* expr ::= expr LIKE expr */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-2].minor.yy388, yymsp[0].minor.yy388, TK_LIKE); } + yymsp[-2].minor.yy388 = yylhsminor.yy388; break; - case 197: /* exprlist ::= exprlist COMMA expritem */ - case 204: /* itemlist ::= itemlist COMMA expr */ yytestcase(yyruleno==204); -{yygotominor.yy284 = tSQLExprListAppend(yymsp[-2].minor.yy284,yymsp[0].minor.yy244,0);} + case 201: /* expr ::= expr IN LP exprlist RP */ +{yylhsminor.yy388 = tSQLExprCreate(yymsp[-4].minor.yy388, (tSQLExpr*)yymsp[-1].minor.yy506, TK_IN); } + yymsp[-4].minor.yy388 = yylhsminor.yy388; break; - case 198: /* exprlist ::= expritem */ - case 205: /* itemlist ::= expr */ yytestcase(yyruleno==205); -{yygotominor.yy284 = tSQLExprListAppend(0,yymsp[0].minor.yy244,0);} + case 
202: /* exprlist ::= exprlist COMMA expritem */ +{yylhsminor.yy506 = tSQLExprListAppend(yymsp[-2].minor.yy506,yymsp[0].minor.yy388,0);} + yymsp[-2].minor.yy506 = yylhsminor.yy506; break; - case 201: /* cmd ::= INSERT INTO cpxName insert_value_list */ -{ - tSetInsertSQLElems(pInfo, &yymsp[-1].minor.yy0, yymsp[0].minor.yy237); -} + case 203: /* exprlist ::= expritem */ +{yylhsminor.yy506 = tSQLExprListAppend(0,yymsp[0].minor.yy388,0);} + yymsp[0].minor.yy506 = yylhsminor.yy506; break; - case 202: /* insert_value_list ::= VALUES LP itemlist RP */ -{yygotominor.yy237 = tSQLListListAppend(NULL, yymsp[-1].minor.yy284);} - break; - case 203: /* insert_value_list ::= insert_value_list VALUES LP itemlist RP */ -{yygotominor.yy237 = tSQLListListAppend(yymsp[-4].minor.yy237, yymsp[-1].minor.yy284);} + case 204: /* expritem ::= expr */ +{yylhsminor.yy388 = yymsp[0].minor.yy388;} + yymsp[0].minor.yy388 = yylhsminor.yy388; break; case 206: /* cmd ::= RESET QUERY CACHE */ -{ setDCLSQLElems(pInfo, RESET_QUERY_CACHE, 0);} +{ setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; case 207: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, ALTER_TABLE_ADD_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_ADD_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy325, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; case 208: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ @@ -2221,15 +2661,15 @@ static void yy_reduce( toTSDBType(yymsp[0].minor.yy0.type); tVariantList* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, K, ALTER_TABLE_DROP_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_DROP_COLUMN); + 
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; case 209: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, ALTER_TABLE_TAGS_ADD); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_ADD); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy325, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; case 210: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ @@ -2239,8 +2679,8 @@ static void yy_reduce( toTSDBType(yymsp[0].minor.yy0.type); tVariantList* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, A, ALTER_TABLE_TAGS_DROP); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_DROP); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; case 211: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ @@ -2253,8 +2693,8 @@ static void yy_reduce( toTSDBType(yymsp[0].minor.yy0.type); A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-5].minor.yy0, NULL, A, ALTER_TABLE_TAGS_CHG); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_CHG); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; case 212: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ @@ -2263,50 +2703,42 @@ static void yy_reduce( toTSDBType(yymsp[-2].minor.yy0.type); 
tVariantList* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - A = tVariantListAppend(A, &yymsp[0].minor.yy236, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy380, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-6].minor.yy0, NULL, A, ALTER_TABLE_TAGS_SET); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_SET); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; case 213: /* cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER */ -{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_CONNECTION, 1, &yymsp[-2].minor.yy0);} +{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[-2].minor.yy0);} break; case 214: /* cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER */ -{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_STREAM, 1, &yymsp[-4].minor.yy0);} +{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-4].minor.yy0);} break; case 215: /* cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER */ -{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_QUERY, 1, &yymsp[-4].minor.yy0);} +{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-4].minor.yy0);} break; default: break; /********** End reduce actions ************************************************/ }; - assert( yyruleno>=0 && yyrulenoyyidx -= yysize; - yyact = 
yy_find_reduce_action(yymsp[-yysize].stateno,(YYCODETYPE)yygoto); - if( yyact <= YY_MAX_SHIFTREDUCE ){ - if( yyact>YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; - /* If the reduce action popped at least - ** one element off the stack, then we can push the new element back - ** onto the stack here, and skip the stack overflow test in yy_shift(). - ** That gives a significant speed improvement. */ - if( yysize ){ - yypParser->yyidx++; - yymsp -= yysize-1; - yymsp->stateno = (YYACTIONTYPE)yyact; - yymsp->major = (YYCODETYPE)yygoto; - yymsp->minor = yygotominor; - yyTraceShift(yypParser, yyact); - }else{ - yy_shift(yypParser,yyact,yygoto,&yygotominor); - } - }else{ - assert( yyact == YY_ACCEPT_ACTION ); - yy_accept(yypParser); - } + yyact = yy_find_reduce_action(yymsp[yysize].stateno,(YYCODETYPE)yygoto); + + /* There are no SHIFTREDUCE actions on nonterminals because the table + ** generator has simplified them to pure REDUCE actions. */ + assert( !(yyact>YY_MAX_SHIFT && yyact<=YY_MAX_SHIFTREDUCE) ); + + /* It is not possible for a REDUCE to be followed by an error */ + assert( yyact!=YY_ERROR_ACTION ); + + yymsp += yysize+1; + yypParser->yytos = yymsp; + yymsp->stateno = (YYACTIONTYPE)yyact; + yymsp->major = (YYCODETYPE)yygoto; + yyTraceShift(yypParser, yyact, "... 
then shift"); } /* @@ -2322,7 +2754,7 @@ static void yy_parse_failed( fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); } #endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); + while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser); /* Here code is inserted which will be executed whenever the ** parser fails */ /************ Begin %parse_failure code ***************************************/ @@ -2337,13 +2769,13 @@ static void yy_parse_failed( static void yy_syntax_error( yyParser *yypParser, /* The parser */ int yymajor, /* The major type of the error token */ - YYMINORTYPE yyminor /* The minor type of the error token */ + ParseTOKENTYPE yyminor /* The minor type of the error token */ ){ ParseARG_FETCH; -#define TOKEN (yyminor.yy0) +#define TOKEN yyminor /************ Begin %syntax_error code ****************************************/ - pInfo->validSql = false; + pInfo->valid = false; int32_t outputBufLen = tListLen(pInfo->pzErrMsg); int32_t len = 0; @@ -2380,7 +2812,10 @@ static void yy_accept( fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); } #endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + assert( yypParser->yytos==yypParser->yystack ); /* Here code is inserted which will be executed whenever the ** parser accepts */ /*********** Begin %parse_accept code *****************************************/ @@ -2415,7 +2850,7 @@ void Parse( ParseARG_PDECL /* Optional %extra_argument parameter */ ){ YYMINORTYPE yyminorunion; - int yyact; /* The parser action. */ + unsigned int yyact; /* The parser action. 
*/ #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) int yyendofinput; /* True if we are at the end of input */ #endif @@ -2424,29 +2859,8 @@ void Parse( #endif yyParser *yypParser; /* The parser */ - /* (re)initialize the parser, if necessary */ yypParser = (yyParser*)yyp; - if( yypParser->yyidx<0 ){ -#if YYSTACKDEPTH<=0 - if( yypParser->yystksz <=0 ){ - /*memset(&yyminorunion, 0, sizeof(yyminorunion));*/ - yyminorunion = yyzerominor; - yyStackOverflow(yypParser, &yyminorunion); - return; - } -#endif - yypParser->yyidx = 0; - yypParser->yyerrcnt = -1; - yypParser->yystack[0].stateno = 0; - yypParser->yystack[0].major = 0; -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sInitialize. Empty stack. State 0\n", - yyTracePrompt); - } -#endif - } - yyminorunion.yy0 = yyminor; + assert( yypParser->yytos!=0 ); #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) yyendofinput = (yymajor==0); #endif @@ -2454,21 +2868,34 @@ void Parse( #ifndef NDEBUG if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sInput '%s'\n",yyTracePrompt,yyTokenName[yymajor]); + int stateno = yypParser->yytos->stateno; + if( stateno < YY_MIN_REDUCE ){ + fprintf(yyTraceFILE,"%sInput '%s' in state %d\n", + yyTracePrompt,yyTokenName[yymajor],stateno); + }else{ + fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n", + yyTracePrompt,yyTokenName[yymajor],stateno-YY_MIN_REDUCE); + } } #endif do{ yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); - if( yyact <= YY_MAX_SHIFTREDUCE ){ - if( yyact > YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; - yy_shift(yypParser,yyact,yymajor,&yyminorunion); + if( yyact >= YY_MIN_REDUCE ){ + yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,yyminor); + }else if( yyact <= YY_MAX_SHIFTREDUCE ){ + yy_shift(yypParser,yyact,yymajor,yyminor); +#ifndef YYNOERRORRECOVERY yypParser->yyerrcnt--; +#endif yymajor = YYNOCODE; - }else if( yyact <= YY_MAX_REDUCE ){ - yy_reduce(yypParser,yyact-YY_MIN_REDUCE); + }else if( 
yyact==YY_ACCEPT_ACTION ){ + yypParser->yytos--; + yy_accept(yypParser); + return; }else{ assert( yyact == YY_ERROR_ACTION ); + yyminorunion.yy0 = yyminor; #ifdef YYERRORSYMBOL int yymx; #endif @@ -2498,9 +2925,9 @@ void Parse( ** */ if( yypParser->yyerrcnt<0 ){ - yy_syntax_error(yypParser,yymajor,yyminorunion); + yy_syntax_error(yypParser,yymajor,yyminor); } - yymx = yypParser->yystack[yypParser->yyidx].major; + yymx = yypParser->yytos->major; if( yymx==YYERRORSYMBOL || yyerrorhit ){ #ifndef NDEBUG if( yyTraceFILE ){ @@ -2508,26 +2935,26 @@ void Parse( yyTracePrompt,yyTokenName[yymajor]); } #endif - yy_destructor(yypParser, (YYCODETYPE)yymajor,&yyminorunion); + yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion); yymajor = YYNOCODE; }else{ - while( - yypParser->yyidx >= 0 && - yymx != YYERRORSYMBOL && - (yyact = yy_find_reduce_action( - yypParser->yystack[yypParser->yyidx].stateno, + while( yypParser->yytos >= yypParser->yystack + && yymx != YYERRORSYMBOL + && (yyact = yy_find_reduce_action( + yypParser->yytos->stateno, YYERRORSYMBOL)) >= YY_MIN_REDUCE ){ yy_pop_parser_stack(yypParser); } - if( yypParser->yyidx < 0 || yymajor==0 ){ + if( yypParser->yytos < yypParser->yystack || yymajor==0 ){ yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif yymajor = YYNOCODE; }else if( yymx!=YYERRORSYMBOL ){ - YYMINORTYPE u2; - u2.YYERRSYMDT = 0; - yy_shift(yypParser,yyact,YYERRORSYMBOL,&u2); + yy_shift(yypParser,yyact,YYERRORSYMBOL,yyminor); } } yypParser->yyerrcnt = 3; @@ -2540,7 +2967,7 @@ void Parse( ** Applications can set this macro (for example inside %include) if ** they intend to abandon the parse upon the first syntax error seen. 
*/ - yy_syntax_error(yypParser,yymajor,yyminorunion); + yy_syntax_error(yypParser,yymajor, yyminor); yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); yymajor = YYNOCODE; @@ -2555,24 +2982,29 @@ void Parse( ** three input tokens have been successfully shifted. */ if( yypParser->yyerrcnt<=0 ){ - yy_syntax_error(yypParser,yymajor,yyminorunion); + yy_syntax_error(yypParser,yymajor, yyminor); } yypParser->yyerrcnt = 3; yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); if( yyendofinput ){ yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif } yymajor = YYNOCODE; #endif } - }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 ); + }while( yymajor!=YYNOCODE && yypParser->yytos>yypParser->yystack ); #ifndef NDEBUG if( yyTraceFILE ){ - int i; + yyStackEntry *i; + char cDiv = '['; fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt); - for(i=1; i<=yypParser->yyidx; i++) - fprintf(yyTraceFILE,"%c%s", i==1 ? '[' : ' ', - yyTokenName[yypParser->yystack[i].major]); + for(i=&yypParser->yystack[1]; i<=yypParser->yytos; i++){ + fprintf(yyTraceFILE,"%c%s", cDiv, yyTokenName[i->major]); + cDiv = ' '; + } fprintf(yyTraceFILE,"]\n"); } #endif diff --git a/src/client/src/taos.def b/src/client/src/taos.def index f6de4e866587ce79d224311241510ad0170efa66..39906c7486dc242513f31028c367607fa0197dc9 100644 --- a/src/client/src/taos.def +++ b/src/client/src/taos.def @@ -24,8 +24,6 @@ taos_fetch_row_a taos_subscribe taos_consume taos_unsubscribe -taos_subfields_count -taos_fetch_subfields taos_open_stream taos_close_stream taos_fetch_block diff --git a/src/client/src/tscAst.c b/src/client/src/tscAst.c index 48e147a33789f1fa645c581cc62a38ce9a8c91fc..cf0873b5b620ddc118863e3c2cae86080f1d481f 100644 --- a/src/client/src/tscAst.c +++ b/src/client/src/tscAst.c @@ -17,6 +17,7 @@ #include "taosmsg.h" #include "tast.h" #include "tlog.h" +#include "tscSQLParser.h" #include "tscSyntaxtreefunction.h" #include "tschemautil.h" #include "tsdb.h" @@ -26,7 
+27,6 @@ #include "tstoken.h" #include "ttypes.h" #include "tutil.h" -#include "tscSQLParser.h" /* * @@ -108,13 +108,16 @@ static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols, return NULL; } - int32_t i = 0; size_t nodeSize = sizeof(tSQLSyntaxNode); tSQLSyntaxNode *pNode = NULL; if (pToken->type == TK_ID || pToken->type == TK_TBNAME) { + int32_t i = 0; if (pToken->type == TK_ID) { do { + SSQLToken tableToken = {0}; + extractTableNameFromToken(pToken, &tableToken); + size_t len = strlen(pSchema[i].name); if (strncmp(pToken->z, pSchema[i].name, pToken->n) == 0 && pToken->n == len) break; } while (++i < numOfCols); @@ -269,7 +272,7 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha // get the operator of expr uint8_t optr = getBinaryExprOptr(&t0); - if (optr <= 0) { + if (optr == 0) { pError("not support binary operator:%d", t0.type); tSQLSyntaxNodeDestroy(pLeft, NULL); return NULL; @@ -324,8 +327,9 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha return pn; } else { uint8_t localOptr = getBinaryExprOptr(&t0); - if (localOptr <= 0) { + if (localOptr == 0) { pError("not support binary operator:%d", t0.type); + free(pBinExpr); return NULL; } @@ -418,6 +422,7 @@ void tSQLBinaryExprToString(tSQLBinaryExpr *pExpr, char *dst, int32_t *len) { if (pExpr == NULL) { *dst = 0; *len = 0; + return; } int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType); @@ -490,12 +495,12 @@ static void setInitialValueForRangeQueryCondition(tSKipListQueryCond *q, int8_t case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_BINARY: { q->upperBnd.nType = type; - q->upperBnd.pz = "\0"; + q->upperBnd.pz = NULL; q->upperBnd.nLen = -1; q->lowerBnd.nType = type; - q->lowerBnd.pz = "\0"; - q->lowerBnd.nLen = 0; + q->lowerBnd.pz = NULL; + q->lowerBnd.nLen = -1; } } } @@ -641,16 +646,15 @@ int32_t intersect(tQueryResultset *pLeft, tQueryResultset *pRight, tQueryResults } /* - * + * 
traverse the result and apply the function to each item to check if the item is qualified or not */ -void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, bool (*fp)(tSkipListNode *, void *), - tQueryResultset * pResult) { +static void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, __result_filter_fn_t fp, tQueryResultset *pResult) { assert(pExpr->pLeft->nodeType == TSQL_NODE_COL && pExpr->pRight->nodeType == TSQL_NODE_VALUE); - // brutal force search + // brutal force scan the result list and check for each item in the list int64_t num = pResult->num; for (int32_t i = 0, j = 0; i < pResult->num; ++i) { - if (fp == NULL || (fp != NULL && fp(pResult->pRes[i], pExpr->info) == true)) { + if (fp == NULL || (fp(pResult->pRes[i], pExpr->info) == true)) { pResult->pRes[j++] = pResult->pRes[i]; } else { num--; @@ -832,7 +836,7 @@ void tSQLBinaryExprCalcTraverse(tSQLBinaryExpr *pExprs, int32_t numOfRows, char tSQLSyntaxNode *pRight = pExprs->pRight; /* the left output has result from the left child syntax tree */ - char *pLeftOutput = malloc(sizeof(int64_t) * numOfRows); + char *pLeftOutput = (char*)malloc(sizeof(int64_t) * numOfRows); if (pLeft->nodeType == TSQL_NODE_EXPR) { tSQLBinaryExprCalcTraverse(pLeft->pExpr, numOfRows, pLeftOutput, param, order, getSourceDataBlock); } @@ -933,4 +937,4 @@ void tQueryResultClean(tQueryResultset *pRes) { tfree(pRes->pRes); pRes->num = 0; -} \ No newline at end of file +} diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index d11a279247ff170ba4b931b11a93589e3e2b3693..94ebaefd369975c6873e6dce7ff36c3b513ad91e 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -26,19 +26,18 @@ #include "tutil.h" #include "tnote.h" -void tscProcessFetchRow(SSchedMsg *pMsg); -void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows); -static void tscProcessAsyncRetrieveNextVnode(void *param, TAOS_RES *tres, int numOfRows); -static void tscProcessAsyncContinueRetrieve(void *param, TAOS_RES 
*tres, int numOfRows); +static void tscProcessFetchRow(SSchedMsg *pMsg); +static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows); static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)()); /* - * proxy function to perform sequentially query&retrieve operation. - * If sql queries upon metric and two-stage merge procedure is not needed, - * it will sequentially query&retrieve data for all vnodes in pCmd->pMetricMeta + * Proxy function to perform sequentially query&retrieve operation. + * If sql queries upon a super table and two-stage merge procedure is not involved (when employ the projection + * query), it will sequentially query&retrieve data for all vnodes */ -static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows); +static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows); +static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows); // TODO return the correct error code to client in tscQueueAsyncError void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) { @@ -51,7 +50,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, } int32_t sqlLen = strlen(sqlstr); - if (sqlLen > TSDB_MAX_SQL_LEN) { + if (sqlLen > tsMaxSQLStringLen) { tscError("sql string too long"); tscQueueAsyncError(fp, param); return; @@ -81,7 +80,6 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, return; } - pSql->sqlstr = malloc(sqlLen + 1); if (pSql->sqlstr == NULL) { tscError("%p failed to malloc sql string buffer", pSql); @@ -95,9 +93,9 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, pRes->numOfRows = 1; strtolower(pSql->sqlstr, sqlstr); - tscTrace("%p Async SQL: %s, pObj:%p", pSql, pSql->sqlstr, pObj); + tscDump("%p pObj:%p, Async SQL: %s", pSql, pObj, pSql->sqlstr); - int32_t code = tsParseSql(pSql, 
pObj->acctId, pObj->db, true); + int32_t code = tsParseSql(pSql, true); if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; if (code != TSDB_CODE_SUCCESS) { @@ -109,7 +107,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, tscDoQuery(pSql); } -static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) { +static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) { if (tres == NULL) { return; } @@ -118,35 +116,32 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; - // sequentially retrieve data from remain vnodes first, query vnode specified by vnodeIdx - if (numOfRows == 0 && tscProjectionQueryOnMetric(pCmd)) { - // vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx - assert(pCmd->vnodeIdx >= 0); - - /* reach the maximum number of output rows, abort */ - if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { - (*pSql->fetchFp)(param, tres, 0); - return; - } - - /* update the limit value according to current retrieval results */ - pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; - pCmd->limit.offset = pRes->offset; - - if ((++(pCmd->vnodeIdx)) < tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) { - tscTrace("%p retrieve data from next vnode:%d", pSql, pCmd->vnodeIdx); - - pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first. + if (numOfRows == 0) { + if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes. 
+ tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode); + } else { + /* + * all available virtual node has been checked already, now we need to check + * for the next subclause queries + */ + if (pCmd->clauseIndex < pCmd->numOfClause - 1) { + tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode); + return; + } - tscResetForNextRetrieve(pRes); - pSql->fp = tscProcessAsyncRetrieveNextVnode; - tscProcessSql(pSql); - return; - } - } else { // localreducer has handle this situation - if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) { - pRes->numOfTotal += pRes->numOfRows; + /* + * 1. has reach the limitation + * 2. no remain virtual nodes to be retrieved anymore + */ + (*pSql->fetchFp)(param, pSql, 0); } + + return; + } + + // local reducer has handle this situation during super table non-projection query. + if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) { + pRes->numOfTotalInCurrentClause += pRes->numOfRows; } (*pSql->fetchFp)(param, tres, numOfRows); @@ -157,14 +152,13 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo SSqlObj *pSql = (SSqlObj *)tres; if (pSql == NULL) { // error tscError("sql object is NULL"); - tscQueueAsyncError(pSql->fetchFp, param); return; } SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - if (pRes->qhandle == 0 || numOfRows != 0) { + if ((pRes->qhandle == 0 || numOfRows != 0) && pCmd->command < TSDB_SQL_LOCAL) { if (pRes->qhandle == 0) { tscError("qhandle is NULL"); } else { @@ -183,14 +177,18 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo } /* - * retrieve callback for fetch rows proxy. It serves as the callback function of querying vnode + * retrieve callback for fetch rows proxy. + * The below two functions both serve as the callback function of query virtual node. 
+ * query callback first, and then followed by retrieve callback */ -static void tscProcessAsyncRetrieveNextVnode(void *param, TAOS_RES *tres, int numOfRows) { - tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscProcessAsyncFetchRowsProxy); +static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows) { + // query completed, continue to retrieve + tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchRowsProxy); } -static void tscProcessAsyncContinueRetrieve(void *param, TAOS_RES *tres, int numOfRows) { - tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscProcessAsyncRetrieve); +void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows) { + // query completed, continue to retrieve + tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchSingleRowProxy); } void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), void *param) { @@ -213,7 +211,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi // user-defined callback function is stored in fetchFp pSql->fetchFp = fp; - pSql->fp = tscProcessAsyncFetchRowsProxy; + pSql->fp = tscAsyncFetchRowsProxy; pSql->param = param; tscResetForNextRetrieve(pRes); @@ -245,11 +243,15 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW), pSql->fetchFp = fp; pSql->param = param; - + if (pRes->row >= pRes->numOfRows) { tscResetForNextRetrieve(pRes); - pSql->fp = tscProcessAsyncRetrieve; - pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + pSql->fp = tscAsyncFetchSingleRowProxy; + + if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC && pCmd->command < TSDB_SQL_LOCAL) { + pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + } + tscProcessSql(pSql); } else { SSchedMsg schedMsg; @@ -261,57 +263,45 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW), } } -void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) { +void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows) { SSqlObj *pSql = (SSqlObj *)tres; SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (numOfRows == 0) { - // sequentially retrieve data from remain vnodes. - if (tscProjectionQueryOnMetric(pCmd)) { + if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes. + tscTryQueryNextVnode(pSql, tscAsyncQuerySingleRowForNextVnode); + } else { /* - * vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx till all vnode have been retrieved + * 1. has reach the limitation + * 2. no remain virtual nodes to be retrieved anymore */ - assert(pCmd->vnodeIdx >= 1); - - /* reach the maximum number of output rows, abort */ - if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { - (*pSql->fetchFp)(pSql->param, pSql, NULL); - return; - } - - /* update the limit value according to current retrieval results */ - pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; - - if ((++pCmd->vnodeIdx) <= tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) { - pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first. 
- - tscResetForNextRetrieve(pRes); - pSql->fp = tscProcessAsyncContinueRetrieve; - tscProcessSql(pSql); - return; - } - } else { (*pSql->fetchFp)(pSql->param, pSql, NULL); } - } else { - for (int i = 0; i < pCmd->numOfCols; ++i) - pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row; - pRes->row++; - - (*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow); + return; } + + for (int i = 0; i < pCmd->numOfCols; ++i) + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row; + pRes->row++; + + (*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow); } void tscProcessFetchRow(SSchedMsg *pMsg) { SSqlObj *pSql = (SSqlObj *)pMsg->ahandle; SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - for (int i = 0; i < pCmd->numOfCols; ++i) - pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row; + for (int i = 0; i < pCmd->numOfCols; ++i) { + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row; + } + pRes->row++; - (*pSql->fetchFp)(pSql->param, pSql, pRes->tsrow); } @@ -370,7 +360,7 @@ void tscQueueAsyncRes(SSqlObj *pSql) { tscTrace("%p SqlObj is freed, not add into queue async res", pSql); return; } else { - tscTrace("%p add into queued async res, code:%d", pSql, pSql->res.code); + tscError("%p add into queued async res, code:%d", pSql, pSql->res.code); } SSchedMsg schedMsg; @@ -403,10 +393,16 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows) SSqlCmd *pCmd = &pSql->cmd; int32_t code = TSDB_CODE_SUCCESS; - assert(!pCmd->isInsertFromFile && pSql->signature == pSql); - + assert(pCmd->dataSourceType != 0 && pSql->signature == pSql); + + int32_t index = 0; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, index); + + SMeterMetaInfo* pMeterMetaInfo = 
tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + assert(pQueryInfo->numOfTables == 1 || pQueryInfo->numOfTables == 2); + SDataBlockList *pDataBlocks = pCmd->pDataBlocks; - if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) { + if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) { // restore user defined fp pSql->fp = pSql->fetchFp; tscTrace("%p Async insertion completed, destroy data block list", pSql); @@ -418,17 +414,17 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows) (*pSql->fp)(pSql->param, tres, numOfRows); } else { do { - code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pCmd->vnodeIdx++]); + code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pMeterMetaInfo->vnodeIndex++]); if (code != TSDB_CODE_SUCCESS) { tscTrace("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%d", - pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize, code); + pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize, code); } - } while (code != TSDB_CODE_SUCCESS && pCmd->vnodeIdx < pDataBlocks->nSize); + } while (code != TSDB_CODE_SUCCESS && pMeterMetaInfo->vnodeIndex < pDataBlocks->nSize); // build submit msg may fail if (code == TSDB_CODE_SUCCESS) { - tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize); + tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize); tscProcessSql(pSql); } } @@ -440,7 +436,6 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { SSqlObj *pSql = (SSqlObj *)param; if (pSql == NULL || pSql->signature != pSql) return; - STscObj *pObj = pSql->pTscObj; SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; @@ -460,10 +455,11 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { } else { tscTrace("%p renew meterMeta successfully, command:%d, code:%d, thandle:%p, retry:%d", pSql, pSql->cmd.command, pSql->res.code, 
pSql->thandle, pSql->retry); - - assert(tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta == NULL); - tscGetMeterMeta(pSql, tscGetMeterMetaInfo(&pSql->cmd, 0)->name, 0); - + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); + assert(pMeterMetaInfo->pMeterMeta == NULL); + + tscGetMeterMeta(pSql, pMeterMetaInfo); code = tscSendMsgToServer(pSql); if (code != 0) { pRes->code = code; @@ -481,49 +477,65 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { } if (pSql->pStream == NULL) { - // check if it is a sub-query of metric query first, if true, enter another routine - if ((pSql->cmd.type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pCmd->vnodeIdx >= 0 && pSql->param != NULL); + // check if it is a sub-query of super table query first, if true, enter another routine + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pMeterMetaInfo->vnodeIndex >= 0 && pSql->param != NULL); SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; SSqlObj * pParObj = trs->pParentSqlObj; - assert(pParObj->signature == pParObj && trs->vnodeIdx == pCmd->vnodeIdx && + + assert(pParObj->signature == pParObj && trs->subqueryIndex == pMeterMetaInfo->vnodeIndex && pMeterMetaInfo->pMeterMeta->numOfTags != 0); - tscTrace("%p get metricMeta during metric query successfully", pSql); - - code = tscGetMeterMeta(pSql, tscGetMeterMetaInfo(&pSql->cmd, 0)->name, 0); + tscTrace("%p get metricMeta during super table query successfully", pSql); + + code = tscGetMeterMeta(pSql, pMeterMetaInfo); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; - code = 
tscGetMetricMeta(pSql); + code = tscGetMetricMeta(pSql, 0); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; } else { // normal async query continues - code = tsParseSql(pSql, pObj->acctId, pObj->db, false); - if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + if (pCmd->isParseFinish) { + tscTrace("%p resend data to vnode in metermeta callback since sql has been parsed completed", pSql); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + code = tscGetMeterMeta(pSql, pMeterMetaInfo); + assert(code == TSDB_CODE_SUCCESS); + + if (pMeterMetaInfo->pMeterMeta) { + code = tscSendMsgToServer(pSql); + if (code == TSDB_CODE_SUCCESS) return; + } + } else { + code = tsParseSql(pSql, false); + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + } } } else { // stream computing - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + code = tscGetMeterMeta(pSql, pMeterMetaInfo); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - if (code == TSDB_CODE_SUCCESS && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - code = tscGetMetricMeta(pSql); + if (code == TSDB_CODE_SUCCESS && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { + code = tscGetMetricMeta(pSql, pCmd->clauseIndex); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; } } - if (code != 0) { + if (code != TSDB_CODE_SUCCESS) { + pSql->res.code = code; tscQueueAsyncRes(pSql); return; } @@ -532,10 +544,12 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { tscTrace("%p stream:%p meta is updated, start new query, command:%d", pSql, pSql->pStream, pSql->cmd.command); /* * NOTE: - * transfer the sql function for metric query before get meter/metric meta, + * transfer the sql function for super table query before get meter/metric meta, 
* since in callback functions, only tscProcessSql(pStream->pSql) is executed! */ - tscTansformSQLFunctionForMetricQuery(&pSql->cmd); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + tscTansformSQLFunctionForSTableQuery(pQueryInfo); tscIncStreamExecutionCount(pSql->pStream); } else { tscTrace("%p get meterMeta/metricMeta successfully", pSql); diff --git a/src/client/src/tscCache.c b/src/client/src/tscCache.c index 1ac32d7502ee99c38f84445cfeb767ad316b06ed..666d069a58c936e9028b46f9e6244923ac4be993 100644 --- a/src/client/src/tscCache.c +++ b/src/client/src/tscCache.c @@ -96,11 +96,7 @@ void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, uint16_t port, pObj = (SConnCache *)handle; if (pObj == NULL || pObj->maxSessions == 0) return NULL; -#ifdef CLUSTER - if (data == NULL || ip == 0) { -#else if (data == NULL) { -#endif tscTrace("data:%p ip:%p:%d not valid, not added in cache", data, ip, port); return NULL; } diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 5d857a3145a1bba57e7fb5fb4650de4e1ab08dda..837a0ce0054b8fd7cc92a7b164f667cb6841a276 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -13,8 +13,6 @@ * along with this program. If not, see . 
*/ -#pragma GCC diagnostic ignored "-Wincompatible-pointer-types" - #include "os.h" #include "taosmsg.h" #include "tast.h" @@ -29,6 +27,7 @@ #include "ttime.h" #include "ttypes.h" #include "tutil.h" +#include "tpercentile.h" #define GET_INPUT_CHAR(x) (((char *)((x)->aInputElemBuf)) + ((x)->startOffset) * ((x)->inputBytes)) #define GET_INPUT_CHAR_INDEX(x, y) (GET_INPUT_CHAR(x) + (y) * (x)->inputBytes) @@ -71,7 +70,10 @@ for (int32_t i = 0; i < (ctx)->tagInfo.numOfTagCols; ++i) { \ } \ } while(0); -void noop(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {} +void noop1(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {} +void noop2(SQLFunctionCtx *UNUSED_PARAM(pCtx), int32_t UNUSED_PARAM(index)) {} + +void doFinalizer(SQLFunctionCtx *pCtx) { resetResultInfo(GET_RES_INFO(pCtx)); } typedef struct tValuePair { tVariant v; @@ -136,6 +138,19 @@ typedef struct STSCompInfo { STSBuf *pTSBuf; } STSCompInfo; +typedef struct SRateInfo { + int64_t CorrectionValue; + int64_t firstValue; + TSKEY firstKey; + int64_t lastValue; + TSKEY lastKey; + int8_t hasResult; // flag to denote has value + bool isIRate; // true for IRate functions, false for Rate functions + int64_t num; // for sum/avg + double sum; // for sum/avg +} SRateInfo; + + int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, int16_t *bytes, int16_t *intermediateResBytes, int16_t extLength, bool isSuperTable) { if (!isValidDataType(dataType, dataBytes)) { @@ -190,7 +205,12 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *type = TSDB_DATA_TYPE_BINARY; *bytes = sizeof(SAvgInfo); *intermediateResBytes = *bytes; + return TSDB_CODE_SUCCESS; + } else if (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE) { + *type = TSDB_DATA_TYPE_DOUBLE; + *bytes = sizeof(SRateInfo); + *intermediateResBytes = sizeof(SRateInfo); return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { *type = 
TSDB_DATA_TYPE_BINARY; @@ -219,7 +239,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI } else if (functionId == TSDB_FUNC_TWA) { *type = TSDB_DATA_TYPE_DOUBLE; *bytes = sizeof(STwaInfo); - *intermediateResBytes = sizeof(STwaInfo); + *intermediateResBytes = *bytes; return TSDB_CODE_SUCCESS; } } @@ -251,6 +271,10 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *type = TSDB_DATA_TYPE_DOUBLE; *bytes = sizeof(double); *intermediateResBytes = sizeof(SAvgInfo); + } else if (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE) { + *type = TSDB_DATA_TYPE_DOUBLE; + *bytes = sizeof(double); + *intermediateResBytes = sizeof(SRateInfo); } else if (functionId == TSDB_FUNC_STDDEV) { *type = TSDB_DATA_TYPE_DOUBLE; *bytes = sizeof(double); @@ -270,7 +294,6 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI } else if (functionId == TSDB_FUNC_PERCT) { *type = (int16_t)TSDB_DATA_TYPE_DOUBLE; *bytes = (int16_t)sizeof(double); - //*intermediateResBytes = POINTER_BYTES; *intermediateResBytes = (int16_t)sizeof(double); } else if (functionId == TSDB_FUNC_LEASTSQR) { *type = TSDB_DATA_TYPE_BINARY; @@ -356,8 +379,8 @@ static void function_finalizer(SQLFunctionCtx *pCtx) { pTrace("no result generated, result is set to NULL"); setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); } - - resetResultInfo(GET_RES_INFO(pCtx)); + + doFinalizer(pCtx); } /* @@ -509,10 +532,10 @@ static void do_sum(SQLFunctionCtx *pCtx) { assert(pCtx->size >= pCtx->preAggVals.numOfNull); if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { - int64_t *retVal = pCtx->aOutputBuf; + int64_t *retVal = (int64_t*) pCtx->aOutputBuf; *retVal += pCtx->preAggVals.sum; } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = pCtx->aOutputBuf; + double *retVal = (double*) pCtx->aOutputBuf; *retVal += 
GET_DOUBLE_VAL(&(pCtx->preAggVals.sum)); } } else { // computing based on the true data block @@ -520,7 +543,7 @@ static void do_sum(SQLFunctionCtx *pCtx) { notNullElems = 0; if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { - int64_t *retVal = pCtx->aOutputBuf; + int64_t *retVal = (int64_t*) pCtx->aOutputBuf; if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { LIST_ADD_N(*retVal, pCtx, pData, int8_t, notNullElems, pCtx->inputType); @@ -532,10 +555,10 @@ static void do_sum(SQLFunctionCtx *pCtx) { LIST_ADD_N(*retVal, pCtx, pData, int64_t, notNullElems, pCtx->inputType); } } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *retVal = pCtx->aOutputBuf; + double *retVal = (double*) pCtx->aOutputBuf; LIST_ADD_N(*retVal, pCtx, pData, double, notNullElems, pCtx->inputType); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = pCtx->aOutputBuf; + double *retVal = (double*) pCtx->aOutputBuf; LIST_ADD_N(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType); } } @@ -555,7 +578,7 @@ static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) { } SET_VAL(pCtx, 1, 1); - int64_t *res = pCtx->aOutputBuf; + int64_t *res = (int64_t*) pCtx->aOutputBuf; if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { *res += GET_INT8_VAL(pData); @@ -566,10 +589,10 @@ static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) { } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { *res += GET_INT64_VAL(pData); } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *retVal = pCtx->aOutputBuf; + double *retVal = (double*) pCtx->aOutputBuf; *retVal += GET_DOUBLE_VAL(pData); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = pCtx->aOutputBuf; + double *retVal = (double*) pCtx->aOutputBuf; *retVal += GET_FLOAT_VAL(pData); } @@ -696,7 +719,7 @@ static int32_t first_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY return BLK_DATA_NO_NEEDED; } - SFirstLastInfo *pInfo = (pCtx->aOutputBuf + 
pCtx->inputBytes); + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; } else { // data in current block is not earlier than current result @@ -710,7 +733,7 @@ static int32_t last_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY return BLK_DATA_NO_NEEDED; } - SFirstLastInfo *pInfo = (pCtx->aOutputBuf + pCtx->inputBytes); + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; } else { @@ -844,7 +867,7 @@ static void avg_func_merge(SQLFunctionCtx *pCtx) { static void avg_func_second_merge(SQLFunctionCtx *pCtx) { SResultInfo *pResInfo = GET_RES_INFO(pCtx); - double *sum = pCtx->aOutputBuf; + double *sum = (double*) pCtx->aOutputBuf; char * input = GET_INPUT_CHAR(pCtx); for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { @@ -890,6 +913,7 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) { // cannot set the numOfIteratedElems again since it is set during previous iteration GET_RES_INFO(pCtx)->numOfRes = 1; + doFinalizer(pCtx); } ///////////////////////////////////////////////////////////////////////////////////////////// @@ -910,7 +934,17 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, tval = &pCtx->preAggVals.max; index = pCtx->preAggVals.maxIndex; } - + + /** + * NOTE: work around the bug caused by invalid pre-calculated function. + * Here the selectivity + ts will not return correct value. + * + * The following codes of 3 lines will be removed later. 
+ */ + if (index < 0 || index >= pCtx->size + pCtx->startOffset) { + index = 0; + } + TSKEY key = pCtx->ptsList[index]; if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { @@ -969,10 +1003,10 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, TYPED_LOOPCHECK_N(int16_t, pOutput, p, pCtx, pCtx->inputType, isMin, *notNullElems); } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { int32_t *pData = p; - int32_t *retVal = pOutput; + int32_t *retVal = (int32_t*) pOutput; for (int32_t i = 0; i < pCtx->size; ++i) { - if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*)&pData[i], pCtx->inputType)) { continue; } @@ -1217,27 +1251,27 @@ static void minMax_function_f(SQLFunctionCtx *pCtx, int32_t index, int32_t isMin UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - int16_t *output = pCtx->aOutputBuf; + int16_t *output = (int16_t*) pCtx->aOutputBuf; int16_t i = GET_INT16_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - int32_t *output = pCtx->aOutputBuf; + int32_t *output = (int32_t*) pCtx->aOutputBuf; int32_t i = GET_INT32_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { - int64_t *output = pCtx->aOutputBuf; + int64_t *output = (int64_t*) pCtx->aOutputBuf; int64_t i = GET_INT64_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - float *output = pCtx->aOutputBuf; + float *output = (float*) pCtx->aOutputBuf; float i = GET_FLOAT_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *output = pCtx->aOutputBuf; + double *output = (double*) pCtx->aOutputBuf; double i = GET_DOUBLE_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); @@ 
-1301,7 +1335,7 @@ static void stddev_function(SQLFunctionCtx *pCtx) { switch (pCtx->inputType) { case TSDB_DATA_TYPE_INT: { for (int32_t i = 0; i < pCtx->size; ++i) { - if (pCtx->hasNull && isNull(&((int32_t *)pData)[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) { continue; } *retVal += POW2(((int32_t *)pData)[i] - avg); @@ -1424,8 +1458,8 @@ static void stddev_finalizer(SQLFunctionCtx *pCtx) { *retValue = sqrt(pStd->res / pStd->num); SET_VAL(pCtx, 1, 1); } - - resetResultInfo(GET_RES_INFO(pCtx)); + + doFinalizer(pCtx); } ////////////////////////////////////////////////////////////////////////////////////// @@ -1457,7 +1491,9 @@ static void first_function(SQLFunctionCtx *pCtx) { } memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes); - DO_UPDATE_TAG_COLUMNS(pCtx, i); + + TSKEY k = pCtx->ptsList[i]; + DO_UPDATE_TAG_COLUMNS(pCtx, k); SResultInfo *pInfo = GET_RES_INFO(pCtx); pInfo->hasResult = DATA_SET_FLAG; @@ -1585,7 +1621,7 @@ static void first_dist_func_second_merge(SQLFunctionCtx *pCtx) { assert(pCtx->resultInfo->superTableQ); char * pData = GET_INPUT_CHAR(pCtx); - SFirstLastInfo *pInput = (pData + pCtx->outputBytes); + SFirstLastInfo *pInput = (SFirstLastInfo*) (pData + pCtx->outputBytes); if (pInput->hasResult != DATA_SET_FLAG) { return; } @@ -1668,7 +1704,7 @@ static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t ind if (pInfo->hasResult != DATA_SET_FLAG || pInfo->ts < timestamp[index]) { #if defined(_DEBUG_VIEW) - pTrace("assign index:%d, ts:%lld, val:%d, ", index, timestamp[index], *(int32_t *)pData); + pTrace("assign index:%d, ts:%" PRId64 ", val:%d, ", index, timestamp[index], *(int32_t *)pData); #endif memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); @@ -1763,7 +1799,7 @@ static void last_dist_func_merge(SQLFunctionCtx *pCtx) { static void last_dist_func_second_merge(SQLFunctionCtx *pCtx) { char *pData = GET_INPUT_CHAR(pCtx); - SFirstLastInfo *pInput = (pData + 
pCtx->outputBytes); + SFirstLastInfo *pInput = (SFirstLastInfo*) (pData + pCtx->outputBytes); if (pInput->hasResult != DATA_SET_FLAG) { return; } @@ -1825,7 +1861,7 @@ static void last_row_finalizer(SQLFunctionCtx *pCtx) { } GET_RES_INFO(pCtx)->numOfRes = 1; - resetResultInfo(GET_RES_INFO(pCtx)); + doFinalizer(pCtx); } ////////////////////////////////////////////////////////////////////////////////// @@ -1864,14 +1900,15 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, tVariantCreateFromBinary(&val, pData, tDataTypeDesc[type].nSize, type); tValuePair **pList = pInfo->res; - + assert(pList != NULL); + if (pInfo->num < maxLen) { if (pInfo->num == 0 || ((type >= TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_BIGINT) && val.i64Key >= pList[pInfo->num - 1]->v.i64Key) || ((type >= TSDB_DATA_TYPE_FLOAT && type <= TSDB_DATA_TYPE_DOUBLE) && val.dKey >= pList[pInfo->num - 1]->v.dKey)) { - valuePairAssign(pList[pInfo->num], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[pInfo->num], type, (const char*)&val.i64Key, ts, pTags, pTagInfo, stage); } else { int32_t i = pInfo->num - 1; @@ -1887,7 +1924,7 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, } } - valuePairAssign(pList[i + 1], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[i + 1], type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage); } pInfo->num++; @@ -1909,7 +1946,7 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, } } - valuePairAssign(pList[i], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[i], type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage); } } } @@ -1923,7 +1960,7 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa if (pInfo->num < maxLen) { if (pInfo->num == 0) { - valuePairAssign(pList[pInfo->num], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[pInfo->num], 
type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage); } else { int32_t i = pInfo->num - 1; @@ -1939,7 +1976,7 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa } } - valuePairAssign(pList[i + 1], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[i + 1], type, (const char*)&val.i64Key, ts, pTags, pTagInfo, stage); } pInfo->num++; @@ -1961,7 +1998,7 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa } } - valuePairAssign(pList[i], type, &val.i64Key, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[i], type, (const char*)&val.i64Key, ts, pTags, pTagInfo, stage); } } } @@ -2006,15 +2043,8 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { STopBotInfo *pRes = pResInfo->interResultBuf; tValuePair **tvp = pRes->res; - int32_t step = 0; - - // in case of second stage merge, always use incremental output. - if (pCtx->currentStage == SECONDARY_STAGE_MERGE) { - step = QUERY_ASC_FORWARD_STEP; - } else { - step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); - } - + + int32_t step = QUERY_ASC_FORWARD_STEP; int32_t len = GET_RES_INFO(pCtx)->numOfRes; switch (type) { @@ -2101,7 +2131,7 @@ bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *mi return true; } - tValuePair *pRes = pTopBotInfo->res; + tValuePair *pRes = (tValuePair*) pTopBotInfo->res; if (functionId == TSDB_FUNC_TOP) { switch (pCtx->inputType) { @@ -2153,7 +2183,7 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) { // only the first_stage_merge is directly written data into final output buffer if (pResInfo->superTableQ && pCtx->currentStage != SECONDARY_STAGE_MERGE) { - return pCtx->aOutputBuf; + return (STopBotInfo*) pCtx->aOutputBuf; } else { // for normal table query and super table at the secondary_stage, result is written to intermediate buffer return pResInfo->interResultBuf; } @@ -2167,14 +2197,14 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx 
*pCtx) { */ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) { char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo); - pTopBotInfo->res = tmp; + pTopBotInfo->res = (tValuePair**) tmp; tmp += POINTER_BYTES * pCtx->param[0].i64Key; size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen; for (int32_t i = 0; i < pCtx->param[0].i64Key; ++i) { - pTopBotInfo->res[i] = tmp; + pTopBotInfo->res[i] = (tValuePair*) tmp; pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair); tmp += size; } @@ -2393,8 +2423,8 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) { GET_TRUE_DATA_TYPE(); copyTopBotRes(pCtx, type); - - resetResultInfo(pResInfo); + + doFinalizer(pCtx); } /////////////////////////////////////////////////////////////////////////////////////////////// @@ -2409,7 +2439,7 @@ static bool percentile_function_setup(SQLFunctionCtx *pCtx) { SResultInfo *pResInfo = GET_RES_INFO(pCtx); SSchema field[1] = {{pCtx->inputType, "dummyCol", 0, pCtx->inputBytes}}; - tColModel *pModel = tColModelCreate(field, 1, 1000); + SColumnModel *pModel = createColumnModel(field, 1, 1000); int32_t orderIdx = 0; // tOrderDesc object @@ -2470,8 +2500,8 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) { tOrderDescDestroy(pMemBucket->pOrderDesc); tMemBucketDestroy(pMemBucket); - - resetResultInfo(GET_RES_INFO(pCtx)); + + doFinalizer(pCtx); } ////////////////////////////////////////////////////////////////////////////////// @@ -2479,7 +2509,7 @@ static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) { SResultInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->superTableQ && pCtx->currentStage != SECONDARY_STAGE_MERGE) { - return pCtx->aOutputBuf; + return (SAPercentileInfo*) pCtx->aOutputBuf; } else { return pResInfo->interResultBuf; } @@ -2590,8 +2620,8 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) { SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_CHAR(pCtx); - pInput->pHisto = (char *)pInput + sizeof(SAPercentileInfo); - 
pInput->pHisto->elems = (char *)pInput->pHisto + sizeof(SHistogramInfo); + pInput->pHisto = (SHistogramInfo*) ((char *)pInput + sizeof(SAPercentileInfo)); + pInput->pHisto->elems = (SHistBin*) ((char *)pInput->pHisto + sizeof(SHistogramInfo)); if (pInput->pHisto->numOfElems <= 0) { return; @@ -2604,13 +2634,13 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) { if (pHisto->numOfElems <= 0) { memcpy(pHisto, pInput->pHisto, size); - pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); } else { - pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN); memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN); - pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); tHistogramDestroy(&pRes); } @@ -2622,8 +2652,8 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) { static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) { SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_CHAR(pCtx); - pInput->pHisto = (char *)pInput + sizeof(SAPercentileInfo); - pInput->pHisto->elems = (char *)pInput->pHisto + sizeof(SHistogramInfo); + pInput->pHisto = (SHistogramInfo*) ((char *)pInput + sizeof(SAPercentileInfo)); + pInput->pHisto->elems = (SHistBin*) ((char *)pInput->pHisto + sizeof(SHistogramInfo)); if (pInput->pHisto->numOfElems <= 0) { return; @@ -2634,9 +2664,9 @@ static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) { if (pHisto->numOfElems <= 0) { memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1)); - pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); } else { - pHisto->elems = (char 
*)pHisto + sizeof(SHistogramInfo); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN); tHistogramDestroy(&pOutput->pHisto); @@ -2679,8 +2709,8 @@ static void apercentile_finalizer(SQLFunctionCtx *pCtx) { return; } } - - resetResultInfo(pResInfo); + + doFinalizer(pCtx); } ///////////////////////////////////////////////////////////////////////////////// @@ -2730,7 +2760,7 @@ static void leastsquares_function(SQLFunctionCtx *pCtx) { int32_t *p = pData; // LEASTSQR_CAL_LOOP(pCtx, param, pParamData, p); for (int32_t i = 0; i < pCtx->size; ++i) { - if (pCtx->hasNull && isNull(p, pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) p, pCtx->inputType)) { continue; } @@ -2860,7 +2890,7 @@ static void leastsquares_finalizer(SQLFunctionCtx *pCtx) { param[1][2] /= param[1][1]; sprintf(pCtx->aOutputBuf, "(%lf, %lf)", param[0][2], param[1][2]); - resetResultInfo(GET_RES_INFO(pCtx)); + doFinalizer(pCtx); } static void date_col_output_function(SQLFunctionCtx *pCtx) { @@ -2872,20 +2902,24 @@ static void date_col_output_function(SQLFunctionCtx *pCtx) { *(int64_t *)(pCtx->aOutputBuf) = pCtx->nStartQueryTimestamp; } +static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_t index) { + date_col_output_function(pCtx); +} + static void col_project_function(SQLFunctionCtx *pCtx) { INC_INIT_VAL(pCtx, pCtx->size); - char *pDest = 0; + char *pData = GET_INPUT_CHAR(pCtx); if (pCtx->order == TSQL_SO_ASC) { - pDest = pCtx->aOutputBuf; + memcpy(pCtx->aOutputBuf, pData, (size_t)pCtx->size * pCtx->inputBytes); } else { - pDest = pCtx->aOutputBuf - (pCtx->size - 1) * pCtx->inputBytes; + for(int32_t i = 0; i < pCtx->size; ++i) { + memcpy(pCtx->aOutputBuf + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes, + pCtx->inputBytes); + } } - char *pData = GET_INPUT_CHAR(pCtx); - memcpy(pDest, pData, (size_t)pCtx->size * pCtx->inputBytes); - - 
pCtx->aOutputBuf += pCtx->size * pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + pCtx->aOutputBuf += pCtx->size * pCtx->outputBytes; } static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { @@ -2900,7 +2934,7 @@ static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); - pCtx->aOutputBuf += pCtx->inputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + pCtx->aOutputBuf += pCtx->inputBytes/* * GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/; } /** @@ -2912,18 +2946,17 @@ static void tag_project_function(SQLFunctionCtx *pCtx) { INC_INIT_VAL(pCtx, pCtx->size); assert(pCtx->inputBytes == pCtx->outputBytes); - int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); for (int32_t i = 0; i < pCtx->size; ++i) { tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType); - pCtx->aOutputBuf += pCtx->outputBytes * factor; + pCtx->aOutputBuf += pCtx->outputBytes; } } static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { INC_INIT_VAL(pCtx, 1); tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->tag.nType); - pCtx->aOutputBuf += pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + pCtx->aOutputBuf += pCtx->outputBytes; } /** @@ -2972,8 +3005,8 @@ static void diff_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); - int32_t i = (pCtx->order == TSQL_SO_ASC) ? 
0 : pCtx->size - 1; + TSKEY * pTimestamp = pCtx->ptsOutputBuf; switch (pCtx->inputType) { @@ -2982,7 +3015,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { int32_t *pOutput = (int32_t *)pCtx->aOutputBuf; for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } @@ -2993,14 +3026,14 @@ static void diff_function(SQLFunctionCtx *pCtx) { *pOutput = pData[i] - pCtx->param[1].i64Key; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } else { *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } pCtx->param[1].i64Key = pData[i]; @@ -3014,25 +3047,25 @@ static void diff_function(SQLFunctionCtx *pCtx) { int64_t *pOutput = (int64_t *)pCtx->aOutputBuf; for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet pCtx->param[1].i64Key = pData[i]; pCtx->param[1].nType = pCtx->inputType; - } else if (i == 0) { + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { *pOutput = pData[i] - pCtx->param[1].i64Key; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } else { - *pOutput = pData[i] - pData[i - 1]; + *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } pCtx->param[1].i64Key = pData[i]; @@ -3046,23 +3079,23 @@ static void diff_function(SQLFunctionCtx *pCtx) { double *pOutput = (double *)pCtx->aOutputBuf; for (; i < pCtx->size && i >= 0; i += step) { - if 
(pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet pCtx->param[1].dKey = pData[i]; pCtx->param[1].nType = pCtx->inputType; - } else if (i == 0) { + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { *pOutput = pData[i] - pCtx->param[1].dKey; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } else { - *pOutput = pData[i] - pData[i - 1]; + *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } pCtx->param[1].dKey = pData[i]; @@ -3076,23 +3109,25 @@ static void diff_function(SQLFunctionCtx *pCtx) { float *pOutput = (float *)pCtx->aOutputBuf; for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet pCtx->param[1].dKey = pData[i]; pCtx->param[1].nType = pCtx->inputType; - } else if (i == 0) { + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { *pOutput = pData[i] - pCtx->param[1].dKey; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + + pOutput += 1; + pTimestamp += 1; } else { - *pOutput = pData[i] - pData[i - 1]; + *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + + pOutput += 1; + pTimestamp += 1; } // keep the last value, the remain may be all null @@ -3107,23 +3142,24 @@ static void diff_function(SQLFunctionCtx *pCtx) { int16_t *pOutput = (int16_t *)pCtx->aOutputBuf; for (; i < pCtx->size && i >= 0; i += step) { 
- if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) { + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet pCtx->param[1].i64Key = pData[i]; pCtx->param[1].nType = pCtx->inputType; - } else if (i == 0) { + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { *pOutput = pData[i] - pCtx->param[1].i64Key; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + pOutput += 1; + pTimestamp += 1; } else { - *pOutput = pData[i] - pData[i - 1]; + *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + + pOutput += 1; + pTimestamp += 1; } pCtx->param[1].i64Key = pData[i]; @@ -3144,16 +3180,18 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet pCtx->param[1].i64Key = pData[i]; pCtx->param[1].nType = pCtx->inputType; - } else if (i == 0) { + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { *pOutput = pData[i] - pCtx->param[1].i64Key; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + + pOutput += 1; + pTimestamp += 1; } else { - *pOutput = pData[i] - pData[i - 1]; + *pOutput = pData[i] - pData[i - step]; *pTimestamp = pCtx->ptsList[i]; - pOutput += step; - pTimestamp += step; + + pOutput += 1; + pTimestamp += 1; } pCtx->param[1].i64Key = pData[i]; @@ -3178,8 +3216,8 @@ static void diff_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->numOfRes += forwardStep; - pCtx->aOutputBuf = pCtx->aOutputBuf + forwardStep * pCtx->outputBytes * step; - pCtx->ptsOutputBuf = (char *)pCtx->ptsOutputBuf + forwardStep * TSDB_KEYSIZE * step; + pCtx->aOutputBuf += forwardStep * pCtx->outputBytes; + pCtx->ptsOutputBuf = (char*)pCtx->ptsOutputBuf + forwardStep * 
TSDB_KEYSIZE; } } @@ -3206,7 +3244,7 @@ static void diff_function_f(SQLFunctionCtx *pCtx, int32_t index) { GET_RES_INFO(pCtx)->numOfRes += 1; } - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + int32_t step = 1/*GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/; switch (pCtx->inputType) { case TSDB_DATA_TYPE_INT: { @@ -3269,24 +3307,24 @@ char *arithmetic_callback_function(void *param, char *name, int32_t colId) { static void arithmetic_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->numOfRes += pCtx->size; - SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[0].pz; + SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz; tSQLBinaryExprCalcTraverse(sas->pExpr->pBinExprInfo.pBinExpr, pCtx->size, pCtx->aOutputBuf, sas, pCtx->order, arithmetic_callback_function); - pCtx->aOutputBuf += pCtx->outputBytes * pCtx->size * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + pCtx->aOutputBuf += pCtx->outputBytes * pCtx->size/* * GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/; + pCtx->param[1].pz = NULL; } -static bool arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { +static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { INC_INIT_VAL(pCtx, 1); - SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[0].pz; + SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz; sas->offset = index; tSQLBinaryExprCalcTraverse(sas->pExpr->pBinExprInfo.pBinExpr, 1, pCtx->aOutputBuf, sas, pCtx->order, arithmetic_callback_function); - pCtx->aOutputBuf += pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); - return true; + pCtx->aOutputBuf += pCtx->outputBytes/* * GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/; } #define LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \ @@ -3502,7 +3540,6 @@ void spread_func_sec_merge(SQLFunctionCtx *pCtx) { pCtx->param[3].dKey = pData->max; } - // pCtx->numOfIteratedElems += 1; GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; } @@ -3534,9 +3571,8 
@@ void spread_function_finalizer(SQLFunctionCtx *pCtx) { *(double *)pCtx->aOutputBuf = pInfo->max - pInfo->min; } - - // SET_VAL(pCtx, pCtx->numOfIteratedElems, 1); - resetResultInfo(GET_RES_INFO(pCtx)); + + GET_RES_INFO(pCtx)->numOfRes = 1; // todo add test case } /* @@ -3709,7 +3745,7 @@ static void getStatics_i16(int64_t *primaryKey, int16_t *data, int32_t numOfRow, // int16_t lastVal = TSDB_DATA_SMALLINT_NULL; for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull(&data[i], TSDB_DATA_TYPE_SMALLINT)) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_SMALLINT)) { (*numOfNull) += 1; continue; } @@ -3749,7 +3785,7 @@ static void getStatics_i32(int64_t *primaryKey, int32_t *data, int32_t numOfRow, // int32_t lastVal = TSDB_DATA_INT_NULL; for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull(&data[i], TSDB_DATA_TYPE_INT)) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_INT)) { (*numOfNull) += 1; continue; } @@ -3786,7 +3822,7 @@ static void getStatics_i64(int64_t *primaryKey, int64_t *data, int32_t numOfRow, assert(numOfRow <= INT16_MAX); for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull(&data[i], TSDB_DATA_TYPE_BIGINT)) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_BIGINT)) { (*numOfNull) += 1; continue; } @@ -3941,9 +3977,9 @@ void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, in } else if (type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_TIMESTAMP) { getStatics_i64(primaryKey, (int64_t *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull); } else if (type == TSDB_DATA_TYPE_DOUBLE) { - getStatics_d(primaryKey, (double *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull); + getStatics_d(primaryKey, (double *)data, numOfRow, (double*) min, (double*) max, (double*) sum, minIndex, maxIndex, numOfNull); } else if (type == TSDB_DATA_TYPE_FLOAT) { - getStatics_f(primaryKey, (float *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull); + getStatics_f(primaryKey, (float *)data, numOfRow, 
(double*) min, (double*) max, (double*) sum, minIndex, maxIndex, numOfNull); } } } @@ -4061,44 +4097,42 @@ static void twa_function(SQLFunctionCtx *pCtx) { // pCtx->numOfIteratedElems += notNullElems; } -static bool twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { +static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return true; + return; } - + SET_VAL(pCtx, 1, 1); - + TSKEY *primaryKey = pCtx->ptsList; - + SResultInfo *pResInfo = GET_RES_INFO(pCtx); - STwaInfo * pInfo = pResInfo->interResultBuf; - + STwaInfo *pInfo = pResInfo->interResultBuf; + if (pInfo->lastKey == INT64_MIN) { pInfo->lastKey = pCtx->nStartQueryTimestamp; setTWALastVal(pCtx, pData, 0, pInfo); - + pInfo->hasResult = DATA_SET_FLAG; } - + if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT || pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { pInfo->dOutput += pInfo->dLastValue * (primaryKey[index] - pInfo->lastKey); } else { pInfo->iOutput += pInfo->iLastValue * (primaryKey[index] - pInfo->lastKey); } - + // record the last key/value pInfo->lastKey = primaryKey[index]; setTWALastVal(pCtx, pData, 0, pInfo); - + // pCtx->numOfIteratedElems += 1; pResInfo->hasResult = DATA_SET_FLAG; - + if (pResInfo->superTableQ) { memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(STwaInfo)); } - - return true; } static void twa_func_merge(SQLFunctionCtx *pCtx) { @@ -4110,7 +4144,7 @@ static void twa_func_merge(SQLFunctionCtx *pCtx) { int32_t numOfNotNull = 0; for (int32_t i = 0; i < pCtx->size; ++i, indicator += sizeof(STwaInfo)) { - STwaInfo *pInput = indicator; + STwaInfo *pInput = (STwaInfo*) indicator; if (pInput->hasResult != DATA_SET_FLAG) { continue; @@ -4171,7 +4205,7 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) { } GET_RES_INFO(pCtx)->numOfRes = 1; - resetResultInfo(GET_RES_INFO(pCtx)); + doFinalizer(pCtx); } /** @@ -4212,7 +4246,7 @@ static void interp_function(SQLFunctionCtx *pCtx) 
{ if (pCtx->outputType == TSDB_DATA_TYPE_FLOAT) { float v = GET_DOUBLE_VAL(pVal); - assignVal(pCtx->aOutputBuf, &v, pCtx->outputBytes, pCtx->outputType); + assignVal(pCtx->aOutputBuf, (const char*) &v, pCtx->outputBytes, pCtx->outputType); } else { assignVal(pCtx->aOutputBuf, pVal, pCtx->outputBytes, pCtx->outputType); } @@ -4333,9 +4367,465 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) { strcpy(pCtx->aOutputBuf, pTSbuf->path); tsBufDestory(pTSbuf); - resetResultInfo(GET_RES_INFO(pCtx)); + doFinalizer(pCtx); +} + +////////////////////////////////////////////////////////////////////////////////////////////// +// RATE functions + +static double do_calc_rate(const SRateInfo* pRateInfo) { + if ((INT64_MIN == pRateInfo->lastKey) || (INT64_MIN == pRateInfo->firstKey) || (pRateInfo->firstKey >= pRateInfo->lastKey)) { + return 0; + } + + int64_t diff = 0; + + if (pRateInfo->isIRate) { + diff = pRateInfo->lastValue; + if (diff >= pRateInfo->firstValue) { + diff -= pRateInfo->firstValue; + } + } else { + diff = pRateInfo->CorrectionValue + pRateInfo->lastValue - pRateInfo->firstValue; + if (diff <= 0) { + return 0; + } + } + + int64_t duration = pRateInfo->lastKey - pRateInfo->firstKey; + duration = (duration + 500) / 1000; + + double resultVal = ((double)diff) / duration; + + pTrace("do_calc_rate() isIRate:%d firstKey:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " lastValue:%" PRId64 " CorrectionValue:%" PRId64 " resultVal:%f", + pRateInfo->isIRate, pRateInfo->firstKey, pRateInfo->lastKey, pRateInfo->firstValue, pRateInfo->lastValue, pRateInfo->CorrectionValue, resultVal); + + return resultVal; +} + + +static bool rate_function_setup(SQLFunctionCtx *pCtx) { + if (!function_setup(pCtx)) { + return false; + } + + SResultInfo *pResInfo = GET_RES_INFO(pCtx); //->aOutputBuf + pCtx->outputBytes; + SRateInfo * pInfo = pResInfo->interResultBuf; + + pInfo->CorrectionValue = 0; + pInfo->firstKey = INT64_MIN; + pInfo->lastKey = INT64_MIN; + pInfo->firstValue = 
INT64_MIN; + pInfo->lastValue = INT64_MIN; + pInfo->num = 0; + pInfo->sum = 0; + + pInfo->hasResult = 0; + pInfo->isIRate = ((pCtx->functionId == TSDB_FUNC_IRATE) || (pCtx->functionId == TSDB_FUNC_SUM_IRATE) || (pCtx->functionId == TSDB_FUNC_AVG_IRATE)); + return true; +} + + +static void rate_function(SQLFunctionCtx *pCtx) { + + assert(IS_DATA_BLOCK_LOADED(pCtx->blockStatus)); + + int32_t notNullElems = 0; + SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + TSKEY *primaryKey = pCtx->ptsList; + + pTrace("%p rate_function() size:%d, hasNull:%d", pCtx, pCtx->size, pCtx->hasNull); + + for (int32_t i = 0; i < pCtx->size; ++i) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, i); + if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { + pTrace("%p rate_function() index of null data:%d", pCtx, i); + continue; + } + + notNullElems++; + + int64_t v = 0; + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + v = (int64_t)GET_INT8_VAL(pData); + break; + case TSDB_DATA_TYPE_SMALLINT: + v = (int64_t)GET_INT16_VAL(pData); + break; + case TSDB_DATA_TYPE_INT: + v = (int64_t)GET_INT32_VAL(pData); + break; + case TSDB_DATA_TYPE_BIGINT: + v = (int64_t)GET_INT64_VAL(pData); + break; + default: + assert(0); + } + + if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) { + pRateInfo->firstValue = v; + pRateInfo->firstKey = primaryKey[i]; + + pTrace("firstValue:%" PRId64 " firstKey:%" PRId64, pRateInfo->firstValue, pRateInfo->firstKey); + } + + if (INT64_MIN == pRateInfo->lastValue) { + pRateInfo->lastValue = v; + } else if (v < pRateInfo->lastValue) { + pRateInfo->CorrectionValue += pRateInfo->lastValue; + pTrace("CorrectionValue:%" PRId64, pRateInfo->CorrectionValue); + } + + pRateInfo->lastValue = v; + pRateInfo->lastKey = primaryKey[i]; + pTrace("lastValue:%" PRId64 " lastKey:%" PRId64, pRateInfo->lastValue, pRateInfo->lastKey); + } + + if (!pCtx->hasNull) { + assert(pCtx->size == notNullElems); 
+ } + + SET_VAL(pCtx, notNullElems, 1); + + if (notNullElems > 0) { + pRateInfo->hasResult = DATA_SET_FLAG; + pResInfo->hasResult = DATA_SET_FLAG; + } + + // keep the data into the final output buffer for super table query since this execution may be the last one + if (pResInfo->superTableQ) { + memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo)); + } +} + +static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) { + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { + return; + } + + // NOTE: keep the intermediate result into the interResultBuf + SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + TSKEY *primaryKey = pCtx->ptsList; + + int64_t v = 0; + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + v = (int64_t)GET_INT8_VAL(pData); + break; + case TSDB_DATA_TYPE_SMALLINT: + v = (int64_t)GET_INT16_VAL(pData); + break; + case TSDB_DATA_TYPE_INT: + v = (int64_t)GET_INT32_VAL(pData); + break; + case TSDB_DATA_TYPE_BIGINT: + v = (int64_t)GET_INT64_VAL(pData); + break; + default: + assert(0); + } + + if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) { + pRateInfo->firstValue = v; + pRateInfo->firstKey = primaryKey[index]; + } + + if (INT64_MIN == pRateInfo->lastValue) { + pRateInfo->lastValue = v; + } else if (v < pRateInfo->lastValue) { + pRateInfo->CorrectionValue += pRateInfo->lastValue; + } + + pRateInfo->lastValue = v; + pRateInfo->lastKey = primaryKey[index]; + + pTrace("====%p rate_function_f() index:%d lastValue:%" PRId64 " lastKey:%" PRId64 " CorrectionValue:%" PRId64, pCtx, index, pRateInfo->lastValue, pRateInfo->lastKey, pRateInfo->CorrectionValue); + + SET_VAL(pCtx, 1, 1); + + // set has result flag + pRateInfo->hasResult = DATA_SET_FLAG; + pResInfo->hasResult = DATA_SET_FLAG; + + // keep the data into the final output buffer for super table query since this execution may be the 
last one + if (pResInfo->superTableQ) { + memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo)); + } +} + + + +static void rate_func_merge(SQLFunctionCtx *pCtx) { + SResultInfo *pResInfo = GET_RES_INFO(pCtx); + assert(pResInfo->superTableQ); + + pTrace("rate_func_merge() size:%d", pCtx->size); + + //SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + SRateInfo *pBuf = (SRateInfo *)pCtx->aOutputBuf; + char *indicator = pCtx->aInputElemBuf; + + assert(1 == pCtx->size); + + int32_t numOfNotNull = 0; + for (int32_t i = 0; i < pCtx->size; ++i, indicator += sizeof(SRateInfo)) { + SRateInfo *pInput = (SRateInfo *)indicator; + if (DATA_SET_FLAG != pInput->hasResult) { + continue; + } + + numOfNotNull++; + memcpy(pBuf, pInput, sizeof(SRateInfo)); + pTrace("%p rate_func_merge() isIRate:%d firstKey:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " lastValue:%" PRId64 " CorrectionValue:%" PRId64, + pCtx, pInput->isIRate, pInput->firstKey, pInput->lastKey, pInput->firstValue, pInput->lastValue, pInput->CorrectionValue); + } + + SET_VAL(pCtx, numOfNotNull, 1); + + if (numOfNotNull > 0) { + pBuf->hasResult = DATA_SET_FLAG; + } + + return; +} + + + +static void rate_func_copy(SQLFunctionCtx *pCtx) { + assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); + + SResultInfo *pResInfo = GET_RES_INFO(pCtx); + memcpy(pResInfo->interResultBuf, pCtx->aInputElemBuf, (size_t)pCtx->inputBytes); + pResInfo->hasResult = ((SRateInfo*)pCtx->aInputElemBuf)->hasResult; + + SRateInfo* pRateInfo = (SRateInfo*)pCtx->aInputElemBuf; + pTrace("%p rate_func_second_merge() firstKey:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " lastValue:%" PRId64 " CorrectionValue:%" PRId64 " hasResult:%d", + pCtx, pRateInfo->firstKey, pRateInfo->lastKey, pRateInfo->firstValue, pRateInfo->lastValue, pRateInfo->CorrectionValue, pRateInfo->hasResult); +} + + + +static void rate_finalizer(SQLFunctionCtx *pCtx) { + SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = 
(SRateInfo *)pResInfo->interResultBuf; + + pTrace("%p isIRate:%d firstKey:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " lastValue:%" PRId64 " CorrectionValue:%" PRId64 " hasResult:%d", + pCtx, pRateInfo->isIRate, pRateInfo->firstKey, pRateInfo->lastKey, pRateInfo->firstValue, pRateInfo->lastValue, pRateInfo->CorrectionValue, pRateInfo->hasResult); + + if (pRateInfo->hasResult != DATA_SET_FLAG) { + setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + return; + } + + *(double*)pCtx->aOutputBuf = do_calc_rate(pRateInfo); + + pTrace("rate_finalizer() output result:%f", *(double *)pCtx->aOutputBuf); + + // cannot set the numOfIteratedElems again since it is set during previous iteration + pResInfo->numOfRes = 1; + pResInfo->hasResult = DATA_SET_FLAG; + + doFinalizer(pCtx); +} + + +static void irate_function(SQLFunctionCtx *pCtx) { + + assert(IS_DATA_BLOCK_LOADED(pCtx->blockStatus)); + + int32_t notNullElems = 0; + SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + TSKEY *primaryKey = pCtx->ptsList; + + pTrace("%p irate_function() size:%d, hasNull:%d", pCtx, pCtx->size, pCtx->hasNull); + + if (pCtx->size < 1) { + return; + } + + for (int32_t i = pCtx->size - 1; i >= 0; --i) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, i); + if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { + pTrace("%p irate_function() index of null data:%d", pCtx, i); + continue; + } + + notNullElems++; + + int64_t v = 0; + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + v = (int64_t)GET_INT8_VAL(pData); + break; + case TSDB_DATA_TYPE_SMALLINT: + v = (int64_t)GET_INT16_VAL(pData); + break; + case TSDB_DATA_TYPE_INT: + v = (int64_t)GET_INT32_VAL(pData); + break; + case TSDB_DATA_TYPE_BIGINT: + v = (int64_t)GET_INT64_VAL(pData); + break; + default: + assert(0); + } + + // TODO: calc once if only call this function once ???? 
+ if ((INT64_MIN == pRateInfo->lastKey) || (INT64_MIN == pRateInfo->lastValue)) { + pRateInfo->lastValue = v; + pRateInfo->lastKey = primaryKey[i]; + + pTrace("%p irate_function() lastValue:%" PRId64 " lastKey:%" PRId64, pCtx, pRateInfo->lastValue, pRateInfo->lastKey); + continue; + } + + if ((INT64_MIN == pRateInfo->firstKey) || (INT64_MIN == pRateInfo->firstValue)){ + pRateInfo->firstValue = v; + pRateInfo->firstKey = primaryKey[i]; + + pTrace("%p irate_function() firstValue:%" PRId64 " firstKey:%" PRId64, pCtx, pRateInfo->firstValue, pRateInfo->firstKey); + break; + } + } + + SET_VAL(pCtx, notNullElems, 1); + + if (notNullElems > 0) { + pRateInfo->hasResult = DATA_SET_FLAG; + pResInfo->hasResult = DATA_SET_FLAG; + } + + // keep the data into the final output buffer for super table query since this execution may be the last one + if (pResInfo->superTableQ) { + memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo)); + } } +static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) { + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { + return; + } + + // NOTE: keep the intermediate result into the interResultBuf + SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + TSKEY *primaryKey = pCtx->ptsList; + + int64_t v = 0; + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + v = (int64_t)GET_INT8_VAL(pData); + break; + case TSDB_DATA_TYPE_SMALLINT: + v = (int64_t)GET_INT16_VAL(pData); + break; + case TSDB_DATA_TYPE_INT: + v = (int64_t)GET_INT32_VAL(pData); + break; + case TSDB_DATA_TYPE_BIGINT: + v = (int64_t)GET_INT64_VAL(pData); + break; + default: + assert(0); + } + + pRateInfo->firstKey = pRateInfo->lastKey; + pRateInfo->firstValue = pRateInfo->lastValue; + + pRateInfo->lastValue = v; + pRateInfo->lastKey = primaryKey[index]; + + pTrace("====%p irate_function_f() index:%d lastValue:%" PRId64 " lastKey:%" PRId64 " 
firstValue:%" PRId64 " firstKey:%" PRId64, pCtx, index, pRateInfo->lastValue, pRateInfo->lastKey, pRateInfo->firstValue , pRateInfo->firstKey); + + SET_VAL(pCtx, 1, 1); + + // set has result flag + pRateInfo->hasResult = DATA_SET_FLAG; + pResInfo->hasResult = DATA_SET_FLAG; + + // keep the data into the final output buffer for super table query since this execution may be the last one + if (pResInfo->superTableQ) { + memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo)); + } +} + +static void do_sumrate_merge(SQLFunctionCtx *pCtx) { + SResultInfo *pResInfo = GET_RES_INFO(pCtx); + assert(pResInfo->superTableQ); + + SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + char * input = GET_INPUT_CHAR(pCtx); + + for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { + SRateInfo *pInput = (SRateInfo *)input; + + pTrace("%p do_sumrate_merge() hasResult:%d input num:%" PRId64 " input sum:%f total num:%" PRId64 " total sum:%f", pCtx, pInput->hasResult, pInput->num, pInput->sum, pRateInfo->num, pRateInfo->sum); + + if (pInput->hasResult != DATA_SET_FLAG) { + continue; + } else if (pInput->num == 0) { + pRateInfo->sum += do_calc_rate(pInput); + pRateInfo->num++; + } else { + pRateInfo->sum += pInput->sum; + pRateInfo->num += pInput->num; + } + pRateInfo->hasResult = DATA_SET_FLAG; + } + + // if the data set hasResult is not set, the result is null + if (DATA_SET_FLAG == pRateInfo->hasResult) { + pResInfo->hasResult = DATA_SET_FLAG; + SET_VAL(pCtx, pRateInfo->num, 1); + memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo)); + } +} + +static void sumrate_func_merge(SQLFunctionCtx *pCtx) { + pTrace("%p sumrate_func_merge() process ...", pCtx); + do_sumrate_merge(pCtx); +} + +static void sumrate_func_second_merge(SQLFunctionCtx *pCtx) { + pTrace("%p sumrate_func_second_merge() process ...", pCtx); + do_sumrate_merge(pCtx); +} + +static void sumrate_finalizer(SQLFunctionCtx *pCtx) { + SResultInfo *pResInfo = 
GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + + pTrace("%p sumrate_finalizer() superTableQ:%d num:%" PRId64 " sum:%f hasResult:%d", pCtx, pResInfo->superTableQ, pRateInfo->num, pRateInfo->sum, pRateInfo->hasResult); + + if (pRateInfo->hasResult != DATA_SET_FLAG) { + setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + return; + } + + if (pRateInfo->num == 0) { + // from meter + *(double*)pCtx->aOutputBuf = do_calc_rate(pRateInfo); + } else if (pCtx->functionId == TSDB_FUNC_SUM_RATE || pCtx->functionId == TSDB_FUNC_SUM_IRATE) { + *(double*)pCtx->aOutputBuf = pRateInfo->sum; + } else { + *(double*)pCtx->aOutputBuf = pRateInfo->sum / pRateInfo->num; + } + + pResInfo->numOfRes = 1; + pResInfo->hasResult = DATA_SET_FLAG; + doFinalizer(pCtx); +} + + +///////////////////////////////////////////////////////////////////////////////////////////// + + /* * function compatible list. * tag and ts are not involved in the compatibility check @@ -4347,23 +4837,18 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) { * e.g., count/sum/avg/min/max/stddev/percentile/apercentile/first/last... 
* */ -int32_t funcCompatDefList[28] = { - /* - * count, sum, avg, min, max, stddev, percentile, apercentile, first, last - */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - - /* - * last_row, top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_z, tag - */ - 4, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, - - /* - * colprj, tagprj, arithmetic, diff, first_dist, last_dist, interp - */ - 1, 1, 1, -1, 1, 1, 5}; +int32_t funcCompatDefList[] = { + // count, sum, avg, min, max, stddev, percentile, apercentile, first, last + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + // last_row, top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_z + 4, -1, -1, 1, 1, 1, 1, 1, 1, -1, + // tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, interp rate irate + 1, 1, 1, 1, -1, 1, 1, 5, 1, 1, + // sum_rate, sum_irate, avg_rate, avg_irate + 1, 1, 1, 1, +}; -SQLAggFuncElem aAggs[28] = {{ +SQLAggFuncElem aAggs[] = {{ // 0, count function does not invoke the finalize function "count", TSDB_FUNC_COUNT, @@ -4373,7 +4858,7 @@ SQLAggFuncElem aAggs[28] = {{ count_function, count_function_f, no_next_step, - noop, + doFinalizer, count_func_merge, count_func_merge, count_load_data_info, @@ -4449,8 +4934,8 @@ SQLAggFuncElem aAggs[28] = {{ stddev_function_f, stddev_next_step, stddev_finalizer, - noop, - noop, + noop1, + noop1, data_req_load_info, }, { @@ -4464,8 +4949,8 @@ SQLAggFuncElem aAggs[28] = {{ percentile_function_f, no_next_step, percentile_finalizer, - noop, - noop, + noop1, + noop1, data_req_load_info, }, { @@ -4494,8 +4979,8 @@ SQLAggFuncElem aAggs[28] = {{ first_function_f, no_next_step, function_finalizer, - noop, - noop, + noop1, + noop1, first_data_req_info, }, { @@ -4509,8 +4994,8 @@ SQLAggFuncElem aAggs[28] = {{ last_function_f, no_next_step, function_finalizer, - noop, - noop, + noop1, + noop1, last_data_req_info, }, { @@ -4522,10 +5007,10 @@ SQLAggFuncElem aAggs[28] = {{ TSDB_FUNCSTATE_SELECTIVITY, first_last_function_setup, last_row_function, - noop, + noop2, no_next_step, 
last_row_finalizer, - noop, + noop1, last_dist_func_second_merge, data_req_load_info, }, @@ -4602,8 +5087,8 @@ SQLAggFuncElem aAggs[28] = {{ leastsquares_function_f, no_next_step, leastsquares_finalizer, - noop, - noop, + noop1, + noop1, data_req_load_info, }, { @@ -4614,9 +5099,9 @@ SQLAggFuncElem aAggs[28] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, date_col_output_function, - date_col_output_function, + date_col_output_function_f, no_next_step, - noop, + doFinalizer, copy_function, copy_function, no_data_info, @@ -4628,10 +5113,10 @@ SQLAggFuncElem aAggs[28] = {{ TSDB_FUNC_TS_DUMMY, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, - noop, - noop, + noop1, + noop2, no_next_step, - noop, + doFinalizer, copy_function, copy_function, data_req_load_info, @@ -4644,9 +5129,9 @@ SQLAggFuncElem aAggs[28] = {{ TSDB_BASE_FUNC_SO, function_setup, tag_function, - noop, + noop2, no_next_step, - noop, + doFinalizer, copy_function, copy_function, no_data_info, @@ -4676,7 +5161,7 @@ SQLAggFuncElem aAggs[28] = {{ tag_function, tag_function_f, no_next_step, - noop, + doFinalizer, copy_function, copy_function, no_data_info, @@ -4691,7 +5176,7 @@ SQLAggFuncElem aAggs[28] = {{ col_project_function, col_project_function_f, no_next_step, - noop, + doFinalizer, copy_function, copy_function, data_req_load_info, @@ -4706,7 +5191,7 @@ SQLAggFuncElem aAggs[28] = {{ tag_project_function, tag_project_function_f, no_next_step, - noop, + doFinalizer, copy_function, copy_function, no_data_info, @@ -4721,7 +5206,7 @@ SQLAggFuncElem aAggs[28] = {{ arithmetic_function, arithmetic_function_f, no_next_step, - noop, + doFinalizer, copy_function, copy_function, data_req_load_info, @@ -4736,9 +5221,9 @@ SQLAggFuncElem aAggs[28] = {{ diff_function, diff_function_f, no_next_step, - noop, - noop, - noop, + doFinalizer, + noop1, + noop1, data_req_load_info, }, // distributed version used in two-stage aggregation processes @@ -4782,8 +5267,98 @@ SQLAggFuncElem aAggs[28] = {{ 
interp_function, do_sum_f, // todo filter handle no_next_step, - noop, - noop, + doFinalizer, + noop1, copy_function, no_data_info, + }, + { + // 28 + "rate", + TSDB_FUNC_RATE, + TSDB_FUNC_RATE, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + rate_function_setup, + rate_function, + rate_function_f, + no_next_step, + rate_finalizer, + rate_func_merge, + rate_func_copy, + data_req_load_info, + }, + { + // 29 + "irate", + TSDB_FUNC_IRATE, + TSDB_FUNC_IRATE, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + rate_function_setup, + irate_function, + irate_function_f, + no_next_step, + rate_finalizer, + rate_func_merge, + rate_func_copy, + data_req_load_info, + }, + { + // 30 + "sum_rate", + TSDB_FUNC_SUM_RATE, + TSDB_FUNC_SUM_RATE, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + rate_function_setup, + rate_function, + rate_function_f, + no_next_step, + sumrate_finalizer, + sumrate_func_merge, + sumrate_func_second_merge, + data_req_load_info, + }, + { + // 31 + "sum_irate", + TSDB_FUNC_SUM_IRATE, + TSDB_FUNC_SUM_IRATE, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + rate_function_setup, + irate_function, + irate_function_f, + no_next_step, + sumrate_finalizer, + sumrate_func_merge, + sumrate_func_second_merge, + data_req_load_info, + }, + { + // 32 + "avg_rate", + TSDB_FUNC_AVG_RATE, + TSDB_FUNC_AVG_RATE, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + rate_function_setup, + rate_function, + rate_function_f, + no_next_step, + sumrate_finalizer, + sumrate_func_merge, + sumrate_func_second_merge, + data_req_load_info, + }, + { + // 33 + "avg_irate", + TSDB_FUNC_AVG_IRATE, + TSDB_FUNC_AVG_IRATE, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + rate_function_setup, + irate_function, + irate_function_f, + no_next_step, + sumrate_finalizer, + sumrate_func_merge, + sumrate_func_second_merge, + data_req_load_info, }}; diff --git a/src/client/src/tscJoinProcess.c b/src/client/src/tscJoinProcess.c index 
17ea5cf8862f3fdb42f95cc80b84acf9394d26ae..1bafb60f1a0e487aeaa7b70e1c1817111f8a631d 100644 --- a/src/client/src/tscJoinProcess.c +++ b/src/client/src/tscJoinProcess.c @@ -14,28 +14,15 @@ */ #include "os.h" -#include "tcache.h" #include "tscJoinProcess.h" +#include "tcache.h" #include "tscUtil.h" #include "tsclient.h" #include "tscompression.h" #include "ttime.h" #include "tutil.h" -static UNUSED_FUNC bool isSubqueryCompleted(SSqlObj* pSql) { - bool hasData = true; - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlRes* pRes = &pSql->pSubs[i]->res; - - // in case inner join, if any subquery exhausted, query completed - if (pRes->numOfRows == 0) { - hasData = false; - break; - } - } - - return hasData; -} +static void freeSubqueryObj(SSqlObj* pSql); static bool doCompare(int32_t order, int64_t left, int64_t right) { if (order == TSQL_SO_ASC) { @@ -45,19 +32,24 @@ static bool doCompare(int32_t order, int64_t left, int64_t right) { } } -static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSupporter1, SJoinSubquerySupporter* pSupporter2, - TSKEY* st, TSKEY* et) { +static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSupporter1, + SJoinSubquerySupporter* pSupporter2, TSKEY* st, TSKEY* et) { STSBuf* output1 = tsBufCreate(true); STSBuf* output2 = tsBufCreate(true); *st = INT64_MAX; *et = INT64_MIN; - SLimitVal* pLimit = &pSql->cmd.limit; - int32_t order = pSql->cmd.order.order; - - pSql->pSubs[0]->cmd.tsBuf = output1; - pSql->pSubs[1]->cmd.tsBuf = output2; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + + SLimitVal* pLimit = &pQueryInfo->limit; + int32_t order = pQueryInfo->order.order; + + SQueryInfo* pSubQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[0]->cmd, 0); + SQueryInfo* pSubQueryInfo2 = tscGetQueryInfoDetail(&pSql->pSubs[1]->cmd, 0); + + pSubQueryInfo1->tsBuf = output1; + pSubQueryInfo2->tsBuf = output2; tsBufResetPos(pSupporter1->pTSBuf); tsBufResetPos(pSupporter2->pTSBuf); @@ 
-88,7 +80,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor #ifdef _DEBUG_VIEW // for debug purpose - tscPrint("%lld, tags:%d \t %lld, tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag); + tscPrint("%" PRId64 ", tags:%d \t %" PRId64 ", tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag); #endif if (elem1.tag < elem2.tag || (elem1.tag == elem2.tag && doCompare(order, elem1.ts, elem2.ts))) { @@ -104,16 +96,19 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor numOfInput2++; } else { - if (*st > elem1.ts) { - *st = elem1.ts; - } - - if (*et < elem1.ts) { - *et = elem1.ts; - } - - // in case of stable query, limit/offset is not applied here - if (pLimit->offset == 0 || pSql->cmd.nAggTimeInterval > 0 || QUERY_IS_STABLE_QUERY(pSql->cmd.type)) { + /* + * in case of stable query, limit/offset is not applied here. the limit/offset is applied to the + * final results which is acquired after the secondry merge of in the client. + */ + if (pLimit->offset == 0 || pQueryInfo->nAggTimeInterval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) { + if (*st > elem1.ts) { + *st = elem1.ts; + } + + if (*et < elem1.ts) { + *et = elem1.ts; + } + tsBufAppend(output1, elem1.vnode, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts)); tsBufAppend(output2, elem2.vnode, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts)); } else { @@ -150,30 +145,33 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor tsBufDestory(pSupporter1->pTSBuf); tsBufDestory(pSupporter2->pTSBuf); - tscTrace("%p input1:%lld, input2:%lld, %lld for secondary query after ts blocks intersecting", - pSql, numOfInput1, numOfInput2, output1->numOfTotal); + tscTrace("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks " + "intersecting, skey:%" PRId64 ", ekey:%" PRId64, pSql, + numOfInput1, numOfInput2, output1->numOfTotal, *st, *et); return output1->numOfTotal; } -//todo handle 
failed to create sub query -SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, /*int32_t* numOfComplete, int32_t* gc,*/ int32_t index) { +// todo handle failed to create sub query +SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, int32_t index) { SJoinSubquerySupporter* pSupporter = calloc(1, sizeof(SJoinSubquerySupporter)); if (pSupporter == NULL) { return NULL; } pSupporter->pObj = pSql; - pSupporter->hasMore = true; - pSupporter->pState = pState; pSupporter->subqueryIndex = index; - pSupporter->interval = pSql->cmd.nAggTimeInterval; - pSupporter->limit = pSql->cmd.limit; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + + pSupporter->interval = pQueryInfo->nAggTimeInterval; + pSupporter->limit = pQueryInfo->limit; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, index); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, pSql->cmd.clauseIndex, index); pSupporter->uid = pMeterMetaInfo->pMeterMeta->uid; + + assert (pSupporter->uid != 0); getTmpfilePath("join-", pSupporter->path); pSupporter->f = fopen(pSupporter->path, "w"); @@ -190,7 +188,7 @@ void tscDestroyJoinSupporter(SJoinSubquerySupporter* pSupporter) { return; } - tfree(pSupporter->exprsInfo.pExprs); + tscSqlExprInfoDestroy(&pSupporter->exprsInfo); tscColumnBaseInfoDestroy(&pSupporter->colList); tscClearFieldInfo(&pSupporter->fieldsInfo); @@ -210,10 +208,9 @@ void tscDestroyJoinSupporter(SJoinSubquerySupporter* pSupporter) { * primary timestamp column , the secondary query is not necessary * */ -bool needSecondaryQuery(SSqlObj* pSql) { - SSqlCmd* pCmd = &pSql->cmd; - for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) { - SColumnBase* pBase = tscColumnBaseInfoGet(&pCmd->colList, i); +bool needSecondaryQuery(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->colList.numOfCols; ++i) { + SColumnBase* pBase = tscColumnBaseInfoGet(&pQueryInfo->colList, i); 
if (pBase->colIndex.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return true; } @@ -225,102 +222,147 @@ bool needSecondaryQuery(SSqlObj* pSql) { /* * launch secondary stage query to fetch the result that contains timestamp in set */ -int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { - // TODO not launch secondary stage query - // if (!needSecondaryQuery(pSql)) { - // return; - // } - - // sub query may not be necessary +int32_t tscLaunchSecondPhaseSubqueries(SSqlObj* pSql) { int32_t numOfSub = 0; SJoinSubquerySupporter* pSupporter = NULL; - + + /* + * If the columns are not involved in the final select clause, the secondary query will not be launched + * for the subquery. + */ + SSubqueryState* pState = NULL; + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { pSupporter = pSql->pSubs[i]->param; - pSupporter->pState->numOfCompleted = 0; - if (pSupporter->exprsInfo.numOfExprs > 0) { ++numOfSub; } } - + + assert(numOfSub > 0); + // scan all subquery, if one sub query has only ts, ignore it - int32_t j = 0; - tscTrace("%p start to launch secondary subqueries: %d", pSql, pSql->numOfSubs); + tscTrace("%p start to launch secondary subqueries, total:%d, only:%d needs to query, others are not retrieve in " + "select clause", pSql, pSql->numOfSubs, numOfSub); + /* + * the subqueries that do not actually launch the secondary query to virtual node is set as completed. 
+ */ + pState = pSupporter->pState; + pState->numOfTotal = pSql->numOfSubs; + pState->numOfCompleted = (pSql->numOfSubs - numOfSub); + + bool success = true; + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlObj* pSub = pSql->pSubs[i]; - pSupporter = pSub->param; - pSupporter->pState->numOfTotal = numOfSub; - + SSqlObj *pPrevSub = pSql->pSubs[i]; + pSql->pSubs[i] = NULL; + + pSupporter = pPrevSub->param; + if (pSupporter->exprsInfo.numOfExprs == 0) { + tscTrace("%p subIndex: %d, not need to launch query, ignore it", pSql, i); + tscDestroyJoinSupporter(pSupporter); - taos_free_result(pSub); + tscFreeSqlObj(pPrevSub); + + pSql->pSubs[i] = NULL; continue; } - - SSqlObj* pNew = createSubqueryObj(pSql, 0, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL); + + SQueryInfo *pSubQueryInfo = tscGetQueryInfoDetail(&pPrevSub->cmd, 0); + STSBuf *pTSBuf = pSubQueryInfo->tsBuf; + pSubQueryInfo->tsBuf = NULL; + + // free result for async object will also free sqlObj + taos_free_result(pPrevSub); + + SSqlObj *pNew = createSubqueryObj(pSql, (int16_t) i, tscJoinQueryCallback, pSupporter, NULL); if (pNew == NULL) { - pSql->numOfSubs = i; //revise the number of subquery - pSupporter->pState->numOfTotal = i; - - pSupporter->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY; tscDestroyJoinSupporter(pSupporter); - - return NULL; + success = false; + break; } - - tscFreeSqlCmdData(&pNew->cmd); - - pSql->pSubs[j++] = pNew; - pNew->cmd.tsBuf = pSub->cmd.tsBuf; - pSub->cmd.tsBuf = NULL; - - taos_free_result(pSub); - + + tscClearSubqueryInfo(&pNew->cmd); + pSql->pSubs[i] = pNew; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + pQueryInfo->tsBuf = pTSBuf; // transfer the ownership of timestamp comp-z data to the new created object + // set the second stage sub query for join process - pNew->cmd.type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE; - - pNew->cmd.nAggTimeInterval = pSupporter->interval; - pNew->cmd.limit = pSupporter->limit; - pNew->cmd.groupbyExpr = 
pSupporter->groupbyExpr; - - tscColumnBaseInfoCopy(&pNew->cmd.colList, &pSupporter->colList, 0); - tscTagCondCopy(&pNew->cmd.tagCond, &pSupporter->tagCond); - - tscSqlExprCopy(&pNew->cmd.exprsInfo, &pSupporter->exprsInfo, pSupporter->uid); - tscFieldInfoCopyAll(&pSupporter->fieldsInfo, &pNew->cmd.fieldsInfo); - - // add the ts function for interval query if it is missing - if (pSupporter->exprsInfo.pExprs[0].functionId != TSDB_FUNC_TS && pNew->cmd.nAggTimeInterval > 0) { - tscAddTimestampColumn(&pNew->cmd, TSDB_FUNC_TS, 0); + pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE; + + pQueryInfo->nAggTimeInterval = pSupporter->interval; + pQueryInfo->groupbyExpr = pSupporter->groupbyExpr; + + tscColumnBaseInfoCopy(&pQueryInfo->colList, &pSupporter->colList, 0); + tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond); + + tscSqlExprCopy(&pQueryInfo->exprsInfo, &pSupporter->exprsInfo, pSupporter->uid); + tscFieldInfoCopyAll(&pQueryInfo->fieldsInfo, &pSupporter->fieldsInfo); + + /* + * if the first column of the secondary query is not ts function, add this function. + * Because this column is required to filter with timestamp after intersecting. + */ + if (pSupporter->exprsInfo.pExprs[0].functionId != TSDB_FUNC_TS) { + tscAddTimestampColumn(pQueryInfo, TSDB_FUNC_TS, 0); } - + // todo refactor function name - tscAddTimestampColumn(&pNew->cmd, TSDB_FUNC_TS, 0); - tscFieldInfoCalOffset(&pNew->cmd); - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0); - + SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + assert(pNew->numOfSubs == 0 && pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1); + + tscFieldInfoCalOffset(pNewQueryInfo); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pNewQueryInfo, 0); + + /* + * When handling the projection query, the offset value will be modified for table-table join, which is changed + * during the timestamp intersection. 
+ */ + pSupporter->limit = pQueryInfo->limit; + pNewQueryInfo->limit = pSupporter->limit; + // fetch the join tag column - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - SSqlExpr* pExpr = tscSqlExprGet(&pNew->cmd, 0); - assert(pNew->cmd.tagCond.joinInfo.hasJoin); - - int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd, pMeterMetaInfo->pMeterMeta->uid); + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { + SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0); + assert(pQueryInfo->tagCond.joinInfo.hasJoin); + + int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pQueryInfo->tagCond, pMeterMetaInfo->pMeterMeta->uid); pExpr->param[0].i64Key = tagColIndex; pExpr->numOfParams = 1; - - addRequiredTagColumn(&pNew->cmd, tagColIndex, 0); } - - tscProcessSql(pNew); + + tscPrintSelectClause(pNew, 0); + + tscTrace("%p subquery:%p tableIndex:%d, vnodeIdx:%d, type:%d, exprInfo:%d, colList:%d, fieldsInfo:%d, name:%s", + pSql, pNew, 0, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type, + pNewQueryInfo->exprsInfo.numOfExprs, pNewQueryInfo->colList.numOfCols, + pNewQueryInfo->fieldsInfo.numOfOutputCols, pNewQueryInfo->pMeterInfo[0]->name); + } + + //prepare the subqueries object failed, abort + if (!success) { + pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; + tscError("%p failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql, + pSql->numOfSubs, pSql->res.code); + freeSubqueryObj(pSql); + + return pSql->res.code; + } + + for(int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub == NULL) { + continue; + } + + tscProcessSql(pSub); } - // revise the number of subs - pSql->numOfSubs = j; - - return 0; + return TSDB_CODE_SUCCESS; } static void freeSubqueryObj(SSqlObj* pSql) { @@ -353,7 +395,10 @@ static void doQuitSubquery(SSqlObj* pParentSql) { } static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSubquerySupporter* pSupporter) { - if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= 
pSupporter->pState->numOfTotal) { + int32_t numOfTotal = pSupporter->pState->numOfTotal; + int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1); + + if (finished >= numOfTotal) { pSqlObj->res.code = abs(pSupporter->pState->code); tscError("%p all subquery return and query failed, global code:%d", pSqlObj, pSqlObj->res.code); @@ -362,11 +407,11 @@ static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSubquerySupporter* pSupporter } // update the query time range according to the join results on timestamp -static void updateQueryTimeRange(SSqlObj* pSql, int64_t st, int64_t et) { - assert(pSql->cmd.stime <= st && pSql->cmd.etime >= et); +static void updateQueryTimeRange(SQueryInfo* pQueryInfo, int64_t st, int64_t et) { + assert(pQueryInfo->stime <= st && pQueryInfo->etime >= et); - pSql->cmd.stime = st; - pSql->cmd.etime = et; + pQueryInfo->stime = st; + pQueryInfo->etime = et; } static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { @@ -374,8 +419,12 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { SSqlObj* pParentSql = pSupporter->pObj; SSqlObj* pSql = (SSqlObj*)tres; - - if ((pSql->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == 0) { + SSqlCmd* pCmd = &pSql->cmd; + SSqlRes* pRes = &pSql->res; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == 0) { if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { tscError("%p abort query due to other subquery failure. 
code:%d, global code:%d", pSql, numOfRows, pSupporter->pState->code); @@ -384,9 +433,9 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { return; } - if (numOfRows > 0) { // write the data into disk + if (numOfRows > 0) { // write the data into disk fwrite(pSql->res.data, pSql->res.numOfRows, 1, pSupporter->f); - fflush(pSupporter->f); + fclose(pSupporter->f); STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true); if (pBuf == NULL) { @@ -401,7 +450,10 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { tscTrace("%p create tmp file for ts block:%s", pSql, pBuf->path); pSupporter->pTSBuf = pBuf; } else { - tsBufMerge(pSupporter->pTSBuf, pBuf, pSql->cmd.vnodeIdx); + assert(pQueryInfo->numOfTables == 1); // for subquery, only one metermetaInfo + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + + tsBufMerge(pSupporter->pTSBuf, pBuf, pMeterMetaInfo->vnodeIndex); tsBufDestory(pBuf); } @@ -411,12 +463,37 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { pSql->res.row = pSql->res.numOfRows; taos_fetch_rows_a(tres, joinRetrieveCallback, param); - } else if (numOfRows == 0) { // no data from this vnode anymore - if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { + } else if (numOfRows == 0) { // no data from this vnode anymore + SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex); + + //todo refactor + if (tscNonOrderedProjectionQueryOnSTable(pParentQueryInfo, 0)) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + assert(pQueryInfo->numOfTables == 1); + + // for projection query, need to try next vnode + int32_t totalVnode = pMeterMetaInfo->pMetricMeta->numOfVnodes; + if ((++pMeterMetaInfo->vnodeIndex) < totalVnode) { + tscTrace("%p current vnode:%d exhausted, try next:%d. total vnode:%d. 
current numOfRes:%d", pSql, + pMeterMetaInfo->vnodeIndex - 1, pMeterMetaInfo->vnodeIndex, totalVnode, pRes->numOfTotal); + + pSql->cmd.command = TSDB_SQL_SELECT; + pSql->fp = tscJoinQueryCallback; + tscProcessSql(pSql); + return; + } + } + + int32_t numOfTotal = pSupporter->pState->numOfTotal; + int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1); + + if (finished >= numOfTotal) { + assert(finished == numOfTotal); + if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { tscTrace("%p sub:%p, numOfSub:%d, quit from further procedure due to other queries failure", pParentSql, tres, - pSupporter->subqueryIndex); + pSupporter->subqueryIndex); doQuitSubquery(pParentSql); return; } @@ -433,8 +510,8 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { tscTrace("%p free all sub SqlObj and quit", pParentSql); doQuitSubquery(pParentSql); } else { - updateQueryTimeRange(pParentSql, st, et); - tscLaunchSecondSubquery(pParentSql); + updateQueryTimeRange(pParentQueryInfo, st, et); + tscLaunchSecondPhaseSubqueries(pParentSql); } } } else { // failure of sub query @@ -451,64 +528,145 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { tscError("%p retrieve failed, code:%d, index:%d", pSql, numOfRows, pSupporter->subqueryIndex); } - if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { - tscTrace("%p secondary retrieve completed, global code:%d", tres, pParentSql->res.code); + if (numOfRows >= 0) { + pSql->res.numOfTotal += pSql->res.numOfRows; + } + + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && numOfRows == 0) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + assert(pQueryInfo->numOfTables == 1); + + // for projection query, need to try next vnode if current vnode is exhausted + if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + pSupporter->pState->numOfCompleted = 0; + 
pSupporter->pState->numOfTotal = 1; + + pSql->cmd.command = TSDB_SQL_SELECT; + pSql->fp = tscJoinQueryCallback; + tscProcessSql(pSql); + + return; + } + } + + int32_t numOfTotal = pSupporter->pState->numOfTotal; + int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1); + + if (finished >= numOfTotal) { + assert(finished == numOfTotal); + tscTrace("%p all %d secondary subquery retrieves completed, global code:%d", tres, numOfTotal, + pParentSql->res.code); + if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { pParentSql->res.code = abs(pSupporter->pState->code); freeSubqueryObj(pParentSql); } tsem_post(&pParentSql->rspSem); + } else { + tscTrace("%p sub:%p completed, completed:%d, total:%d", pParentSql, tres, finished, numOfTotal); + } + } +} + +static SJoinSubquerySupporter* tscUpdateSubqueryStatus(SSqlObj* pSql, int32_t numOfFetch) { + int32_t notInvolved = 0; + SJoinSubquerySupporter* pSupporter = NULL; + SSubqueryState* pState = NULL; + + for(int32_t i = 0; i < pSql->numOfSubs; ++i) { + if (pSql->pSubs[i] == NULL) { + notInvolved++; + } else { + pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[i]->param; + pState = pSupporter->pState; } } + + pState->numOfTotal = pSql->numOfSubs; + pState->numOfCompleted = pSql->numOfSubs - numOfFetch; + + return pSupporter; } void tscFetchDatablockFromSubquery(SSqlObj* pSql) { int32_t numOfFetch = 0; - + assert(pSql->numOfSubs >= 1); + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[i]->param; - - SSqlRes* pRes = &pSql->pSubs[i]->res; - if (pRes->row >= pRes->numOfRows && pSupporter->hasMore) { - numOfFetch++; + if (pSql->pSubs[i] == NULL) { // this subquery does not need to involve in secondary query + continue; + } + + SSqlRes *pRes = &pSql->pSubs[i]->res; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + + if 
(tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + if (pRes->row >= pRes->numOfRows && pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes && + (!tscHasReachLimitation(pQueryInfo, pRes))) { + numOfFetch++; + } + } else { + if (pRes->row >= pRes->numOfRows && (!tscHasReachLimitation(pQueryInfo, pRes))) { + numOfFetch++; + } } } - if (numOfFetch > 0) { - tscTrace("%p retrieve data from %d subqueries", pSql, numOfFetch); - - SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[0]->param; - pSupporter->pState->numOfTotal = numOfFetch; // wait for all subqueries completed - pSupporter->pState->numOfCompleted = 0; - - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlObj* pSql1 = pSql->pSubs[i]; + if (numOfFetch <= 0) { + return; + } - SSqlRes* pRes1 = &pSql1->res; - SSqlCmd* pCmd1 = &pSql1->cmd; + // TODO multi-vnode retrieve for projection query with limitation has bugs, since the global limiation is not handled + tscTrace("%p retrieve data from %d subqueries", pSql, numOfFetch); - pSupporter = (SJoinSubquerySupporter*)pSql1->param; + SJoinSubquerySupporter* pSupporter = tscUpdateSubqueryStatus(pSql, numOfFetch); + + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlObj* pSql1 = pSql->pSubs[i]; + if (pSql1 == NULL) { + continue; + } + + SSqlRes* pRes1 = &pSql1->res; + SSqlCmd* pCmd1 = &pSql1->cmd; - // wait for all subqueries completed - pSupporter->pState->numOfTotal = numOfFetch; - if (pRes1->row >= pRes1->numOfRows && pSupporter->hasMore) { - tscTrace("%p subquery:%p retrieve data from vnode, index:%d", pSql, pSql1, pSupporter->subqueryIndex); + pSupporter = (SJoinSubquerySupporter*)pSql1->param; - tscResetForNextRetrieve(pRes1); + // wait for all subqueries completed + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd1, 0); + assert(pRes1->numOfRows >= 0 && pQueryInfo->numOfTables == 1); - pSql1->fp = joinRetrieveCallback; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + + 
if (pRes1->row >= pRes1->numOfRows) { + tscTrace("%p subquery:%p retrieve data from vnode, subquery:%d, vnodeIndex:%d", pSql, pSql1, + pSupporter->subqueryIndex, pMeterMetaInfo->vnodeIndex); - if (pCmd1->command < TSDB_SQL_LOCAL) { - pCmd1->command = (pCmd1->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; - } + tscResetForNextRetrieve(pRes1); + pSql1->fp = joinRetrieveCallback; - tscProcessSql(pSql1); + if (pCmd1->command < TSDB_SQL_LOCAL) { + pCmd1->command = (pCmd1->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; } + + tscProcessSql(pSql1); } + } - // wait for all subquery completed - tsem_wait(&pSql->rspSem); + // wait for all subquery completed + tsem_wait(&pSql->rspSem); + + // update the records for each subquery + for(int32_t i = 0; i < pSql->numOfSubs; ++i) { + if (pSql->pSubs[i] == NULL) { + continue; + } + + SSqlRes* pRes1 = &pSql->pSubs[i]->res; + pRes1->numOfTotalInCurrentClause += pRes1->numOfRows; } } @@ -519,26 +677,32 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { tscTrace("%p all subquery response, retrieve data", pSql); - pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pCmd->fieldsInfo.numOfOutputCols); + if (pRes->pColumnIndex != NULL) { + return; // the column transfer support struct has been built + } - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pQueryInfo->fieldsInfo.numOfOutputCols); - int32_t tableIndexOfSub = -1; - for (int32_t j = 0; j < pCmd->numOfTables; ++j) { - SSqlObj* pSub = pSql->pSubs[j]; + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSub->cmd, 0); + int32_t tableIndexOfSub = -1; + for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) { + SMeterMetaInfo* pMeterMetaInfo 
= tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, j); if (pMeterMetaInfo->pMeterMeta->uid == pExpr->uid) { tableIndexOfSub = j; break; } } + assert(tableIndexOfSub >= 0 && tableIndexOfSub < pQueryInfo->numOfTables); + SSqlCmd* pSubCmd = &pSql->pSubs[tableIndexOfSub]->cmd; - - for (int32_t k = 0; k < pSubCmd->exprsInfo.numOfExprs; ++k) { - SSqlExpr* pSubExpr = tscSqlExprGet(pSubCmd, k); + SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0); + + for (int32_t k = 0; k < pSubQueryInfo->exprsInfo.numOfExprs; ++k) { + SSqlExpr* pSubExpr = tscSqlExprGet(pSubQueryInfo, k); if (pExpr->functionId == pSubExpr->functionId && pExpr->colInfo.colId == pSubExpr->colInfo.colId) { pRes->pColumnIndex[i] = (SColumnIndex){.tableIndex = tableIndexOfSub, .columnIndex = k}; break; @@ -549,7 +713,7 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { SSqlObj* pSql = (SSqlObj*)tres; - // SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + // SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); // int32_t idx = pSql->cmd.vnodeIdx; @@ -573,12 +737,13 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { // // no qualified result // } // - // tscLaunchSecondSubquery(pSql, ts, num); + // tscLaunchSecondPhaseSubqueries(pSql, ts, num); // } else { // } // } else { - if ((pSql->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { if (code != TSDB_CODE_SUCCESS) { // direct call joinRetrieveCallback and set the error code joinRetrieveCallback(param, pSql, code); } else { // first stage query, continue to retrieve data @@ -605,23 +770,42 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { quitAllSubquery(pParentSql, pSupporter); } else { - if 
(atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { + int32_t numOfTotal = pSupporter->pState->numOfTotal; + int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1); + + if (finished >= numOfTotal) { + assert(finished == numOfTotal); + tscSetupOutputColumnIndex(pParentSql); - - if (pParentSql->fp == NULL) { - tsem_wait(&pParentSql->emptyRspSem); - tsem_wait(&pParentSql->emptyRspSem); - - tsem_post(&pParentSql->rspSem); - } else { - // set the command flag must be after the semaphore been correctly set. - // pPObj->cmd.command = TSDB_SQL_RETRIEVE_METRIC; - // if (pPObj->res.code == TSDB_CODE_SUCCESS) { - // (*pPObj->fp)(pPObj->param, pPObj, 0); - // } else { - // tscQueueAsyncRes(pPObj); - // } - assert(0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + + /** + * if the query is a continue query (vnodeIndex > 0 for projection query) for next vnode, do the retrieval of + * data instead of returning to its invoker + */ + if (pMeterMetaInfo->vnodeIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + assert(pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes); + pSupporter->pState->numOfCompleted = 0; // reset the record value + + pSql->fp = joinRetrieveCallback; // continue retrieve data + pSql->cmd.command = TSDB_SQL_FETCH; + tscProcessSql(pSql); + } else { // first retrieve from vnode during the secondary stage sub-query + if (pParentSql->fp == NULL) { + tsem_wait(&pParentSql->emptyRspSem); + tsem_wait(&pParentSql->emptyRspSem); + + tsem_post(&pParentSql->rspSem); + } else { + // set the command flag must be after the semaphore been correctly set. 
+ // pPObj->cmd.command = TSDB_SQL_RETRIEVE_METRIC; + // if (pPObj->res.code == TSDB_CODE_SUCCESS) { + // (*pPObj->fp)(pPObj->param, pPObj, 0); + // } else { + // tscQueueAsyncRes(pPObj); + // } + assert(0); + } } } } @@ -708,7 +892,9 @@ STSBuf* tsBufCreate(bool autoDelete) { return NULL; } - allocResForTSBuf(pTSBuf); + if (NULL == allocResForTSBuf(pTSBuf)) { + return NULL; + } // update the header info STSBufFileHeader header = {.magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = TSQL_SO_ASC}; @@ -731,8 +917,9 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { strncpy(pTSBuf->path, path, PATH_MAX); - pTSBuf->f = fopen(pTSBuf->path, "r"); + pTSBuf->f = fopen(pTSBuf->path, "r+"); if (pTSBuf->f == NULL) { + free(pTSBuf); return NULL; } @@ -774,7 +961,8 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { size_t infoSize = sizeof(STSVnodeBlockInfo) * pTSBuf->numOfVnodes; STSVnodeBlockInfo* buf = (STSVnodeBlockInfo*)calloc(1, infoSize); - int64_t pos = ftell(pTSBuf->f); + + //int64_t pos = ftell(pTSBuf->f); //pos not used fread(buf, infoSize, 1, pTSBuf->f); // the length value for each vnode is not kept in file, so does not set the length value @@ -790,19 +978,23 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { struct stat fileStat; fstat(fileno(pTSBuf->f), &fileStat); - pTSBuf->fileSize = (uint32_t) fileStat.st_size; + pTSBuf->fileSize = (uint32_t)fileStat.st_size; tsBufResetPos(pTSBuf); // ascending by default pTSBuf->cur.order = TSQL_SO_ASC; pTSBuf->autoDelete = autoDelete; + + tscTrace("create tsBuf from file:%s, fd:%d, size:%d, numOfVnode:%d, autoDelete:%d", pTSBuf->path, fileno(pTSBuf->f), + pTSBuf->fileSize, pTSBuf->numOfVnodes, pTSBuf->autoDelete); + return pTSBuf; } -void tsBufDestory(STSBuf* pTSBuf) { +void* tsBufDestory(STSBuf* pTSBuf) { if (pTSBuf == NULL) { - return; + return NULL; } tfree(pTSBuf->assistBuf); @@ -814,10 +1006,21 @@ void tsBufDestory(STSBuf* pTSBuf) { 
fclose(pTSBuf->f); if (pTSBuf->autoDelete) { + tscTrace("tsBuf %p destroyed, delete tmp file:%s", pTSBuf, pTSBuf->path); unlink(pTSBuf->path); + } else { + tscTrace("tsBuf %p destroyed, tmp file:%s, remains", pTSBuf, pTSBuf->path); } free(pTSBuf); + return NULL; +} + +static STSVnodeBlockInfoEx* tsBufGetLastVnodeInfo(STSBuf* pTSBuf) { + int32_t last = pTSBuf->numOfVnodes - 1; + + assert(last >= 0); + return &pTSBuf->pData[last]; } static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { @@ -836,10 +1039,10 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { } if (pTSBuf->numOfVnodes > 0) { - STSVnodeBlockInfo* pPrevBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1].info; + STSVnodeBlockInfoEx* pPrevBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); // update prev vnode length info in file - TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pPrevBlockInfo); + TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pPrevBlockInfoEx->info); } // set initial value for vnode block @@ -857,9 +1060,9 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { // update the header info STSBufFileHeader header = { .magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder}; - STSBufUpdateHeader(pTSBuf, &header); - return &pTSBuf->pData[pTSBuf->numOfVnodes - 1]; + STSBufUpdateHeader(pTSBuf, &header); + return tsBufGetLastVnodeInfo(pTSBuf); } static void shrinkBuffer(STSList* ptsData) { @@ -906,8 +1109,10 @@ static void writeDataToDisk(STSBuf* pTSBuf) { pTSBuf->tsData.len = 0; - pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.compLen += blockSize; - pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.numOfBlocks += 1; + STSVnodeBlockInfoEx* pVnodeBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); + + pVnodeBlockInfoEx->info.compLen += blockSize; + pVnodeBlockInfoEx->info.numOfBlocks += 1; shrinkBuffer(&pTSBuf->tsData); } @@ -1008,13 +1213,13 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t 
vnodeId, int64_t tag, const char* pData STSVnodeBlockInfoEx* pBlockInfo = NULL; STSList* ptsData = &pTSBuf->tsData; - if (pTSBuf->numOfVnodes == 0 || pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.vnode != vnodeId) { + if (pTSBuf->numOfVnodes == 0 || tsBufGetLastVnodeInfo(pTSBuf)->info.vnode != vnodeId) { writeDataToDisk(pTSBuf); shrinkBuffer(ptsData); pBlockInfo = addOneVnodeInfo(pTSBuf, vnodeId); } else { - pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1]; + pBlockInfo = tsBufGetLastVnodeInfo(pTSBuf); } assert(pBlockInfo->info.vnode == vnodeId); @@ -1037,6 +1242,8 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData pTSBuf->numOfTotal += len / TSDB_KEYSIZE; + // the size of raw data exceeds the size of the default prepared buffer, so + // during getBufBlock, the output buffer needs to be large enough. if (ptsData->len >= ptsData->threshold) { writeDataToDisk(pTSBuf); shrinkBuffer(ptsData); @@ -1053,10 +1260,10 @@ void tsBufFlush(STSBuf* pTSBuf) { writeDataToDisk(pTSBuf); shrinkBuffer(&pTSBuf->tsData); - STSVnodeBlockInfo* pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1].info; + STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); // update prev vnode length info in file - TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo); + TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pBlockInfoEx->info); // save the ts order into header STSBufFileHeader header = { @@ -1157,11 +1364,22 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t vnodeIndex, int32_t blockIndex } STSBlock* pBlock = &pTSBuf->block; + + size_t s = pBlock->numOfElem * TSDB_KEYSIZE; + + /* + * In order to accommodate all the qualified data, the actual buffer size for one block with identical tags value + * may exceed the maximum allowed size during *tsBufAppend* function by invoking expandBuffer function + */ + if (s > pTSBuf->tsData.allocSize) { + expandBuffer(&pTSBuf->tsData, s); + } + pTSBuf->tsData.len = 
tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf, pTSBuf->tsData.allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize); - assert(pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem); + assert((pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem) && (pTSBuf->tsData.allocSize >= pTSBuf->tsData.len)); pCur->vnodeIndex = vnodeIndex; pCur->blockIndex = blockIndex; @@ -1203,20 +1421,20 @@ bool tsBufNextPos(STSBuf* pTSBuf) { if (pCur->vnodeIndex == -1) { if (pCur->order == TSQL_SO_ASC) { tsBufGetBlock(pTSBuf, 0, 0); - - if (pTSBuf->block.numOfElem == 0) { // the whole list is empty, return + + if (pTSBuf->block.numOfElem == 0) { // the whole list is empty, return tsBufResetPos(pTSBuf); return false; } else { return true; } - - } else { // get the last timestamp record in the last block of the last vnode + + } else { // get the last timestamp record in the last block of the last vnode assert(pTSBuf->numOfVnodes > 0); - + int32_t vnodeIndex = pTSBuf->numOfVnodes - 1; pCur->vnodeIndex = vnodeIndex; - + int32_t vnodeId = pTSBuf->pData[pCur->vnodeIndex].info.vnode; STSVnodeBlockInfo* pBlockInfo = tsBufGetVnodeBlockInfo(pTSBuf, vnodeId); int32_t blockIndex = pBlockInfo->numOfBlocks - 1; @@ -1250,6 +1468,10 @@ bool tsBufNextPos(STSBuf* pTSBuf) { pCur->vnodeIndex = -1; return false; } + + if (pBlockInfo == NULL) { + return false; + } int32_t blockIndex = pCur->order == TSQL_SO_ASC ? 
0 : pBlockInfo->numOfBlocks - 1; tsBufGetBlock(pTSBuf, pCur->vnodeIndex + step, blockIndex); @@ -1318,7 +1540,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { tsBufFlush(pDestBuf); // compared with the last vnode id - if (vnodeId != pDestBuf->pData[pDestBuf->numOfVnodes - 1].info.vnode) { + if (vnodeId != tsBufGetLastVnodeInfo(pDestBuf)->info.vnode) { int32_t oldSize = pDestBuf->numOfVnodes; int32_t newSize = oldSize + pSrcBuf->numOfVnodes; @@ -1345,36 +1567,49 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { pDestBuf->numOfVnodes = newSize; } else { - STSVnodeBlockInfoEx* pBlockInfoEx = &pDestBuf->pData[pDestBuf->numOfVnodes - 1]; + STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pDestBuf); + pBlockInfoEx->len += pSrcBuf->pData[0].len; pBlockInfoEx->info.numOfBlocks += pSrcBuf->pData[0].info.numOfBlocks; pBlockInfoEx->info.compLen += pSrcBuf->pData[0].info.compLen; pBlockInfoEx->info.vnode = vnodeId; } - int64_t r = fseek(pDestBuf->f, 0, SEEK_END); + int32_t r = fseek(pDestBuf->f, 0, SEEK_END); assert(r == 0); int64_t offset = getDataStartOffset(); int32_t size = pSrcBuf->fileSize - offset; #ifdef LINUX - ssize_t rc = sendfile(fileno(pDestBuf->f), fileno(pSrcBuf->f), &offset, size); + ssize_t rc = tsendfile(fileno(pDestBuf->f), fileno(pSrcBuf->f), &offset, size); #else ssize_t rc = fsendfile(pDestBuf->f, pSrcBuf->f, &offset, size); #endif + if (rc == -1) { - printf("%s\n", strerror(errno)); + tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno)); return -1; } if (rc != size) { - printf("%s\n", strerror(errno)); + tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno)); return -1; } pDestBuf->numOfTotal += pSrcBuf->numOfTotal; + int32_t oldSize = pDestBuf->fileSize; + + struct stat fileStat; + fstat(fileno(pDestBuf->f), &fileStat); + pDestBuf->fileSize = (uint32_t)fileStat.st_size; 
+ + assert(pDestBuf->fileSize == oldSize + size); + + tscTrace("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, numOfVnode:%d, autoDelete:%d", pDestBuf, + pDestBuf->path, fileno(pDestBuf->f), pDestBuf->fileSize, pDestBuf->numOfVnodes, pDestBuf->autoDelete); + return 0; } @@ -1391,7 +1626,7 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo); fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET); - fwrite((void*) pData, 1, len, pTSBuf->f); + fwrite((void*)pData, 1, len, pTSBuf->f); pTSBuf->fileSize += len; pTSBuf->tsOrder = order; @@ -1485,7 +1720,7 @@ void tsBufDisplay(STSBuf* pTSBuf) { while (tsBufNextPos(pTSBuf)) { STSElem elem = tsBufGetElem(pTSBuf); - printf("%d-%lld-%lld\n", elem.vnode, elem.tag, elem.ts); + printf("%d-%" PRId64 "-%" PRId64 "\n", elem.vnode, *(int64_t*) elem.tag, elem.ts); } pTSBuf->cur.order = old; diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 5ae72acd57a96d8e83deaa68800369f14e69b31c..402838bb680b7d4e61c3e17415eb4b7450944ccd 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -39,32 +39,24 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type case TSDB_DATA_TYPE_NCHAR: return length; case TSDB_DATA_TYPE_DOUBLE: { -#ifdef _TD_ARM_32_ double dv = 0; - *(int64_t *)(&dv) = *(int64_t *)pData; - len = sprintf(buf, "%f", dv); -#else - len = sprintf(buf, "%lf", *(double *)pData); -#endif + dv = GET_DOUBLE_VAL(pData); + len = sprintf(buf, "%lf", dv); if (strncasecmp("nan", buf, 3) == 0) { len = 4; } } break; case TSDB_DATA_TYPE_FLOAT: { -#ifdef _TD_ARM_32_ float fv = 0; - *(int32_t *)(&fv) = *(int32_t *)pData; + fv = GET_FLOAT_VAL(pData); len = sprintf(buf, "%f", fv); -#else - len = sprintf(buf, "%f", *(float *)pData); -#endif if (strncasecmp("nan", buf, 3) == 0) { len = 4; } } break; case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - len = sprintf(buf, "%lld", 
*(int64_t *)pData); + len = sprintf(buf, "%" PRId64 "", *(int64_t *)pData); break; case TSDB_DATA_TYPE_BOOL: len = MAX_BOOL_TYPE_LENGTH; @@ -85,7 +77,7 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type * length((uint64_t) 123456789011) > 12, greater than sizsof(uint64_t) */ static int32_t tscMaxLengthOfTagsFields(SSqlObj *pSql) { - SMeterMeta *pMeta = tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta; + SMeterMeta *pMeta = tscGetMeterMetaInfo(&pSql->cmd, 0, 0)->pMeterMeta; if (pMeta->meterType == TSDB_METER_METRIC || pMeta->meterType == TSDB_METER_OTABLE || pMeta->meterType == TSDB_METER_STABLE) { @@ -114,8 +106,9 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { SSqlRes *pRes = &pSql->res; // one column for each row - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SMeterMeta * pMeta = pMeterMetaInfo->pMeterMeta; /* @@ -127,7 +120,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { int32_t numOfRows = pMeta->numOfColumns; int32_t totalNumOfRows = numOfRows + pMeta->numOfTags; - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { numOfRows = pMeta->numOfColumns + pMeta->numOfTags; } @@ -135,31 +128,31 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { SSchema *pSchema = tsGetSchema(pMeta); for (int32_t i = 0; i < numOfRows; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0); - strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name, + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0); + strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name, TSDB_COL_NAME_LEN); char *type = tDataTypeDesc[pSchema[i].type].aName; - 
pField = tscFieldInfoGetField(pCmd, 1); - strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes); + pField = tscFieldInfoGetField(pQueryInfo, 1); + strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes); int32_t bytes = pSchema[i].bytes; if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { bytes = bytes / TSDB_NCHAR_SIZE; } - pField = tscFieldInfoGetField(pCmd, 2); - *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pCmd, 2) * totalNumOfRows + pField->bytes * i) = bytes; + pField = tscFieldInfoGetField(pQueryInfo, 2); + *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes; - pField = tscFieldInfoGetField(pCmd, 3); + pField = tscFieldInfoGetField(pQueryInfo, 3); if (i >= pMeta->numOfColumns && pMeta->numOfTags != 0) { - strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 3) * totalNumOfRows + pField->bytes * i, "tag", + strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i, "tag", strlen("tag") + 1); } } - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { return 0; } @@ -167,27 +160,27 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { char *pTagValue = tsGetTagsValue(pMeta); for (int32_t i = numOfRows; i < totalNumOfRows; ++i) { // field name - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0); - strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name, + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0); + strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name, TSDB_COL_NAME_LEN); // type name - pField = tscFieldInfoGetField(pCmd, 1); + pField = tscFieldInfoGetField(pQueryInfo, 1); char *type = tDataTypeDesc[pSchema[i].type].aName; - strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 1) * 
totalNumOfRows + pField->bytes * i, type, pField->bytes); + strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes); // type length int32_t bytes = pSchema[i].bytes; - pField = tscFieldInfoGetField(pCmd, 2); + pField = tscFieldInfoGetField(pQueryInfo, 2); if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { bytes = bytes / TSDB_NCHAR_SIZE; } - *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pCmd, 2) * totalNumOfRows + pField->bytes * i) = bytes; + *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes; // tag value - pField = tscFieldInfoGetField(pCmd, 3); - char *target = pRes->data + tscFieldInfoGetOffset(pCmd, 3) * totalNumOfRows + pField->bytes * i; + pField = tscFieldInfoGetField(pQueryInfo, 3); + char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i; if (isNull(pTagValue, pSchema[i].type)) { sprintf(target, "%s", TSDB_DATA_NULL_STR); @@ -201,22 +194,14 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { taosUcs4ToMbs(pTagValue, pSchema[i].bytes, target); break; case TSDB_DATA_TYPE_FLOAT: { -#ifdef _TD_ARM_32_ float fv = 0; - *(int32_t *)(&fv) = *(int32_t *)pTagValue; + fv = GET_FLOAT_VAL(pTagValue); sprintf(target, "%f", fv); -#else - sprintf(target, "%f", *(float *)pTagValue); -#endif } break; case TSDB_DATA_TYPE_DOUBLE: { -#ifdef _TD_ARM_32_ double dv = 0; - *(int64_t *)(&dv) = *(int64_t *)pTagValue; + dv = GET_DOUBLE_VAL(pTagValue); sprintf(target, "%lf", dv); -#else - sprintf(target, "%lf", *(double *)pTagValue); -#endif } break; case TSDB_DATA_TYPE_TINYINT: sprintf(target, "%d", *(int8_t *)pTagValue); @@ -228,7 +213,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { sprintf(target, "%d", *(int32_t *)pTagValue); break; case TSDB_DATA_TYPE_BIGINT: - sprintf(target, "%lld", *(int64_t *)pTagValue); + sprintf(target, "%" PRId64 "", *(int64_t *)pTagValue); break; 
case TSDB_DATA_TYPE_BOOL: { char *val = (*((int8_t *)pTagValue) == 0) ? "false" : "true"; @@ -252,25 +237,28 @@ static int32_t tscBuildMeterSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, SSqlCmd *pCmd = &pSql->cmd; pCmd->numOfCols = numOfCols; - pCmd->order.order = TSQL_SO_ASC; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + pQueryInfo->order.order = TSQL_SO_ASC; - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, "Field", TSDB_COL_NAME_LEN); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, "Field", TSDB_COL_NAME_LEN); rowLen += TSDB_COL_NAME_LEN; - tscFieldInfoSetValue(&pCmd->fieldsInfo, 1, TSDB_DATA_TYPE_BINARY, "Type", typeColLength); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 1, TSDB_DATA_TYPE_BINARY, "Type", typeColLength); rowLen += typeColLength; - tscFieldInfoSetValue(&pCmd->fieldsInfo, 2, TSDB_DATA_TYPE_INT, "Length", sizeof(int32_t)); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 2, TSDB_DATA_TYPE_INT, "Length", sizeof(int32_t)); rowLen += sizeof(int32_t); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 3, TSDB_DATA_TYPE_BINARY, "Note", noteColLength); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 3, TSDB_DATA_TYPE_BINARY, "Note", noteColLength); rowLen += noteColLength; return rowLen; } static int32_t tscProcessDescribeTable(SSqlObj *pSql) { - assert(tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta != NULL); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + assert(tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMeterMeta != NULL); const int32_t NUM_OF_DESCRIBE_TABLE_COLUMNS = 4; const int32_t TYPE_COLUMN_LENGTH = 16; @@ -283,7 +271,7 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) { int32_t rowLen = tscBuildMeterSchemaResultFields(pSql, NUM_OF_DESCRIBE_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, note_field_length); - tscFieldInfoCalOffset(&pSql->cmd); + tscFieldInfoCalOffset(pQueryInfo); return tscSetValueToResObj(pSql, rowLen); } @@ -293,7 +281,9 @@ static int 
tscBuildMetricTagProjectionResult(SSqlObj *pSql) { // only need to reorganize the results in the column format SSqlCmd * pCmd = &pSql->cmd; SSqlRes * pRes = &pSql->res; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; SSchema * pSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); @@ -310,7 +300,7 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) { } int32_t totalNumOfResults = pMetricMeta->numOfMeters; - int32_t rowLen = tscGetResRowLength(pCmd); + int32_t rowLen = tscGetResRowLength(pQueryInfo); tscInitResObjForLocalQuery(pSql, totalNumOfResults, rowLen); @@ -320,17 +310,17 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) { for (int32_t j = 0; j < pSidList->numOfSids; ++j) { SMeterSidExtInfo *pSidExt = tscGetMeterSidInfo(pSidList, j); - - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SColIndexEx *pColIndex = &tscSqlExprGet(pCmd, k)->colInfo; + + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SColIndexEx *pColIndex = &tscSqlExprGet(pQueryInfo, k)->colInfo; int16_t offsetId = pColIndex->colIdx; assert((pColIndex->flag & TSDB_COL_TAG) != 0); char * val = pSidExt->tags + vOffset[offsetId]; - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, k); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, k); - memcpy(pRes->data + tscFieldInfoGetOffset(pCmd, k) * totalNumOfResults + pField->bytes * rowIdx, val, + memcpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, k) * totalNumOfResults + pField->bytes * rowIdx, val, (size_t)pField->bytes); } rowIdx++; @@ -344,21 +334,23 @@ static int tscBuildMetricTagSqlFunctionResult(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - SMetricMeta *pMetricMeta = tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta; + SQueryInfo* pQueryInfo = 
tscGetQueryInfoDetail(pCmd, 0); + + SMetricMeta *pMetricMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMetricMeta; int32_t totalNumOfResults = 1; // count function only produce one result - int32_t rowLen = tscGetResRowLength(pCmd); + int32_t rowLen = tscGetResRowLength(pQueryInfo); tscInitResObjForLocalQuery(pSql, totalNumOfResults, rowLen); int32_t rowIdx = 0; for (int32_t i = 0; i < totalNumOfResults; ++i) { - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->colInfo.colIdx == -1 && pExpr->functionId == TSDB_FUNC_COUNT) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, k); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, k); - memcpy(pRes->data + tscFieldInfoGetOffset(pCmd, i) * totalNumOfResults + pField->bytes * rowIdx, + memcpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, i) * totalNumOfResults + pField->bytes * rowIdx, &pMetricMeta->numOfMeters, sizeof(pMetricMeta->numOfMeters)); } else { tscError("not support operations"); @@ -373,15 +365,17 @@ static int tscBuildMetricTagSqlFunctionResult(SSqlObj *pSql) { static int tscProcessQueryTags(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; - - SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + SMeterMeta *pMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMeterMeta; if (pMeterMeta == NULL || pMeterMeta->numOfTags == 0 || pMeterMeta->numOfColumns == 0) { strcpy(pCmd->payload, "invalid table"); pSql->res.code = TSDB_CODE_INVALID_TABLE; return pSql->res.code; } - SSqlExpr *pExpr = tscSqlExprGet(pCmd, 0); + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, 0); if (pExpr->functionId == TSDB_FUNC_COUNT) { return tscBuildMetricTagSqlFunctionResult(pSql); } else { @@ -390,7 +384,9 @@ static int tscProcessQueryTags(SSqlObj *pSql) 
{ } static void tscProcessCurrentUser(SSqlObj *pSql) { - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); tscSetLocalQueryResult(pSql, pSql->pTscObj->user, pExpr->aliasName, TSDB_USER_LEN); } @@ -403,19 +399,24 @@ static void tscProcessCurrentDB(SSqlObj *pSql) { setNull(db, TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN); } - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); tscSetLocalQueryResult(pSql, db, pExpr->aliasName, TSDB_DB_NAME_LEN); } static void tscProcessServerVer(SSqlObj *pSql) { const char* v = pSql->pTscObj->sversion; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); tscSetLocalQueryResult(pSql, v, pExpr->aliasName, tListLen(pSql->pTscObj->sversion)); } static void tscProcessClientVer(SSqlObj *pSql) { - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); tscSetLocalQueryResult(pSql, version, pExpr->aliasName, strlen(version)); } @@ -433,7 +434,9 @@ static void tscProcessServStatus(SSqlObj *pSql) { } } - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); tscSetLocalQueryResult(pSql, "1", pExpr->aliasName, 2); } @@ -442,12 +445,16 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa SSqlRes *pRes = &pSql->res; pCmd->numOfCols = 1; - pCmd->order.order = TSQL_SO_ASC; - - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, columnName, valueLength); + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + pQueryInfo->order.order = 
TSQL_SO_ASC; + + tscClearFieldInfo(&pQueryInfo->fieldsInfo); + + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, columnName, valueLength); tscInitResObjForLocalQuery(pSql, 1, valueLength); - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0); strncpy(pRes->data, val, pField->bytes); } diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 3360f6b26a1aa21ece9e14c10f3d46e6b287e386..b0d5a58bcf0c679f6dc35ce2f5f1eab650e2950d 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -18,11 +18,8 @@ #define _XOPEN_SOURCE -#pragma GCC diagnostic ignored "-Woverflow" -#pragma GCC diagnostic ignored "-Wunused-variable" - #include "os.h" -#include "ihash.h" +#include "hash.h" #include "tscSecondaryMerge.h" #include "tscUtil.h" #include "tschemautil.h" @@ -39,7 +36,7 @@ enum { TSDB_USE_CLI_TS = 1, }; -static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize); +static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows); static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) { int32_t numType = isValidNumber(pToken); @@ -74,8 +71,6 @@ static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) { } int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) { - char * token; - int tokenlen; int32_t index = 0; SSQLToken sToken; int64_t interval; @@ -118,13 +113,12 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1 index = 0; sToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL); pTokenEnd += index; - + if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) { - index = 0; valueToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL); pTokenEnd += index; - + if (valueToken.n < 2) { return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z); } @@ -132,7 
+126,7 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1 if (getTimestampInUsFromStr(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - + if (timePrec == TSDB_TIME_PRECISION_MILLI) { interval /= 1000; } @@ -155,8 +149,8 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, int64_t iv; int32_t numType; char * endptr = NULL; - errno = 0; // clear the previous existed error information - + errno = 0; // clear the previous existed error information + switch (pSchema->type) { case TSDB_DATA_TYPE_BOOL: { // bool if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { @@ -196,7 +190,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, return tscInvalidSQLErrMsg(msg, "tinyint data overflow", pToken->z); } - *((int8_t *)payload) = (int8_t) iv; + *((int8_t *)payload) = (int8_t)iv; } break; @@ -315,6 +309,10 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, } strncpy(payload, pToken->z, pToken->n); + + if (pToken->n < pSchema->bytes) { + payload[pToken->n] = 0; // add the null-terminated char if the length of the string is shorter than the available space + } } break; @@ -323,7 +321,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload, if (pToken->type == TK_NULL) { *(uint32_t *)payload = TSDB_DATA_NCHAR_NULL; } else { - // if the converted output len is over than pSchema->bytes, return error: 'Argument list too long' + // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' if (!taosMbsToUcs4(pToken->z, pToken->n, payload, pSchema->bytes)) { char buf[512] = {0}; snprintf(buf, 512, "%s", strerror(errno)); @@ -377,7 +375,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start } else { if (pDataBlocks->tsSource == TSDB_USE_SERVER_TS) { return -1; // client time/server 
time can not be mixed - + } else if (pDataBlocks->tsSource == -1) { pDataBlocks->tsSource = TSDB_USE_CLI_TS; } @@ -392,9 +390,9 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start } int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error, - int16_t timePrec) { - int32_t index = 0; - bool isPrevOptr; + int16_t timePrec, int32_t *code, char *tmpTokenBuf) { + int32_t index = 0; + // bool isPrevOptr; //fang, never used SSQLToken sToken = {0}; char * payload = pDataBlocks->pData + pDataBlocks->size; @@ -402,8 +400,8 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ int32_t rowSize = 0; for (int i = 0; i < spd->numOfAssignedCols; ++i) { // the start position in data block buffer of current value in sql - char * start = payload + spd->elems[i].offset; - int16_t colIndex = spd->elems[i].colIndex; + char * start = payload + spd->elems[i].offset; + int16_t colIndex = spd->elems[i].colIndex; SSchema *pSchema = schema + colIndex; rowSize += pSchema->bytes; @@ -416,8 +414,9 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) { continue; } - + strcpy(error, "client out of memory"); + *code = TSDB_CODE_CLI_OUT_OF_MEMORY; return -1; } @@ -425,23 +424,45 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ (sToken.type != TK_FLOAT) && (sToken.type != TK_BOOL) && (sToken.type != TK_NULL)) || (sToken.n == 0) || (sToken.type == TK_RP)) { tscInvalidSQLErrMsg(error, "invalid data or symbol", sToken.z); + *code = TSDB_CODE_INVALID_SQL; return -1; } // Remove quotation marks if (TK_STRING == sToken.type) { - sToken.z++; - sToken.n -= 2; + // delete escape character: \\, \', \" + char delim = sToken.z[0]; + int32_t cnt = 0; + int32_t j = 0; + for (int32_t k = 1; k < sToken.n - 1; ++k) { + if 
(sToken.z[k] == delim || sToken.z[k] == '\\') { + if (sToken.z[k + 1] == delim) { + cnt++; + tmpTokenBuf[j] = sToken.z[k + 1]; + j++; + k++; + continue; + } + } + + tmpTokenBuf[j] = sToken.z[k]; + j++; + } + tmpTokenBuf[j] = 0; + sToken.z = tmpTokenBuf; + sToken.n -= 2 + cnt; } bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, error, str, isPrimaryKey, timePrec); if (ret != TSDB_CODE_SUCCESS) { + *code = TSDB_CODE_INVALID_SQL; return -1; // NOTE: here 0 mean error! } if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) { tscInvalidSQLErrMsg(error, "client time/server time can not be mixed up", sToken.z); + *code = TSDB_CODE_INVALID_TIME_STAMP; return -1; } } @@ -451,7 +472,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ char *ptr = payload; for (int32_t i = 0; i < spd->numOfCols; ++i) { - if (!spd->hasVal[i]) { // current column do not have any value to insert, set it to null + if (!spd->hasVal[i]) { // current column do not have any value to insert, set it to null setNull(ptr, schema[i].type, schema[i].bytes); } @@ -476,7 +497,7 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) { } int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMeta, int maxRows, - SParsedDataColInfo *spd, char *error) { + SParsedDataColInfo *spd, char *error, int32_t *code, char *tmpTokenBuf) { int32_t index = 0; SSQLToken sToken; @@ -487,6 +508,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe if (spd->hasVal[0] == false) { strcpy(error, "primary timestamp column can not be null"); + *code = TSDB_CODE_INVALID_SQL; return -1; } @@ -497,16 +519,19 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe *str += index; if (numOfRows >= maxRows || pDataBlock->size + pMeterMeta->rowSize >= pDataBlock->nAllocSize) { - int32_t tSize = 
tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize); - if (0 == tSize) { + int32_t tSize; + int32_t retcode = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize, &tSize); + if (retcode != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client strcpy(error, "client out of memory"); + *code = retcode; return -1; } - maxRows += tSize; + ASSERT(tSize > maxRows); + maxRows = tSize; } - int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision); - if (len <= 0) { // error message has been set in tsParseOneRowData + int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf); + if (len <= 0) { // error message has been set in tsParseOneRowData return -1; } @@ -517,6 +542,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe *str += index; if (sToken.n == 0 || sToken.type != TK_RP) { tscInvalidSQLErrMsg(error, ") expected", *str); + *code = TSDB_CODE_INVALID_SQL; return -1; } @@ -525,6 +551,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe if (numOfRows <= 0) { strcpy(error, "no any data points"); + *code = TSDB_CODE_INVALID_SQL; return -1; } else { return numOfRows; @@ -545,11 +572,12 @@ static void tscSetAssignedColumnInfo(SParsedDataColInfo *spd, SSchema *pSchema, } } -int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize) { +int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) { size_t remain = pDataBlock->nAllocSize - pDataBlock->size; const int factor = 5; uint32_t nAllocSizeOld = pDataBlock->nAllocSize; - + assert(pDataBlock->headerSize >= 0); + // expand the allocated size if (remain < rowSize * factor) { while (remain < rowSize * factor) { @@ -562,14 +590,15 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize) { pDataBlock->pData = tmp; memset(pDataBlock->pData + pDataBlock->size, 0, pDataBlock->nAllocSize - pDataBlock->size); } 
else { - //assert(false); - // do nothing + // do nothing, if allocate more memory failed pDataBlock->nAllocSize = nAllocSizeOld; - return 0; + *numOfRows = (int32_t)(pDataBlock->nAllocSize - pDataBlock->headerSize) / rowSize; + return TSDB_CODE_CLI_OUT_OF_MEMORY; } } - return (int32_t)(pDataBlock->nAllocSize - pDataBlock->size) / rowSize; + *numOfRows = (int32_t)(pDataBlock->nAllocSize - pDataBlock->headerSize) / rowSize; + return TSDB_CODE_SUCCESS; } static void tsSetBlockInfo(SShellSubmitBlock *pBlocks, const SMeterMeta *pMeterMeta, int32_t numOfRows) { @@ -625,25 +654,37 @@ void sortRemoveDuplicates(STableDataBlocks *dataBuf) { static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char **str, SParsedDataColInfo *spd, int32_t *totalNum) { SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; - STableDataBlocks *dataBuf = - tscGetDataBlockFromList(pTableHashList, pCmd->pDataBlocks, pMeterMeta->uid, TSDB_DEFAULT_PAYLOAD_SIZE, - sizeof(SShellSubmitBlock), pMeterMeta->rowSize, pMeterMetaInfo->name); + STableDataBlocks *dataBuf = NULL; + int32_t ret = tscGetDataBlockFromList(pTableHashList, pCmd->pDataBlocks, pMeterMeta->uid, TSDB_DEFAULT_PAYLOAD_SIZE, + sizeof(SShellSubmitBlock), pMeterMeta->rowSize, pMeterMetaInfo->name, + pMeterMeta, &dataBuf); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + int32_t maxNumOfRows; + ret = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize, &maxNumOfRows); + if (TSDB_CODE_SUCCESS != ret) { + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } - int32_t maxNumOfRows = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize); - if (0 == maxNumOfRows) { + int32_t code = TSDB_CODE_INVALID_SQL; + char * tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \" + if (NULL == tmpTokenBuf) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } - 
int32_t numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload); + int32_t numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload, &code, tmpTokenBuf); + free(tmpTokenBuf); if (numOfRows <= 0) { - return TSDB_CODE_INVALID_SQL; + return code; } for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) { - SParamInfo* param = dataBuf->params + i; + SParamInfo *param = dataBuf->params + i; if (param->idx == -1) { param->idx = pCmd->numOfParams++; param->offset -= sizeof(SShellSubmitBlock); @@ -664,16 +705,20 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char return TSDB_CODE_SUCCESS; } -static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { +static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) { int32_t index = 0; - SSQLToken sToken; - SSQLToken tableToken; + SSQLToken sToken = {0}; + SSQLToken tableToken = {0}; int32_t code = TSDB_CODE_SUCCESS; - - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + + const int32_t TABLE_INDEX = 0; + const int32_t STABLE_INDEX = 1; + + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); char *sql = *sqlstr; + // get the token of specified table index = 0; tableToken = tStrGetToken(sql, &index, false, 0, NULL); @@ -710,40 +755,117 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { if (numOfColList == 0 && cstart != NULL) { return TSDB_CODE_INVALID_SQL; } - - if (sToken.type == TK_USING) { // create table if not exists + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, TABLE_INDEX); + + if (sToken.type == TK_USING) { // create table if not exists according to the super table index = 0; sToken = tStrGetToken(sql, &index, false, 0, NULL); sql += index; STagData *pTag = (STagData *)pCmd->payload; memset(pTag, 0, sizeof(STagData)); - setMeterID(pSql, &sToken, 0); + + /* + * 
the source super table is moved to the secondary position of the pMeterMetaInfo list + */ + if (pQueryInfo->numOfTables < 2) { + tscAddEmptyMeterMetaInfo(pQueryInfo); + } + + SMeterMetaInfo *pSTableMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, STABLE_INDEX); + setMeterID(pSTableMeterMetaInfo, &sToken, pSql); - strncpy(pTag->name, pMeterMetaInfo->name, TSDB_METER_ID_LEN); - code = tscGetMeterMeta(pSql, pTag->name, 0); + strncpy(pTag->name, pSTableMeterMetaInfo->name, TSDB_METER_ID_LEN); + code = tscGetMeterMeta(pSql, pSTableMeterMetaInfo); if (code != TSDB_CODE_SUCCESS) { return code; } - if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (!UTIL_METER_IS_SUPERTABLE(pSTableMeterMetaInfo)) { return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z); } - char * tagVal = pTag->data; - SSchema *pTagSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); + SSchema *pTagSchema = tsGetTagSchema(pSTableMeterMetaInfo->pMeterMeta); index = 0; sToken = tStrGetToken(sql, &index, false, 0, NULL); sql += index; + + SParsedDataColInfo spd = {0}; + + uint8_t numOfTags = pSTableMeterMetaInfo->pMeterMeta->numOfTags; + spd.numOfCols = numOfTags; + + // if specify some tags column + if (sToken.type != TK_LP) { + tscSetAssignedColumnInfo(&spd, pTagSchema, numOfTags); + } else { + /* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen) + * tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... 
vn); */ + int16_t offset[TSDB_MAX_COLUMNS] = {0}; + for (int32_t t = 1; t < numOfTags; ++t) { + offset[t] = offset[t - 1] + pTagSchema[t - 1].bytes; + } + + while (1) { + index = 0; + sToken = tStrGetToken(sql, &index, false, 0, NULL); + sql += index; + + if (TK_STRING == sToken.type) { + sToken.n = strdequote(sToken.z); + strtrim(sToken.z); + sToken.n = (uint32_t)strlen(sToken.z); + } + + if (sToken.type == TK_RP) { + break; + } + + bool findColumnIndex = false; + + // todo speedup by using hash list + for (int32_t t = 0; t < numOfTags; ++t) { + if (strncmp(sToken.z, pTagSchema[t].name, sToken.n) == 0 && strlen(pTagSchema[t].name) == sToken.n) { + SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++]; + pElem->offset = offset[t]; + pElem->colIndex = t; + + if (spd.hasVal[t] == true) { + return tscInvalidSQLErrMsg(pCmd->payload, "duplicated tag name", sToken.z); + } + + spd.hasVal[t] = true; + findColumnIndex = true; + break; + } + } + + if (!findColumnIndex) { + return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken.z); + } + } + + if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > numOfTags) { + return tscInvalidSQLErrMsg(pCmd->payload, "tag name expected", sToken.z); + } + + index = 0; + sToken = tStrGetToken(sql, &index, false, 0, NULL); + sql += index; + } + if (sToken.type != TK_TAGS) { - return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sql); + return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z); } - int32_t numOfTagValues = 0; uint32_t ignoreTokenTypes = TK_LP; uint32_t numOfIgnoreToken = 1; - while (1) { + for (int i = 0; i < spd.numOfAssignedCols; ++i) { + char * tagVal = pTag->data + spd.elems[i].offset; + int16_t colIndex = spd.elems[i].colIndex; + index = 0; sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes); sql += index; @@ -759,42 +881,60 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) { sToken.n -= 2; } - code = 
tsParseOneColumnData(&pTagSchema[numOfTagValues], &sToken, tagVal, pCmd->payload, &sql, false, - pMeterMetaInfo->pMeterMeta->precision); + code = tsParseOneColumnData(&pTagSchema[colIndex], &sToken, tagVal, pCmd->payload, &sql, false, + pSTableMeterMetaInfo->pMeterMeta->precision); if (code != TSDB_CODE_SUCCESS) { return code; } - if ((pTagSchema[numOfTagValues].type == TSDB_DATA_TYPE_BINARY || - pTagSchema[numOfTagValues].type == TSDB_DATA_TYPE_NCHAR) && sToken.n > pTagSchema[numOfTagValues].bytes) { + if ((pTagSchema[colIndex].type == TSDB_DATA_TYPE_BINARY || pTagSchema[colIndex].type == TSDB_DATA_TYPE_NCHAR) && + sToken.n > pTagSchema[colIndex].bytes) { return tscInvalidSQLErrMsg(pCmd->payload, "string too long", sToken.z); } + } - tagVal += pTagSchema[numOfTagValues++].bytes; + index = 0; + sToken = tStrGetToken(sql, &index, false, 0, NULL); + sql += index; + if (sToken.n == 0 || sToken.type != TK_RP) { + return tscInvalidSQLErrMsg(pCmd->payload, ") expected", sToken.z); } - if (numOfTagValues != pMeterMetaInfo->pMeterMeta->numOfTags) { - return tscInvalidSQLErrMsg(pCmd->payload, "number of tags mismatch", sql); + // 2. 
set the null value for the columns that do not assign values + if (spd.numOfAssignedCols < spd.numOfCols) { + char *ptr = pTag->data; + + for (int32_t i = 0; i < spd.numOfCols; ++i) { + if (!spd.hasVal[i]) { // current tag column do not have any value to insert, set it to null + setNull(ptr, pTagSchema[i].type, pTagSchema[i].bytes); + } + + ptr += pTagSchema[i].bytes; + } } if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) { - return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", sql); + return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr); } - int32_t ret = setMeterID(pSql, &tableToken, 0); + int32_t ret = setMeterID(pMeterMetaInfo, &tableToken, pSql); if (ret != TSDB_CODE_SUCCESS) { return ret; } createTable = true; - code = tscGetMeterMetaEx(pSql, pMeterMetaInfo->name, true); + code = tscGetMeterMetaEx(pSql, pMeterMetaInfo, true); + if (TSDB_CODE_ACTION_IN_PROGRESS == code) { + return code; + } + } else { if (cstart != NULL) { sql = cstart; } else { sql = sToken.z; } - code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); + code = tscGetMeterMeta(pSql, pMeterMetaInfo); } int32_t len = cend - cstart + 1; @@ -819,6 +959,15 @@ int validateTableName(char *tblName, int len) { return tscValidateName(&token); } +static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) { + if (pCmd->dataSourceType != 0 && pCmd->dataSourceType != type) { + return tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sql); + } + + pCmd->dataSourceType = type; + return TSDB_CODE_SUCCESS; +} + /** * usage: insert into table1 values() () table2 values()() * @@ -828,44 +977,68 @@ int validateTableName(char *tblName, int len) { * @param pSql * @return */ -int doParserInsertSql(SSqlObj *pSql, char *str) { +int doParseInsertSql(SSqlObj *pSql, char *str) { SSqlCmd *pCmd = &pSql->cmd; - - int32_t code = TSDB_CODE_INVALID_SQL; + int32_t totalNum = 0; + int32_t code = TSDB_CODE_SUCCESS; + + SMeterMetaInfo 
*pMeterMetaInfo = NULL; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + assert(pQueryInfo != NULL); - SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd); + if (pQueryInfo->numOfTables == 0) { + pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo); + } else { + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + } if ((code = tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) { return code; } + assert(((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList)) + || ((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList))); + if ((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList)) { - pSql->pTableHashList = taosInitIntHash(128, POINTER_BYTES, taosHashInt); + pSql->pTableHashList = taosInitHashTable(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); + pSql->cmd.pDataBlocks = tscCreateBlockArrayList(); if (NULL == pSql->pTableHashList || NULL == pSql->cmd.pDataBlocks) { code = TSDB_CODE_CLI_OUT_OF_MEMORY; goto _error_clean; } } else { + assert((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList)); str = pSql->asyncTblPos; } - tscTrace("%p create data block list for submit data, %p", pSql, pSql->cmd.pDataBlocks); + tscTrace("%p create data block list for submit data:%p, asyncTblPos:%p, pTableHashList:%p", pSql, pSql->cmd.pDataBlocks, pSql->asyncTblPos, pSql->pTableHashList); while (1) { - int32_t index = 0; + int32_t index = 0; SSQLToken sToken = tStrGetToken(str, &index, false, 0, NULL); - if (sToken.n == 0) { // parse file, do not release the STableDataBlock - if (pCmd->isInsertFromFile == 1) { + + // no data in the sql string anymore. + if (sToken.n == 0) { + /* + * if the data is from the data file, no data has been generated yet. So, there no data to + * merge or submit, save the file path and parse the file in other routines. 
+ */ + if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) { goto _clean; } - if (totalNum > 0) { - break; - } else { // no data in current sql string, error + /* + * if no data has been generated during parsing the sql string, error msg will return + * Otherwise, create the first submit block and submit to virtual node. + */ + if (totalNum == 0) { code = TSDB_CODE_INVALID_SQL; goto _error_clean; + } else { + break; } } @@ -877,27 +1050,35 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { goto _error_clean; } - //TODO refactor - if ((code = setMeterID(pSql, &sToken, 0)) != TSDB_CODE_SUCCESS) { + if ((code = setMeterID(pMeterMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) { goto _error_clean; } void *fp = pSql->fp; - if ((code = tscParseSqlForCreateTableOnDemand(&str, pSql)) != TSDB_CODE_SUCCESS) { + ptrdiff_t pos = pSql->asyncTblPos - pSql->sqlstr; + + if ((code = tscCheckIfCreateTable(&str, pSql)) != TSDB_CODE_SUCCESS) { + /* + * For async insert, after get the metermeta from server, the sql string will not be + * parsed using the new metermeta to avoid the overhead cause by get metermeta data information. + * And during the getMeterMetaCallback function, the sql string will be parsed from the + * interrupted position. + */ if (fp != NULL) { - //goto _clean; - return code; - } else { - /* - * for async insert, the free data block operations, which is tscDestroyBlockArrayList, - * must be executed before launch another threads to get metermeta, since the - * later ops may manipulate SSqlObj through another thread in getMeterMetaCallback function. - */ - goto _error_clean; + if (TSDB_CODE_ACTION_IN_PROGRESS == code) { + tscTrace("async insert and waiting to get meter meta, then continue parse sql from offset: %" PRId64, pos); + return code; + } + + // todo add to return + tscError("async insert parse error, code:%d, %s", code, tsError[code]); + pSql->asyncTblPos = NULL; } + + goto _error_clean; // TODO: should _clean or _error_clean to async flow ???? 
} - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL); goto _error_clean; } @@ -905,8 +1086,9 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { index = 0; sToken = tStrGetToken(str, &index, false, 0, NULL); str += index; + if (sToken.n == 0) { - code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z); + code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z); goto _error_clean; } @@ -916,13 +1098,8 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { tscSetAssignedColumnInfo(&spd, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns); - if (pCmd->isInsertFromFile == -1) { - pCmd->isInsertFromFile = 0; - } else { - if (pCmd->isInsertFromFile == 1) { - code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z); - goto _error_clean; - } + if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) { + goto _error_clean; } /* @@ -933,15 +1110,9 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { if (code != TSDB_CODE_SUCCESS) { goto _error_clean; } - } else if (sToken.type == TK_FILE) { - if (pCmd->isInsertFromFile == -1) { - pCmd->isInsertFromFile = 1; - } else { - if (pCmd->isInsertFromFile == 0) { - code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z); - goto _error_clean; - } + if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) { + goto _error_clean; } index = 0; @@ -964,20 +1135,23 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { strcpy(fname, full_path.we_wordv[0]); wordfree(&full_path); - STableDataBlocks *pDataBlock = tscCreateDataBlockEx(PATH_MAX, pMeterMetaInfo->pMeterMeta->rowSize, - sizeof(SShellSubmitBlock), pMeterMetaInfo->name); + STableDataBlocks *pDataBlock = NULL; + SMeterMeta* pMeterMeta = 
pMeterMetaInfo->pMeterMeta; + + int32_t ret = tscCreateDataBlock(PATH_MAX, pMeterMeta->rowSize, sizeof(SShellSubmitBlock), pMeterMetaInfo->name, + pMeterMeta, &pDataBlock); + if (ret != TSDB_CODE_SUCCESS) { + goto _error_clean; + } tscAppendDataBlock(pCmd->pDataBlocks, pDataBlock); strcpy(pDataBlock->filename, fname); } else if (sToken.type == TK_LP) { /* insert into tablename(col1, col2,..., coln) values(v1, v2,... vn); */ - SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta; + SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0)->pMeterMeta; SSchema * pSchema = tsGetSchema(pMeterMeta); - if (pCmd->isInsertFromFile == -1) { - pCmd->isInsertFromFile = 0; - } else if (pCmd->isInsertFromFile == 1) { - code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z); + if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) { goto _error_clean; } @@ -1071,8 +1245,10 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { goto _error_clean; } + pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); + // set the next sent data vnode index in data block arraylist - pCmd->vnodeIdx = 1; + pMeterMetaInfo->vnodeIndex = 1; } else { pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); } @@ -1084,13 +1260,16 @@ _error_clean: pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); _clean: - taosCleanUpIntHash(pSql->pTableHashList); + taosCleanUpHashTable(pSql->pTableHashList); + pSql->pTableHashList = NULL; pSql->asyncTblPos = NULL; + pCmd->isParseFinish = 1; + return code; } -int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db) { +int tsParseInsertSql(SSqlObj *pSql) { if (!pSql->pTscObj->writeAuth) { return TSDB_CODE_NO_RIGHTS; } @@ -1098,35 +1277,36 @@ int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db) { int32_t index = 0; SSqlCmd *pCmd = &pSql->cmd; - SSQLToken sToken = tStrGetToken(sql, &index, false, 0, NULL); - + 
SSQLToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL); assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT); - pCmd->import = (sToken.type == TK_IMPORT); - - sToken = tStrGetToken(sql, &index, false, 0, NULL); + + pCmd->count = 0; + pCmd->command = TSDB_SQL_INSERT; + + SQueryInfo *pQueryInfo = NULL; + tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo); + + uint16_t type = (sToken.type == TK_INSERT)? TSDB_QUERY_TYPE_INSERT:TSDB_QUERY_TYPE_IMPORT; + TSDB_QUERY_SET_TYPE(pQueryInfo->type, type); + + sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL); if (sToken.type != TK_INTO) { return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z); } - - pCmd->count = 0; - pCmd->command = TSDB_SQL_INSERT; - pCmd->isInsertFromFile = -1; + pSql->res.numOfRows = 0; - - return doParserInsertSql(pSql, sql + index); + return doParseInsertSql(pSql, pSql->sqlstr + index); } -int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) { +int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion) { int32_t ret = TSDB_CODE_SUCCESS; - // must before clean the sqlcmd object - tscRemoveAllMeterMetaInfo(&pSql->cmd, false); - if (NULL == pSql->asyncTblPos) { - tscTrace("continue parse sql: %s", pSql->asyncTblPos); tscCleanSqlCmd(&pSql->cmd); + } else { + tscTrace("continue parse sql: %s", pSql->asyncTblPos); } - + if (tscIsInsertOrImportData(pSql->sqlstr)) { /* * only for async multi-vnode insertion @@ -1141,11 +1321,11 @@ int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) { pSql->fp = tscAsyncInsertMultiVnodesProxy; } - ret = tsParseInsertSql(pSql, pSql->sqlstr, acct, db); + ret = tsParseInsertSql(pSql); } else { ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); if (TSDB_CODE_SUCCESS != ret) return ret; - + SSqlInfo SQLInfo = {0}; tSQLParse(&SQLInfo, pSql->sqlstr); @@ -1168,7 +1348,8 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock 
int32_t code = TSDB_CODE_SUCCESS; SSqlCmd *pCmd = &pSql->cmd; - SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta; + assert(pCmd->numOfClause == 1); + SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0)->pMeterMeta; SShellSubmitBlock *pBlocks = (SShellSubmitBlock *)(pTableDataBlocks->pData); tsSetBlockInfo(pBlocks, pMeterMeta, numOfRows); @@ -1190,55 +1371,56 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock return TSDB_CODE_SUCCESS; } -static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) { +static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp, char *tmpTokenBuf) { size_t readLen = 0; char * line = NULL; size_t n = 0; int len = 0; - uint32_t maxRows = 0; + int32_t maxRows = 0; SSqlCmd * pCmd = &pSql->cmd; int numOfRows = 0; int32_t code = 0; int nrows = 0; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; + assert(pCmd->numOfClause == 1); + int32_t rowSize = pMeterMeta->rowSize; pCmd->pDataBlocks = tscCreateBlockArrayList(); - STableDataBlocks *pTableDataBlock = - tscCreateDataBlockEx(TSDB_PAYLOAD_SIZE, pMeterMeta->rowSize, sizeof(SShellSubmitBlock), pMeterMetaInfo->name); + STableDataBlocks *pTableDataBlock = NULL; + int32_t ret = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, rowSize, sizeof(SShellSubmitBlock), + pMeterMetaInfo->name, pMeterMeta, &pTableDataBlock); + if (ret != TSDB_CODE_SUCCESS) { + return -1; + } tscAppendDataBlock(pCmd->pDataBlocks, pTableDataBlock); - maxRows = tscAllocateMemIfNeed(pTableDataBlock, rowSize); - if (maxRows < 1) return -1; + code = tscAllocateMemIfNeed(pTableDataBlock, rowSize, &maxRows); + if (TSDB_CODE_SUCCESS != code) return -1; int count = 0; - SParsedDataColInfo spd = {.numOfCols = pMeterMetaInfo->pMeterMeta->numOfColumns}; - SSchema * pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); + 
SParsedDataColInfo spd = {.numOfCols = pMeterMeta->numOfColumns}; + SSchema * pSchema = tsGetSchema(pMeterMeta); - tscSetAssignedColumnInfo(&spd, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns); + tscSetAssignedColumnInfo(&spd, pSchema, pMeterMeta->numOfColumns); while ((readLen = getline(&line, &n, fp)) != -1) { // line[--readLen] = '\0'; if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) line[--readLen] = 0; - if (readLen <= 0) continue; + if (readLen == 0) continue; // fang, <= to == char *lineptr = line; strtolower(line, line); - - if (numOfRows >= maxRows || pTableDataBlock->size + pMeterMeta->rowSize >= pTableDataBlock->nAllocSize) { - uint32_t tSize = tscAllocateMemIfNeed(pTableDataBlock, pMeterMeta->rowSize); - if (0 == tSize) return (-TSDB_CODE_CLI_OUT_OF_MEMORY); - maxRows += tSize; - } - - len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision); + + len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision, &code, tmpTokenBuf); if (len <= 0 || pTableDataBlock->numOfParams > 0) { - pSql->res.code = TSDB_CODE_INVALID_SQL; - return -1; + pSql->res.code = code; + return (-code); } - + pTableDataBlock->size += len; count++; @@ -1292,23 +1474,26 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) { } STableDataBlocks *pDataBlock = NULL; - SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + assert(pCmd->numOfClause == 1); + int32_t code = TSDB_CODE_SUCCESS; /* the first block has been sent to server in processSQL function */ - assert(pCmd->isInsertFromFile != -1 && pCmd->vnodeIdx >= 1 && pCmd->pDataBlocks != NULL); + assert(pMeterMetaInfo->vnodeIndex >= 1 && pCmd->pDataBlocks != NULL); - if (pCmd->vnodeIdx < pCmd->pDataBlocks->nSize) { + if (pMeterMetaInfo->vnodeIndex < pCmd->pDataBlocks->nSize) { SDataBlockList *pDataBlocks = pCmd->pDataBlocks; - for 
(int32_t i = pCmd->vnodeIdx; i < pDataBlocks->nSize; ++i) { + for (int32_t i = pMeterMetaInfo->vnodeIndex; i < pDataBlocks->nSize; ++i) { pDataBlock = pDataBlocks->pData[i]; if (pDataBlock == NULL) { continue; } if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) { - tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx, pDataBlocks->nSize); + tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex, + pDataBlocks->nSize); continue; } @@ -1321,17 +1506,19 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) { } // multi-vnodes insertion in sync query model -void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) { +void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; if (pCmd->command != TSDB_SQL_INSERT) { return; } - SMeterMetaInfo * pInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + STableDataBlocks *pDataBlock = NULL; int32_t affected_rows = 0; - assert(pCmd->isInsertFromFile == 1 && pCmd->pDataBlocks != NULL); + assert(pCmd->dataSourceType == DATA_FROM_DATA_FILE && pCmd->pDataBlocks != NULL); SDataBlockList *pDataBlockList = pCmd->pDataBlocks; pCmd->pDataBlocks = NULL; @@ -1342,7 +1529,7 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) { if (pDataBlock == NULL) { continue; } - + if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE)) { tscError("%p failed to malloc when insert file", pSql); continue; @@ -1357,16 +1544,24 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) { continue; } - strncpy(pInfo->name, pDataBlock->meterId, TSDB_METER_ID_LEN); + strncpy(pMeterMetaInfo->name, pDataBlock->meterId, TSDB_METER_ID_LEN); memset(pDataBlock->pData, 0, pDataBlock->nAllocSize); - int32_t ret = tscGetMeterMeta(pSql, pInfo->name, 0); + int32_t ret = tscGetMeterMeta(pSql, 
pMeterMetaInfo); if (ret != TSDB_CODE_SUCCESS) { tscError("%p get meter meta failed, abort", pSql); continue; } - int nrows = tscInsertDataFromFile(pSql, fp); + char *tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \" + if (NULL == tmpTokenBuf) { + tscError("%p calloc failed", pSql); + continue; + } + + int nrows = tscInsertDataFromFile(pSql, fp, tmpTokenBuf); + free(tmpTokenBuf); + pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); if (nrows < 0) { diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 532baec20545b941433268d15323c9f29a5bb27f..4ab63c18e9ad339664b1771bead46aedcb0c9d48 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -22,7 +22,7 @@ #include "tstrbuild.h" -int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db); +int tsParseInsertSql(SSqlObj *pSql); int taos_query_imp(STscObj* pObj, SSqlObj* pSql); //////////////////////////////////////////////////////////////////////////////// @@ -75,7 +75,6 @@ static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_ if (isParam) { ++stmt->numParams; } - return TSDB_CODE_SUCCESS; } @@ -122,11 +121,11 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { break; case TSDB_DATA_TYPE_FLOAT: - var->dKey = *(float*)tb->buffer; + var->dKey = GET_FLOAT_VAL(tb->buffer); break; case TSDB_DATA_TYPE_DOUBLE: - var->dKey = *(double*)tb->buffer; + var->dKey = GET_DOUBLE_VAL(tb->buffer); break; case TSDB_DATA_TYPE_BINARY: @@ -386,12 +385,11 @@ static int insertStmtAddBatch(STscStmt* stmt) { } static int insertStmtPrepare(STscStmt* stmt) { - STscObj* taos = stmt->taos; SSqlObj *pSql = stmt->pSql; pSql->cmd.numOfParams = 0; pSql->cmd.batchSize = 0; - return tsParseInsertSql(pSql, pSql->sqlstr, taos->acctId, taos->db); + return tsParseInsertSql(pSql); } static int insertStmtReset(STscStmt* pStmt) { @@ -409,7 +407,9 @@ static int insertStmtReset(STscStmt* pStmt) { } } pCmd->batchSize = 0; - 
pCmd->vnodeIdx = 0; + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + pMeterMetaInfo->vnodeIndex = 0; return TSDB_CODE_SUCCESS; } @@ -422,6 +422,9 @@ static int insertStmtExecute(STscStmt* stmt) { ++pCmd->batchSize; } + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + assert(pCmd->numOfClause == 1); + if (pCmd->pDataBlocks->nSize > 0) { // merge according to vgid int code = tscMergeTableDataBlocks(stmt->pSql, pCmd->pDataBlocks); @@ -436,7 +439,7 @@ static int insertStmtExecute(STscStmt* stmt) { } // set the next sent data vnode index in data block arraylist - pCmd->vnodeIdx = 1; + pMeterMetaInfo->vnodeIndex = 1; } else { pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); } @@ -445,6 +448,8 @@ static int insertStmtExecute(STscStmt* stmt) { SSqlRes *pRes = &pSql->res; pRes->numOfRows = 0; pRes->numOfTotal = 0; + pRes->numOfTotalInCurrentClause = 0; + pRes->qhandle = 0; pSql->thandle = NULL; diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index 61bc9dd99ee7254015f6c457843b70c4c60223d1..a7a774b3a8ce71a608d15ec9a71f931a7a59a06a 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -93,10 +93,10 @@ void tscSaveSlowQuery(SSqlObj *pSql) { const static int64_t SLOW_QUERY_INTERVAL = 3000000L; if (pSql->res.useconds < SLOW_QUERY_INTERVAL) return; - tscTrace("%p query time:%lld sql:%s", pSql, pSql->res.useconds, pSql->sqlstr); + tscTrace("%p query time:%" PRId64 " sql:%s", pSql, pSql->res.useconds, pSql->sqlstr); char *sql = malloc(200); - int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %lld, %lld, '", tsMonitorDbName, + int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %" PRId64 ", %" PRId64 ", '", tsMonitorDbName, pSql->pTscObj->user, pSql->stime, pSql->res.useconds); int sqlLen = snprintf(sql + len, TSDB_SHOW_SQL_LEN, "%s", pSql->sqlstr); if (sqlLen > TSDB_SHOW_SQL_LEN - 1) { @@ -197,13 +197,15 
@@ void tscKillStream(STscObj *pObj, uint32_t killId) { } pthread_mutex_unlock(&pObj->mutex); + + if (pStream) { + tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId); + } - tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId); - - taos_close_stream(pStream); if (pStream->callback) { pStream->callback(pStream->param); } + taos_close_stream(pStream); } char *tscBuildQueryStreamDesc(char *pMsg, STscObj *pObj) { @@ -283,8 +285,9 @@ void tscKillConnection(STscObj *pObj) { SSqlStream *pStream = pObj->streamList; while (pStream) { + SSqlStream *tmp = pStream->next; taos_close_stream(pStream); - pStream = pStream->next; + pStream = tmp; } pthread_mutex_unlock(&pObj->mutex); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b6b2c0fd1878cb55333944bab6a72b022b71cd2a..fc7db8f39566dae919728661dd6d31af9a74e1c1 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -20,14 +20,13 @@ #include "taos.h" #include "taosmsg.h" #include "tstoken.h" +#include "tstrbuild.h" #include "ttime.h" +#include "tscSQLParser.h" #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" -#include "tscSQLParser.h" - -#pragma GCC diagnostic ignored "-Wunused-variable" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" @@ -43,16 +42,10 @@ typedef struct SColumnList { SColumnIndex ids[TSDB_MAX_COLUMNS]; } SColumnList; -typedef struct SColumnIdListRes { - SSchema* pSchema; - int32_t numOfCols; - SColumnList list; -} SColumnIdListRes; - -static SSqlExpr* doAddProjectCol(SSqlCmd* pCmd, int32_t outputIndex, int32_t colIdx, int32_t tableIndex); +static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIdx, int32_t tableIndex); static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo); -static char* getAccountId(SSqlObj* pSql); +static char* getAccountId(SSqlObj* pSql); static bool has(tFieldList* pFieldList, int32_t startIdx, const char* name); static void 
getCurrentDBName(SSqlObj* pSql, SSQLToken* pDBToken); @@ -60,75 +53,82 @@ static bool hasSpecifyDB(SSQLToken* pTableName); static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd); static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSqlCmd* pCmd); -static int32_t setObjFullName(char* fullName, char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* len); +static int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* len); static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLength); static void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, char* columnName); -static int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem); -static int32_t insertResultField(SSqlCmd* pCmd, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type, - char* fieldName); +static int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIdx, tSQLExprItem* pItem); +static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, + int8_t type, char* fieldName); static int32_t changeFunctionID(int32_t optr, int16_t* functionId); -static int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric); +static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable); -static bool validateIpAddress(char* ip); -static bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd); -static bool functionCompatibleCheck(SSqlCmd* pCmd); -static void setColumnOffsetValueInResultset(SSqlCmd* pCmd); +static bool validateIpAddress(const char* ip, size_t size); +static bool hasUnsupportFunctionsForSTableQuery(SQueryInfo* pQueryInfo); +static bool functionCompatibleCheck(SQueryInfo* pQueryInfo); +static void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo); -static int32_t 
parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList); +static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd); -static int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql); -static int32_t setSlidingClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql); +static int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); +static int32_t parseSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); -static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, tSQLExprItem* pItem); +static int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pItem); -static int32_t parseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr); -static int32_t parseFillClause(SSqlCmd* pCmd, SQuerySQL* pQuerySQL); -static int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema, int32_t numOfCols); +static int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql); +static int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL); +static int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema); -static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd); +static int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo); static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); -static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd); +static int32_t validateSqlFunctionInStreamSql(SQueryInfo* pQueryInfo); static int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString); -static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd); -static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int32_t numOfCols, SColumnIdListRes* pList); +static int32_t validateFunctionsInIntervalOrGroupbyQuery(SQueryInfo* pQueryInfo); +static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList); static int32_t 
validateDNodeConfig(tDCLSQL* pOptions); static int32_t validateLocalConfig(tDCLSQL* pOptions); static int32_t validateColumnName(char* name); static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField); -static bool hasTimestampForPointInterpQuery(SSqlCmd* pCmd); -static void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex); +static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo); +static bool hasDefaultQueryTimeRange(SQueryInfo *pQueryInfo); + +static void updateTagColumnIndex(SQueryInfo* pQueryInfo, int32_t tableIndex); -static int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql); +static int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql); static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql); -static int32_t getColumnIndexByNameEx(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex); -static int32_t getTableIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex); +static int32_t getColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex); +static int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex); static int32_t optrToString(tSQLExpr* pExpr, char** exprString); -static int32_t getMeterIndex(SSQLToken* pTableToken, SSqlCmd* pCmd, SColumnIndex* pIndex); -static int32_t doFunctionsCompatibleCheck(SSqlObj* pSql); -static int32_t doLocalQueryProcess(SQuerySQL* pQuerySql, SSqlCmd* pCmd); -static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg *pCreate); +static int32_t getMeterIndex(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex); +static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); +static int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); +static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate); static 
SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex); +static int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo); +static int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo); +static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo); +static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index); + /* * Used during parsing query sql. Since the query sql usually small in length, error position * is not needed in the final error message. */ -static int32_t invalidSqlErrMsg(SSqlCmd *pCmd, const char* errMsg) { - return tscInvalidSQLErrMsg(pCmd->payload, errMsg, NULL); +static int32_t invalidSqlErrMsg(char* dstBuffer, const char* errMsg) { + return tscInvalidSQLErrMsg(dstBuffer, errMsg, NULL); } -static int32_t tscQueryOnlyMetricTags(SSqlCmd* pCmd, bool* queryOnMetricTags) { - assert(QUERY_IS_STABLE_QUERY(pCmd->type)); +static int32_t tscQueryOnlyMetricTags(SQueryInfo* pQueryInfo, bool* queryOnMetricTags) { + assert(QUERY_IS_STABLE_QUERY(pQueryInfo->type)); *queryOnMetricTags = true; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId != TSDB_FUNC_TAGPRJ && !(pExpr->functionId == TSDB_FUNC_COUNT && pExpr->colInfo.colIdx == TSDB_TBNAME_COLUMN_INDEX)) { @@ -140,21 +140,21 @@ static int32_t tscQueryOnlyMetricTags(SSqlCmd* pCmd, bool* queryOnMetricTags) { return TSDB_CODE_SUCCESS; } -static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, tVariant* pVar) { +static int setColumnFilterInfoForTimestamp(SQueryInfo* pQueryInfo, tVariant* pVar) { int64_t time = 0; const char* msg = "invalid timestamp"; strdequote(pVar->pz); char* seg = strnchr(pVar->pz, '-', pVar->nLen, false); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo 
= tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); if (seg != NULL) { if (taosParseTime(pVar->pz, &time, pVar->nLen, pMeterMetaInfo->pMeterMeta->precision) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg); } } else { if (tVariantDump(pVar, (char*)&time, TSDB_DATA_TYPE_BIGINT)) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg); } } @@ -164,154 +164,137 @@ static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, tVariant* pVar) { return TSDB_CODE_SUCCESS; } +static int32_t handlePassword(SSqlCmd* pCmd, SSQLToken* pPwd) { + const char* msg1 = "password can not be empty"; + const char* msg2 = "name or password too long"; + const char* msg3 = "password needs single quote marks enclosed"; + + if (pPwd->type != TK_STRING) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + + strdequote(pPwd->z); + strtrim(pPwd->z); // trim space before and after passwords + pPwd->n = strlen(pPwd->z); + + if (pPwd->n <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pPwd->n > TSDB_PASSWORD_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + return TSDB_CODE_SUCCESS; +} + // todo handle memory leak in error handle function int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (pInfo == NULL || pSql == NULL || pSql->signature != pSql) { return TSDB_CODE_APP_ERROR; } - SSqlCmd* pCmd = &(pSql->cmd); + SSqlCmd* pCmd = &(pSql->cmd); + SQueryInfo* pQueryInfo = NULL; - if (!pInfo->validSql) { - return invalidSqlErrMsg(pCmd, pInfo->pzErrMsg); + if (!pInfo->valid) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), pInfo->pzErrMsg); } - SMeterMetaInfo* pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd); + int32_t code = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo); + assert(pQueryInfo->numOfTables == 0); + + SMeterMetaInfo* pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo); - // transfer 
pInfo into select operation - switch (pInfo->sqlType) { - case DROP_TABLE: - case DROP_USER: - case DROP_ACCOUNT: - case DROP_DNODE: - case DROP_DATABASE: { - const char* msg = "param name too long"; + pCmd->command = pInfo->type; + + switch (pInfo->type) { + case TSDB_SQL_DROP_TABLE: + case TSDB_SQL_DROP_USER: + case TSDB_SQL_DROP_ACCT: + case TSDB_SQL_DROP_DNODE: + case TSDB_SQL_DROP_DB: { const char* msg1 = "invalid ip address"; const char* msg2 = "invalid name"; + const char* msg3 = "param name too long"; SSQLToken* pzName = &pInfo->pDCLInfo->a[0]; - if ((pInfo->sqlType != DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) { - return invalidSqlErrMsg(pCmd, msg2); + if ((pInfo->type != TSDB_SQL_DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - if (pInfo->sqlType == DROP_DATABASE) { - assert(pInfo->pDCLInfo->nTokens == 2); - - pCmd->command = TSDB_SQL_DROP_DB; - pCmd->existsCheck = (pInfo->pDCLInfo->a[1].n == 1); + if (pInfo->type == TSDB_SQL_DROP_DB) { + assert(pInfo->pDCLInfo->nTokens == 1); - int32_t code = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pzName, NULL, NULL); + code = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pzName, NULL, NULL); if (code != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - return code; - } else if (pInfo->sqlType == DROP_TABLE) { - assert(pInfo->pDCLInfo->nTokens == 2); - - pCmd->existsCheck = (pInfo->pDCLInfo->a[1].n == 1); - pCmd->command = TSDB_SQL_DROP_TABLE; + } else if (pInfo->type == TSDB_SQL_DROP_TABLE) { + assert(pInfo->pDCLInfo->nTokens == 1); - int32_t ret = setMeterID(pSql, pzName, 0); - if (ret != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg); + if (setMeterID(pMeterMetaInfo, pzName, pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } - return ret; - } else { - if (pzName->n > 
TSDB_USER_LEN) { - return invalidSqlErrMsg(pCmd, msg); + } else if (pInfo->type == TSDB_SQL_DROP_DNODE) { + if (!validateIpAddress(pzName->z, pzName->n)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pInfo->sqlType == DROP_USER) { - pCmd->command = TSDB_SQL_DROP_USER; - } else if (pInfo->sqlType == DROP_ACCOUNT) { - pCmd->command = TSDB_SQL_DROP_ACCT; - } else if (pInfo->sqlType == DROP_DNODE) { - pCmd->command = TSDB_SQL_DROP_DNODE; - const int32_t MAX_IP_ADDRESS_LEGNTH = 16; - - if (pzName->n > MAX_IP_ADDRESS_LEGNTH) { - return invalidSqlErrMsg(pCmd, msg1); - } - - char str[128] = {0}; - strncpy(str, pzName->z, pzName->n); - if (!validateIpAddress(str)) { - return invalidSqlErrMsg(pCmd, msg1); - } + strncpy(pMeterMetaInfo->name, pzName->z, pzName->n); + } else { // drop user + if (pzName->n > TSDB_USER_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } strncpy(pMeterMetaInfo->name, pzName->z, pzName->n); - return TSDB_CODE_SUCCESS; } - } - case USE_DATABASE: { - const char* msg = "db name too long"; - pCmd->command = TSDB_SQL_USE_DB; + break; + } - SSQLToken* pToken = &pInfo->pDCLInfo->a[0]; + case TSDB_SQL_USE_DB: { + const char* msg = "invalid db name"; + SSQLToken* pToken = &pInfo->pDCLInfo->a[0]; if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, "invalid db name"); - } - - if (pToken->n > TSDB_DB_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } int32_t ret = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pToken, NULL, NULL); if (ret != TSDB_CODE_SUCCESS) { - return ret; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } break; } - case RESET_QUERY_CACHE: { - pCmd->command = TSDB_SQL_RESET_CACHE; + case TSDB_SQL_RESET_CACHE: { + return TSDB_CODE_SUCCESS; + } + + case TSDB_SQL_SHOW: { + if (setShowInfo(pSql, pInfo) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + break; } - case 
SHOW_DATABASES: - case SHOW_TABLES: - case SHOW_STABLES: - case SHOW_MNODES: - case SHOW_DNODES: - case SHOW_ACCOUNTS: - case SHOW_USERS: - case SHOW_VGROUPS: - case SHOW_MODULES: - case SHOW_CONNECTIONS: - case SHOW_QUERIES: - case SHOW_STREAMS: - case SHOW_SCORES: - case SHOW_GRANTS: - case SHOW_CONFIGS: - case SHOW_VNODES: { - return setShowInfo(pSql, pInfo); - } - - case ALTER_DATABASE: - case CREATE_DATABASE: { + case TSDB_SQL_ALTER_DB: + case TSDB_SQL_CREATE_DB: { + const char* msg1 = "invalid db name"; const char* msg2 = "name too long"; - const char* msg3 = "invalid db name"; - - if (pInfo->sqlType == ALTER_DATABASE) { - pCmd->command = TSDB_SQL_ALTER_DB; - } else { - pCmd->command = TSDB_SQL_CREATE_DB; - pCmd->existsCheck = (pInfo->pDCLInfo->a[0].n == 1); - } SCreateDBInfo* pCreateDB = &(pInfo->pDCLInfo->dbOpt); if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } int32_t ret = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname), NULL, NULL); if (ret != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } if (parseCreateDBOptions(pCmd, pCreateDB) != TSDB_CODE_SUCCESS) { @@ -321,283 +304,167 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { break; } - case CREATE_DNODE: { - // todo parse hostname - pCmd->command = TSDB_SQL_CREATE_DNODE; + case TSDB_SQL_CREATE_DNODE: { // todo parse hostname const char* msg = "invalid ip address"; - char ipAddr[64] = {0}; - const int32_t MAX_IP_ADDRESS_LENGTH = 16; - if (pInfo->pDCLInfo->nTokens > 1 || pInfo->pDCLInfo->a[0].n > MAX_IP_ADDRESS_LENGTH) { - return invalidSqlErrMsg(pCmd, msg); - } - - memcpy(ipAddr, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); - if (validateIpAddress(ipAddr) == false) { - return invalidSqlErrMsg(pCmd, msg); - } - - strncpy(pMeterMetaInfo->name, pInfo->pDCLInfo->a[0].z, 
pInfo->pDCLInfo->a[0].n); - break; - } - - case CREATE_ACCOUNT: - case CREATE_USER: { - pCmd->command = (pInfo->sqlType == CREATE_USER) ? TSDB_SQL_CREATE_USER : TSDB_SQL_CREATE_ACCT; - assert(pInfo->pDCLInfo->nTokens >= 2); - - const char* msg = "name or password too long"; - const char* msg1 = "password can not be empty"; - const char* msg2 = "invalid user/account name"; - const char* msg3 = "password needs single quote marks enclosed"; - const char* msg4 = "invalid state option, available options[no, r, w, all]"; - - if (pInfo->pDCLInfo->a[1].type != TK_STRING) { - return invalidSqlErrMsg(pCmd, msg3); - } - - strdequote(pInfo->pDCLInfo->a[1].z); - strtrim(pInfo->pDCLInfo->a[1].z); // trim space before and after passwords - pInfo->pDCLInfo->a[1].n = strlen(pInfo->pDCLInfo->a[1].z); - - if (pInfo->pDCLInfo->a[1].n <= 0) { - return invalidSqlErrMsg(pCmd, msg1); - } - - if (pInfo->pDCLInfo->a[0].n > TSDB_USER_LEN || pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { - return invalidSqlErrMsg(pCmd, msg); + if (pInfo->pDCLInfo->nTokens > 1) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - if (tscValidateName(&pInfo->pDCLInfo->a[0]) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg2); + SSQLToken* pIpAddr = &pInfo->pDCLInfo->a[0]; + if (!validateIpAddress(pIpAddr->z, pIpAddr->n)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - strncpy(pMeterMetaInfo->name, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); // name - strncpy(pCmd->payload, pInfo->pDCLInfo->a[1].z, pInfo->pDCLInfo->a[1].n); // passwd - - if (pInfo->sqlType == CREATE_ACCOUNT) { - SCreateAcctSQL* pAcctOpt = &pInfo->pDCLInfo->acctOpt; - - pCmd->defaultVal[0] = pAcctOpt->users; - pCmd->defaultVal[1] = pAcctOpt->dbs; - pCmd->defaultVal[2] = pAcctOpt->tseries; - pCmd->defaultVal[3] = pAcctOpt->streams; - pCmd->defaultVal[4] = pAcctOpt->pps; - pCmd->defaultVal[5] = pAcctOpt->storage; - pCmd->defaultVal[6] = pAcctOpt->qtime; - pCmd->defaultVal[7] = pAcctOpt->conns; - - 
if (pAcctOpt->stat.n == 0) { - pCmd->defaultVal[8] = -1; - } else { - strdequote(pAcctOpt->stat.z); - pAcctOpt->stat.n = strlen(pAcctOpt->stat.z); - - if (pAcctOpt->stat.z[0] == 'r' && pAcctOpt->stat.n == 1) { - pCmd->defaultVal[8] = TSDB_VN_READ_ACCCESS; - } else if (pAcctOpt->stat.z[0] == 'w' && pAcctOpt->stat.n == 1) { - pCmd->defaultVal[8] = TSDB_VN_WRITE_ACCCESS; - } else if (strncmp(pAcctOpt->stat.z, "all", 3) == 0 && pAcctOpt->stat.n == 3) { - pCmd->defaultVal[8] = TSDB_VN_ALL_ACCCESS; - } else if (strncmp(pAcctOpt->stat.z, "no", 2) == 0 && pAcctOpt->stat.n == 2) { - pCmd->defaultVal[8] = 0; - } else { - return invalidSqlErrMsg(pCmd, msg4); - } - } - } break; } - case ALTER_ACCT: { - pCmd->command = TSDB_SQL_ALTER_ACCT; - int32_t num = pInfo->pDCLInfo->nTokens; - assert(num >= 1 && num <= 2); - const char* msg = "password too long"; - const char* msg1 = "password can not be empty"; + case TSDB_SQL_CREATE_ACCT: + case TSDB_SQL_ALTER_ACCT: { + const char* msg1 = "invalid state option, available options[no, r, w, all]"; const char* msg2 = "invalid user/account name"; - const char* msg3 = "password needs single quote marks enclosed"; - const char* msg4 = "invalid state option, available options[no, r, w, all]"; - - if (num == 2) { - if (pInfo->pDCLInfo->a[1].type != TK_STRING) { - return invalidSqlErrMsg(pCmd, msg3); - } - - strdequote(pInfo->pDCLInfo->a[1].z); - strtrim(pInfo->pDCLInfo->a[1].z); // trim space before and after passwords - pInfo->pDCLInfo->a[1].n = strlen(pInfo->pDCLInfo->a[1].z); - - if (pInfo->pDCLInfo->a[1].n <= 0) { - return invalidSqlErrMsg(pCmd, msg1); - } + const char* msg3 = "name too long"; - if (pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { - return invalidSqlErrMsg(pCmd, msg); - } + SSQLToken* pName = &pInfo->pDCLInfo->user.user; + SSQLToken* pPwd = &pInfo->pDCLInfo->user.passwd; - strncpy(pCmd->payload, pInfo->pDCLInfo->a[1].z, pInfo->pDCLInfo->a[1].n); // passwd + if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) { + return 
TSDB_CODE_INVALID_SQL; } - if (pInfo->pDCLInfo->a[0].n > TSDB_USER_LEN) { - return invalidSqlErrMsg(pCmd, msg); + if (pName->n > TSDB_USER_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } - if (tscValidateName(&pInfo->pDCLInfo->a[0]) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg2); + if (tscValidateName(pName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - strncpy(pMeterMetaInfo->name, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); // name - SCreateAcctSQL* pAcctOpt = &pInfo->pDCLInfo->acctOpt; - pCmd->defaultVal[0] = pAcctOpt->users; - pCmd->defaultVal[1] = pAcctOpt->dbs; - pCmd->defaultVal[2] = pAcctOpt->tseries; - pCmd->defaultVal[3] = pAcctOpt->streams; - pCmd->defaultVal[4] = pAcctOpt->pps; - pCmd->defaultVal[5] = pAcctOpt->storage; - pCmd->defaultVal[6] = pAcctOpt->qtime; - pCmd->defaultVal[7] = pAcctOpt->conns; - - if (pAcctOpt->stat.n == 0) { - pCmd->defaultVal[8] = -1; - } else { - strdequote(pAcctOpt->stat.z); - pAcctOpt->stat.n = strlen(pAcctOpt->stat.z); - + if (pAcctOpt->stat.n > 0) { if (pAcctOpt->stat.z[0] == 'r' && pAcctOpt->stat.n == 1) { - pCmd->defaultVal[8] = TSDB_VN_READ_ACCCESS; } else if (pAcctOpt->stat.z[0] == 'w' && pAcctOpt->stat.n == 1) { - pCmd->defaultVal[8] = TSDB_VN_WRITE_ACCCESS; } else if (strncmp(pAcctOpt->stat.z, "all", 3) == 0 && pAcctOpt->stat.n == 3) { - pCmd->defaultVal[8] = TSDB_VN_ALL_ACCCESS; } else if (strncmp(pAcctOpt->stat.z, "no", 2) == 0 && pAcctOpt->stat.n == 2) { - pCmd->defaultVal[8] = 0; } else { - return invalidSqlErrMsg(pCmd, msg4); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } } + break; } - case DESCRIBE_TABLE: { - pCmd->command = TSDB_SQL_DESCRIBE_TABLE; + case TSDB_SQL_DESCRIBE_TABLE: { SSQLToken* pToken = &pInfo->pDCLInfo->a[0]; - const char* msg = "table name is too long"; + const char* msg2 = "table name is too long"; const char* msg1 = "invalid table name"; if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) 
{ - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } if (pToken->n > TSDB_METER_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - if (setMeterID(pSql, pToken, 0) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); - } - - int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + if (setMeterID(pMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - break; + return tscGetMeterMeta(pSql, pMeterMetaInfo); } - case ALTER_DNODE: - case ALTER_USER_PASSWD: - case ALTER_USER_PRIVILEGES: { - pCmd->command = (pInfo->sqlType == ALTER_DNODE) ? TSDB_SQL_CFG_DNODE : TSDB_SQL_ALTER_USER; - tDCLSQL* pDCL = pInfo->pDCLInfo; - - const char* msg = "parameters too long"; + case TSDB_SQL_CFG_DNODE: { const char* msg1 = "invalid ip address"; const char* msg2 = "invalid configure options or values"; - const char* msg3 = "password can not be empty"; - if (pInfo->sqlType != ALTER_DNODE) { - strdequote(pDCL->a[1].z); - strtrim(pDCL->a[1].z); - pDCL->a[1].n = strlen(pDCL->a[1].z); + /* validate the ip address */ + tDCLSQL* pDCL = pInfo->pDCLInfo; + if (!validateIpAddress(pDCL->a[0].z, pDCL->a[0].n)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pDCL->a[1].n <= 0) { - return invalidSqlErrMsg(pCmd, msg3); + /* validate the parameter names and options */ + if (validateDNodeConfig(pDCL) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - if (pDCL->a[0].n > TSDB_METER_NAME_LEN || pDCL->a[1].n > TSDB_PASSWORD_LEN) { - return invalidSqlErrMsg(pCmd, msg); - } + char* pMsg = pCmd->payload + tsRpcHeadSize; + pMsg += sizeof(SMgmtHead); - if (pCmd->command == TSDB_SQL_CFG_DNODE) { - char ip[128] = {0}; - strncpy(ip, pDCL->a[0].z, pDCL->a[0].n); + SCfgMsg* pCfg = (SCfgMsg*)pMsg; + strncpy(pCfg->ip, 
pDCL->a[0].z, pDCL->a[0].n); - /* validate the ip address */ - if (!validateIpAddress(ip)) { - return invalidSqlErrMsg(pCmd, msg1); - } + strncpy(pCfg->config, pDCL->a[1].z, pDCL->a[1].n); - strcpy(pMeterMetaInfo->name, ip); + if (pDCL->nTokens == 3) { + pCfg->config[pDCL->a[1].n] = ' '; // add sep + strncpy(&pCfg->config[pDCL->a[1].n + 1], pDCL->a[2].z, pDCL->a[2].n); + } - /* validate the parameter names and options */ - if (validateDNodeConfig(pDCL) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg2); - } + break; + } - strncpy(pCmd->payload, pDCL->a[1].z, pDCL->a[1].n); + case TSDB_SQL_CREATE_USER: + case TSDB_SQL_ALTER_USER: { + const char* msg5 = "invalid user rights"; + const char* msg7 = "not support options"; + const char* msg2 = "invalid user/account name"; + const char* msg3 = "name too long"; - if (pDCL->nTokens == 3) { - pCmd->payload[pDCL->a[1].n] = ' '; // add sep - strncpy(&pCmd->payload[pDCL->a[1].n + 1], pDCL->a[2].z, pDCL->a[2].n); - } - } else { - const char* msg = "invalid user rights"; - const char* msg1 = "password can not be empty or larger than 24 characters"; + pCmd->command = pInfo->type; + //tDCLSQL* pDCL = pInfo->pDCLInfo; - strncpy(pMeterMetaInfo->name, pDCL->a[0].z, pDCL->a[0].n); + SUserInfo* pUser = &pInfo->pDCLInfo->user; + SSQLToken* pName = &pUser->user; + SSQLToken* pPwd = &pUser->passwd; - if (pInfo->sqlType == ALTER_USER_PASSWD) { - /* update the password for user */ - pCmd->order.order |= TSDB_ALTER_USER_PASSWD; + if (pName->n > TSDB_USER_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } - strdequote(pDCL->a[1].z); - pDCL->a[1].n = strlen(pDCL->a[1].z); + if (tscValidateName(pName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } - if (pDCL->a[1].n <= 0 || pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { - /* password cannot be empty string */ - return invalidSqlErrMsg(pCmd, msg1); + if (pCmd->command == TSDB_SQL_CREATE_USER) { + if (handlePassword(pCmd, 
pPwd) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + } else { + if (pUser->type == TSDB_ALTER_USER_PASSWD) { + if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; } + } else if (pUser->type == TSDB_ALTER_USER_PRIVILEGES) { + assert(pPwd->type == TSDB_DATA_TYPE_NULL); - strncpy(pCmd->payload, pDCL->a[1].z, pDCL->a[1].n); - } else if (pInfo->sqlType == ALTER_USER_PRIVILEGES) { - pCmd->order.order |= TSDB_ALTER_USER_PRIVILEGES; + SSQLToken* pPrivilege = &pUser->privilege; - if (strncasecmp(pDCL->a[1].z, "super", 5) == 0 && pDCL->a[1].n == 5) { + if (strncasecmp(pPrivilege->z, "super", 5) == 0 && pPrivilege->n == 5) { pCmd->count = 1; - } else if (strncasecmp(pDCL->a[1].z, "read", 4) == 0 && pDCL->a[1].n == 4) { + } else if (strncasecmp(pPrivilege->z, "read", 4) == 0 && pPrivilege->n == 4) { pCmd->count = 2; - } else if (strncasecmp(pDCL->a[1].z, "write", 5) == 0 && pDCL->a[1].n == 5) { + } else if (strncasecmp(pPrivilege->z, "write", 5) == 0 && pPrivilege->n == 5) { pCmd->count = 3; } else { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } } else { - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } } + break; } - case ALTER_LOCAL: { - pCmd->command = TSDB_SQL_CFG_LOCAL; + + case TSDB_SQL_CFG_LOCAL: { tDCLSQL* pDCL = pInfo->pDCLInfo; const char* msg = "invalid configure options or values"; // validate the parameter names and options if (validateLocalConfig(pDCL) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } strncpy(pCmd->payload, pDCL->a[0].z, pDCL->a[0].n); @@ -608,453 +475,100 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { break; } - case TSQL_CREATE_NORMAL_METER: - case TSQL_CREATE_NORMAL_METRIC: { - const char* msg = "table name too long"; - const char* msg1 = "invalid table name"; - - tFieldList* pFieldList = 
pInfo->pCreateTableInfo->colInfo.pColumns; - tFieldList* pTagList = pInfo->pCreateTableInfo->colInfo.pTagColumns; - assert(pFieldList != NULL); - - pCmd->command = TSDB_SQL_CREATE_TABLE; - pCmd->existsCheck = pInfo->pCreateTableInfo->existCheck; - - // if sql specifies db, use it, otherwise use default db - SSQLToken* pzTableName = &(pInfo->pCreateTableInfo->name); - - if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - if (setMeterID(pSql, pzTableName, 0) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); - } - - if (!validateTableColumnInfo(pFieldList, pCmd) || - (pTagList != NULL && !validateTagParams(pTagList, pFieldList, pCmd))) { - return TSDB_CODE_INVALID_SQL; - } - - int32_t col = 0; - for (; col < pFieldList->nField; ++col) { - tscFieldInfoSetValFromField(&pCmd->fieldsInfo, col, &pFieldList->p[col]); - } - pCmd->numOfCols = (int16_t)pFieldList->nField; - - if (pTagList != NULL) { // create metric[optional] - for (int32_t i = 0; i < pTagList->nField; ++i) { - tscFieldInfoSetValFromField(&pCmd->fieldsInfo, col++, &pTagList->p[i]); - } - pCmd->count = pTagList->nField; - } - - break; - } - case TSQL_CREATE_METER_FROM_METRIC: { - pCmd->command = TSDB_SQL_CREATE_TABLE; - pCmd->existsCheck = pInfo->pCreateTableInfo->existCheck; - - const char* msg = "invalid table name"; - const char* msg1 = "illegal value or data overflow"; - const char* msg2 = "illegal number of tags"; - const char* msg3 = "tag value too long"; - - // table name - // metric name, create table by using dst - SSQLToken* pToken = &(pInfo->pCreateTableInfo->usingInfo.metricName); - - if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); - } - - if (setMeterID(pSql, pToken, 0) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); - } - - // get meter meta from mnode - STagData* pTag = (STagData*)pCmd->payload; - strncpy(pTag->name, pMeterMetaInfo->name, TSDB_METER_ID_LEN); - - tVariantList* 
pList = pInfo->pCreateTableInfo->usingInfo.pTagVals; - - int32_t code = tscGetMeterMeta(pSql, pTag->name, 0); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - if (pMeterMetaInfo->pMeterMeta->numOfTags != pList->nExpr) { - return invalidSqlErrMsg(pCmd, msg2); - } - - // too long tag values will return invalid sql, not be truncated automatically - SSchema* pTagSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); - - char* tagVal = pTag->data; - for (int32_t i = 0; i < pList->nExpr; ++i) { - int32_t ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type); - if (ret != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - // validate the length of binary - if ((pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) && - pList->a[i].pVar.nLen > pTagSchema[i].bytes) { - return invalidSqlErrMsg(pCmd, msg3); - } - - tagVal += pTagSchema[i].bytes; - } - - if (tscValidateName(&pInfo->pCreateTableInfo->name) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); - } - - int32_t ret = setMeterID(pSql, &pInfo->pCreateTableInfo->name, 0); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - - pCmd->numOfCols = 0; - pCmd->count = 0; - break; - } - case TSQL_CREATE_STREAM: { - pCmd->command = TSDB_SQL_CREATE_TABLE; - const char* msg1 = "invalid table name"; - const char* msg2 = "table name too long"; - const char* msg3 = "fill only available for interval query"; - const char* msg4 = "fill option not supported in stream computing"; - const char* msg5 = "sql too long"; // todo ADD support - - // if sql specifies db, use it, otherwise use default db - SSQLToken* pzTableName = &(pInfo->pCreateTableInfo->name); - SQuerySQL* pQuerySql = pInfo->pCreateTableInfo->pSelect; - - if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - tVariantList* pSrcMeterName = pInfo->pCreateTableInfo->pSelect->from; - tVariant* pVar = &pSrcMeterName->a[0].pVar; + case 
TSDB_SQL_CREATE_TABLE: { + SCreateTableSQL* pCreateTable = pInfo->pCreateTableInfo; - SSQLToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING}; - if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - if (setMeterID(pSql, &srcToken, 0) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg2); - } - - int32_t code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - bool isMetric = UTIL_METER_IS_METRIC(pMeterMetaInfo); - if (parseSelectClause(pCmd, pQuerySql->pSelection, isMetric) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - if (pQuerySql->pWhere != NULL) { // query condition in stream computing - if (parseWhereClause(pSql, &pQuerySql->pWhere) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - } - - // set interval value - if (parseIntervalClause(pCmd, pQuerySql) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } else { - if ((pCmd->nAggTimeInterval > 0) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd) != TSDB_CODE_SUCCESS)) { - return TSDB_CODE_INVALID_SQL; + if (pCreateTable->type == TSQL_CREATE_TABLE || pCreateTable->type == TSQL_CREATE_STABLE) { + if ((code = doCheckForCreateTable(pSql, 0, pInfo)) != TSDB_CODE_SUCCESS) { + return code; } - } - - if (setSlidingClause(pCmd, pQuerySql) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - // set the created table[stream] name - if (setMeterID(pSql, pzTableName, 0) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - // copy sql length - int ret = tscAllocPayload(pCmd, pQuerySql->selectToken.n + 8); - if (TSDB_CODE_SUCCESS != ret) { - invalidSqlErrMsg(pCmd, "client out of memory"); - return ret; - } - - strncpy(pCmd->payload, pQuerySql->selectToken.z, pQuerySql->selectToken.n); - if (pQuerySql->selectToken.n > TSDB_MAX_SAVED_SQL_LEN) { - return invalidSqlErrMsg(pCmd, msg5); - } - - if (tsRewriteFieldNameIfNecessary(pCmd) != 
TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - pCmd->numOfCols = pCmd->fieldsInfo.numOfOutputCols; - - if (validateSqlFunctionInStreamSql(pCmd) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - /* - * check if fill operation is available, the fill operation is parsed and executed during query execution, not - * here. - */ - if (pQuerySql->fillType != NULL) { - if (pCmd->nAggTimeInterval == 0) { - return invalidSqlErrMsg(pCmd, msg3); + } else if (pCreateTable->type == TSQL_CREATE_TABLE_FROM_STABLE) { + if ((code = doCheckForCreateFromStable(pSql, pInfo)) != TSDB_CODE_SUCCESS) { + return code; } - tVariantListItem* pItem = &pQuerySql->fillType->a[0]; - if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) { - if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) || - (strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) { - return invalidSqlErrMsg(pCmd, msg4); - } + } else if (pCreateTable->type == TSQL_CREATE_STREAM) { + if ((code = doCheckForStream(pSql, pInfo)) != TSDB_CODE_SUCCESS) { + return code; } } break; } - case TSQL_QUERY_METER: { - SQuerySQL* pQuerySql = pInfo->pQueryInfo; - assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); - - const char* msg0 = "invalid table name"; - const char* msg1 = "table name too long"; - const char* msg2 = "point interpolation query needs timestamp"; - const char* msg3 = "sliding value too small"; - const char* msg4 = "sliding value no larger than the interval value"; - const char* msg5 = "fill only available for interval query"; - const char* msg6 = "start(end) time of query range required or time range too large"; - const char* msg7 = "illegal number of tables in from clause"; - const char* msg8 = "too many columns in selection clause"; - const char* msg9 = "TWA query requires both the start and end time"; + case TSDB_SQL_SELECT: { + assert(pCmd->numOfClause == 1); + const char* msg1 = "columns in select clause not identical"; - int32_t 
code = TSDB_CODE_SUCCESS; - - // too many result columns not support order by in query - if (pQuerySql->pSelection->nExpr > TSDB_MAX_COLUMNS) { - return invalidSqlErrMsg(pCmd, msg8); - } - - /* - * handle the sql expression without from subclause - * select current_database(); - * select server_version(); - * select client_version(); - * select server_state(); - */ - if (pQuerySql->from == NULL) { - assert(pQuerySql->fillType == NULL && pQuerySql->pGroupby == NULL && pQuerySql->pWhere == NULL && - pQuerySql->pSortOrder == NULL); - return doLocalQueryProcess(pQuerySql, pCmd); - } - - if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM) { - return invalidSqlErrMsg(pCmd, msg7); - } - - // set all query tables, which are maybe more than one. - for (int32_t i = 0; i < pQuerySql->from->nExpr; ++i) { - tVariant* pTableItem = &pQuerySql->from->a[i].pVar; - - if (pTableItem->nType != TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(pCmd, msg0); - } - - pTableItem->nLen = strdequote(pTableItem->pz); - - SSQLToken tableName = {.z = pTableItem->pz, .n = pTableItem->nLen, .type = TK_STRING}; - if (tscValidateName(&tableName) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg0); - } - - if (pCmd->numOfTables <= i) { - tscAddEmptyMeterMetaInfo(pCmd); - } - - SSQLToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; - if (setMeterID(pSql, &t, i) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); - } - - SMeterMetaInfo* pMeterInfo1 = tscGetMeterMetaInfo(pCmd, i); - code = tscGetMeterMeta(pSql, pMeterInfo1->name, i); - if (code != TSDB_CODE_SUCCESS) { + for (int32_t i = pCmd->numOfClause; i < pInfo->subclauseInfo.numOfClause; ++i) { + SQueryInfo* pqi = NULL; + if ((code = tscGetQueryInfoDetailSafely(pCmd, i, &pqi)) != TSDB_CODE_SUCCESS) { return code; } } - pSql->cmd.command = TSDB_SQL_SELECT; - - // parse the group by clause in the first place - if (parseGroupbyClause(pCmd, pQuerySql->pGroupby) != TSDB_CODE_SUCCESS) { - 
return TSDB_CODE_INVALID_SQL; - } - - bool isMetric = UTIL_METER_IS_METRIC(pMeterMetaInfo); - if (parseSelectClause(pCmd, pQuerySql->pSelection, isMetric) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - // set interval value - if (parseIntervalClause(pCmd, pQuerySql) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } else { - if ((pCmd->nAggTimeInterval > 0) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd) != TSDB_CODE_SUCCESS)) { - return TSDB_CODE_INVALID_SQL; - } - } - - // set sliding value - SSQLToken* pSliding = &pQuerySql->sliding; - if (pSliding->n != 0) { - // TODO refactor pCmd->count == 1 means sql in stream function - if (!tscEmbedded && pCmd->count == 0) { - const char* msg = "not support sliding in query"; - return invalidSqlErrMsg(pCmd, msg); - } - - getTimestampInUsFromStr(pSliding->z, pSliding->n, &pCmd->nSlidingTime); - if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { - pCmd->nSlidingTime /= 1000; - } + assert(pCmd->numOfClause == pInfo->subclauseInfo.numOfClause); + for (int32_t i = 0; i < pInfo->subclauseInfo.numOfClause; ++i) { + SQuerySQL* pQuerySql = pInfo->subclauseInfo.pClause[i]; - if (pCmd->nSlidingTime < tsMinSlidingTime) { - return invalidSqlErrMsg(pCmd, msg3); - } - - if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { - return invalidSqlErrMsg(pCmd, msg4); - } - } - - // set order by info - if (parseOrderbyClause(pCmd, pQuerySql, tsGetSchema(pMeterMetaInfo->pMeterMeta), - pMeterMetaInfo->pMeterMeta->numOfColumns) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - // set where info - if (pQuerySql->pWhere != NULL) { - if (parseWhereClause(pSql, &pQuerySql->pWhere) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; - } - - pQuerySql->pWhere = NULL; - - if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { - pCmd->stime = pCmd->stime / 1000; - pCmd->etime = pCmd->etime / 1000; - } - } else { // set the time rang - pCmd->stime = 0; - pCmd->etime = 
INT64_MAX; - } - - // user does not specified the query time window, twa is not allowed in such case. - if ((pCmd->stime == 0 || pCmd->etime == INT64_MAX || - (pCmd->etime == INT64_MAX / 1000 && pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI)) && - tscIsTWAQuery(pCmd)) { - return invalidSqlErrMsg(pCmd, msg9); - } - - // no result due to invalid query time range - if (pCmd->stime > pCmd->etime) { - pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; - return TSDB_CODE_SUCCESS; - } - - if (!hasTimestampForPointInterpQuery(pCmd)) { - return invalidSqlErrMsg(pCmd, msg2); - } - - if (pQuerySql->fillType != NULL) { - if (pCmd->nAggTimeInterval == 0 && (!tscIsPointInterpQuery(pCmd))) { - return invalidSqlErrMsg(pCmd, msg5); - } - - if (pCmd->nAggTimeInterval > 0) { - int64_t timeRange = labs(pCmd->stime - pCmd->etime); - // number of result is not greater than 10,000,000 - if ((timeRange == 0) || (timeRange / pCmd->nAggTimeInterval) > MAX_RETRIEVE_ROWS_IN_INTERVAL_QUERY) { - return invalidSqlErrMsg(pCmd, msg6); - } - } - - int32_t ret = parseFillClause(pCmd, pQuerySql); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + if ((code = doCheckForQuery(pSql, pQuerySql, i)) != TSDB_CODE_SUCCESS) { + return code; } + + tscPrintSelectClause(pSql, i); } - - // in case of join query, time range is required. - if (QUERY_IS_JOIN_QUERY(pCmd->type)) { - int64_t timeRange = labs(pCmd->stime - pCmd->etime); - - if (timeRange == 0 && pCmd->stime == 0) { - return invalidSqlErrMsg(pCmd, msg6); + + // set the command/global limit parameters from the first subclause to the sqlcmd object + SQueryInfo* pQueryInfo1 = tscGetQueryInfoDetail(pCmd, 0); + pCmd->command = pQueryInfo1->command; + + // if there is only one element, the limit of clause is the limit of global result. 
+ for(int32_t i = 1; i < pCmd->numOfClause; ++i) { + SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pCmd, i); + + int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo); + if (ret != 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } } - // handle the limit offset value, validate the limit - pCmd->limit = pQuerySql->limit; + return TSDB_CODE_SUCCESS; // do not build query message here + } - // temporarily save the original limitation value - if ((code = parseLimitClause(pSql, pQuerySql)) != TSDB_CODE_SUCCESS) { + case TSDB_SQL_ALTER_TABLE: { + if ((code = setAlterTableInfo(pSql, pInfo)) != TSDB_CODE_SUCCESS) { return code; } - if ((code = doFunctionsCompatibleCheck(pSql)) != TSDB_CODE_SUCCESS) { + break; + } + + case TSDB_SQL_KILL_QUERY: + case TSDB_SQL_KILL_STREAM: + case TSDB_SQL_KILL_CONNECTION: { + if ((code = setKillInfo(pSql, pInfo)) != TSDB_CODE_SUCCESS) { return code; } - setColumnOffsetValueInResultset(pCmd); - updateTagColumnIndex(pCmd, 0); - break; } - case TSQL_INSERT: { - assert(false); - } - case ALTER_TABLE_ADD_COLUMN: - case ALTER_TABLE_DROP_COLUMN: - case ALTER_TABLE_TAGS_ADD: - case ALTER_TABLE_TAGS_DROP: - case ALTER_TABLE_TAGS_CHG: - case ALTER_TABLE_TAGS_SET: { - return setAlterTableInfo(pSql, pInfo); - } - - case KILL_CONNECTION: - case KILL_QUERY: - case KILL_STREAM: { - return setKillInfo(pSql, pInfo); - } default: - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression"); } - return TSDB_CODE_SUCCESS; + return tscBuildMsg[pCmd->command](pSql, pInfo); } /* * if the top/bottom exists, only tags columns, tbname column, and primary timestamp column * are available. 
*/ -static bool isTopBottomQuery(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int32_t functionId = tscSqlExprGet(pCmd, i)->functionId; +static bool isTopBottomQuery(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { return true; @@ -1064,11 +578,11 @@ static bool isTopBottomQuery(SSqlCmd* pCmd) { return false; } -int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { +int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { const char* msg1 = "invalid query expression"; const char* msg2 = "interval cannot be less than 10 ms"; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); if (pQuerySql->interval.type == 0 || pQuerySql->interval.n == 0) { return TSDB_CODE_SUCCESS; @@ -1076,42 +590,53 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { // interval is not null SSQLToken* t = &pQuerySql->interval; - if (getTimestampInUsFromStr(t->z, t->n, &pCmd->nAggTimeInterval) != TSDB_CODE_SUCCESS) { + if (getTimestampInUsFromStr(t->z, t->n, &pQueryInfo->nAggTimeInterval) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - /* revised the time precision according to the flag */ + // if the unit of time window value is millisecond, change the value from microsecond if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { - pCmd->nAggTimeInterval = pCmd->nAggTimeInterval / 1000; + pQueryInfo->nAggTimeInterval = pQueryInfo->nAggTimeInterval / 1000; } /* parser has filter the illegal type, no need to check here */ - pCmd->intervalTimeUnit = pQuerySql->interval.z[pQuerySql->interval.n - 1]; + pQueryInfo->intervalTimeUnit = pQuerySql->interval.z[pQuerySql->interval.n - 1]; // interval cannot be less 
than 10 milliseconds - if (pCmd->nAggTimeInterval < tsMinIntervalTime) { - return invalidSqlErrMsg(pCmd, msg2); + if (pQueryInfo->nAggTimeInterval < tsMinIntervalTime) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } // for top/bottom + interval query, we do not add additional timestamp column in the front - if (isTopBottomQuery(pCmd)) { + if (isTopBottomQuery(pQueryInfo)) { return TSDB_CODE_SUCCESS; } - // check the invalid sql expresssion: select count(tbname)/count(tag1)/count(tag2) from super_table interval(1d); - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + /* + * check invalid SQL: + * select count(tbname)/count(tag1)/count(tag2) from super_table_name interval(1d); + */ + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } + + /* + * check invalid SQL: + * select tbname, tags_fields from super_table_name interval(1s) + */ + if (tscQueryMetricTags(pQueryInfo) && pQueryInfo->nAggTimeInterval > 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); + } // need to add timestamp column in result set, if interval is existed - uint64_t uid = tscSqlExprGet(pCmd, 0)->uid; + uint64_t uid = tscSqlExprGet(pQueryInfo, 0)->uid; int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); if (pMeterMetaInfo->pMeterMeta->uid == uid) { tableIndex = i; break; @@ -1123,51 +648,64 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { } SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscSqlExprInsert(pCmd, 0, TSDB_FUNC_TS, 
&index, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE); + tscSqlExprInsert(pQueryInfo, 0, TSDB_FUNC_TS, &index, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE); SColumnList ids = getColumnList(1, 0, PRIMARYKEY_TIMESTAMP_COL_INDEX); - int32_t ret = insertResultField(pCmd, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].aName); - return ret; + int32_t ret = insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].aName); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + if (parseSlidingClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; } -int32_t setSlidingClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { +int32_t parseSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { const char* msg0 = "sliding value too small"; const char* msg1 = "sliding value no larger than the interval value"; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SSQLToken* pSliding = &pQuerySql->sliding; if (pSliding->n != 0) { - getTimestampInUsFromStr(pSliding->z, pSliding->n, &pCmd->nSlidingTime); + getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->nSlidingTime); if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { - pCmd->nSlidingTime /= 1000; + pQueryInfo->nSlidingTime /= 1000; } - if (pCmd->nSlidingTime < tsMinSlidingTime) { - return invalidSqlErrMsg(pCmd, msg0); + if (pQueryInfo->nSlidingTime < tsMinSlidingTime) { + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } - if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { - return invalidSqlErrMsg(pCmd, msg1); + if (pQueryInfo->nSlidingTime > pQueryInfo->nAggTimeInterval) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } + } else { + pQueryInfo->nSlidingTime = -1; } return TSDB_CODE_SUCCESS; } -int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, 
int32_t tableIndex) { +int32_t setMeterID(SMeterMetaInfo* pMeterMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql) { const char* msg = "name too long"; - SSqlCmd* pCmd = &pSql->cmd; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); - int32_t code = TSDB_CODE_SUCCESS; + SSqlCmd* pCmd = &pSql->cmd; + int32_t code = TSDB_CODE_SUCCESS; + // backup the old name in pMeterMetaInfo + size_t size = strlen(pMeterMetaInfo->name); + char* oldName = NULL; + if (size > 0) { + oldName = strdup(pMeterMetaInfo->name); + } + if (hasSpecifyDB(pzTableName)) { - /* - * db has been specified in sql string - * so we ignore current db path - */ + // db has been specified in sql string so we ignore current db path code = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), NULL, pzTableName, NULL); } else { // get current DB name first, then set it into path SSQLToken t = {0}; @@ -1177,10 +715,28 @@ int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex) { } if (code != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - return code; + if (code != TSDB_CODE_SUCCESS) { + free(oldName); + return code; + } + + /* + * the old name exists and is not equalled to the new name. Release the metermeta/metricmeta + * that are corresponding to the old name for the new table name. 
+ */ + if (size > 0) { + if (strncasecmp(oldName, pMeterMetaInfo->name, tListLen(pMeterMetaInfo->name)) != 0) { + tscClearMeterMetaInfo(pMeterMetaInfo, false); + } + } else { + assert(pMeterMetaInfo->pMeterMeta == NULL && pMeterMetaInfo->pMetricMeta == NULL); + } + + tfree(oldName); + return TSDB_CODE_SUCCESS; } static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { @@ -1196,13 +752,13 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { // number of fields no less than 2 if (pFieldList->nField <= 1 || pFieldList->nField > TSDB_MAX_COLUMNS) { - invalidSqlErrMsg(pCmd, msg); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); return false; } // first column must be timestamp if (pFieldList->p[0].type != TSDB_DATA_TYPE_TIMESTAMP) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } @@ -1213,7 +769,7 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { // max row length must be less than TSDB_MAX_BYTES_PER_ROW if (nLen > TSDB_MAX_BYTES_PER_ROW) { - invalidSqlErrMsg(pCmd, msg2); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return false; } @@ -1221,23 +777,23 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { for (int32_t i = 0; i < pFieldList->nField; ++i) { TAOS_FIELD* pField = &pFieldList->p[i]; if (pField->type < TSDB_DATA_TYPE_BOOL || pField->type > TSDB_DATA_TYPE_NCHAR) { - invalidSqlErrMsg(pCmd, msg4); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } if ((pField->type == TSDB_DATA_TYPE_BINARY && (pField->bytes <= 0 || pField->bytes > TSDB_MAX_BINARY_LEN)) || (pField->type == TSDB_DATA_TYPE_NCHAR && (pField->bytes <= 0 || pField->bytes > TSDB_MAX_NCHAR_LEN))) { - invalidSqlErrMsg(pCmd, msg5); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } if (validateColumnName(pField->name) != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg6); + 
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } if (has(pFieldList, i + 1, pFieldList->p[i].name) == true) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } } @@ -1258,7 +814,7 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq // number of fields at least 1 if (pTagsList->nField < 1 || pTagsList->nField > TSDB_MAX_TAGS) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } @@ -1269,14 +825,14 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq // max tag row length must be less than TSDB_MAX_TAGS_LEN if (nLen > TSDB_MAX_TAGS_LEN) { - invalidSqlErrMsg(pCmd, msg2); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return false; } // field name must be unique for (int32_t i = 0; i < pTagsList->nField; ++i) { if (has(pFieldList, 0, pTagsList->p[i].name) == true) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } } @@ -1284,28 +840,28 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq /* timestamp in tag is not allowed */ for (int32_t i = 0; i < pTagsList->nField; ++i) { if (pTagsList->p[i].type == TSDB_DATA_TYPE_TIMESTAMP) { - invalidSqlErrMsg(pCmd, msg4); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } if (pTagsList->p[i].type < TSDB_DATA_TYPE_BOOL || pTagsList->p[i].type > TSDB_DATA_TYPE_NCHAR) { - invalidSqlErrMsg(pCmd, msg5); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } if ((pTagsList->p[i].type == TSDB_DATA_TYPE_BINARY && pTagsList->p[i].bytes <= 0) || (pTagsList->p[i].type == TSDB_DATA_TYPE_NCHAR && pTagsList->p[i].bytes <= 0)) { - invalidSqlErrMsg(pCmd, msg7); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); return false; } if (validateColumnName(pTagsList->p[i].name) != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg6); + 
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } if (has(pTagsList, i + 1, pTagsList->p[i].name) == true) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } } @@ -1324,7 +880,9 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { const char* msg5 = "invalid binary/nchar tag length"; const char* msg6 = "invalid data type in tags"; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + assert(pCmd->numOfClause == 1); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; // no more than 6 tags @@ -1332,18 +890,18 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { char msg[128] = {0}; sprintf(msg, "tags no more than %d", TSDB_MAX_TAGS); - invalidSqlErrMsg(pCmd, msg); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); return false; } // no timestamp allowable if (pTagField->type == TSDB_DATA_TYPE_TIMESTAMP) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } - if (pTagField->type < TSDB_DATA_TYPE_BOOL && pTagField->type > TSDB_DATA_TYPE_NCHAR) { - invalidSqlErrMsg(pCmd, msg6); + if ((pTagField->type < TSDB_DATA_TYPE_BOOL) || (pTagField->type > TSDB_DATA_TYPE_NCHAR)) { + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } @@ -1356,19 +914,19 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { // length less than TSDB_MAX_TASG_LEN if (nLen + pTagField->bytes > TSDB_MAX_TAGS_LEN) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } // tags name can not be a keyword if (validateColumnName(pTagField->name) != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg4); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } // binary(val), val can not be equalled to or less than 0 if ((pTagField->type == TSDB_DATA_TYPE_BINARY || 
pTagField->type == TSDB_DATA_TYPE_NCHAR) && pTagField->bytes <= 0) { - invalidSqlErrMsg(pCmd, msg5); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } @@ -1377,7 +935,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { for (int32_t i = 0; i < pMeterMeta->numOfTags + pMeterMeta->numOfColumns; ++i) { if (strncasecmp(pTagField->name, pSchema[i].name, TSDB_COL_NAME_LEN) == 0) { - invalidSqlErrMsg(pCmd, msg2); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return false; } } @@ -1393,23 +951,24 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { const char* msg5 = "invalid column name"; const char* msg6 = "invalid column length"; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + assert(pCmd->numOfClause == 1); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; // no more max columns if (pMeterMeta->numOfColumns >= TSDB_MAX_COLUMNS || pMeterMeta->numOfTags + pMeterMeta->numOfColumns >= TSDB_MAX_COLUMNS) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } if (pColField->type < TSDB_DATA_TYPE_BOOL || pColField->type > TSDB_DATA_TYPE_NCHAR) { - invalidSqlErrMsg(pCmd, msg4); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } if (validateColumnName(pColField->name) != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg5); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } @@ -1421,20 +980,20 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { } if (pColField->bytes <= 0) { - invalidSqlErrMsg(pCmd, msg6); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } // length less than TSDB_MAX_BYTES_PER_ROW if (nLen + pColField->bytes > TSDB_MAX_BYTES_PER_ROW) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } // field name must be unique for (int32_t i = 
0; i < pMeterMeta->numOfTags + pMeterMeta->numOfColumns; ++i) { if (strncasecmp(pColField->name, pSchema[i].name, TSDB_COL_NAME_LEN) == 0) { - invalidSqlErrMsg(pCmd, msg2); + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); return false; } } @@ -1469,7 +1028,7 @@ static bool hasSpecifyDB(SSQLToken* pTableName) { return false; } -static int32_t setObjFullName(char* fullName, char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* xlen) { +int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* xlen) { int32_t totalLen = 0; if (account != NULL) { @@ -1532,50 +1091,55 @@ static void extractColumnNameFromString(tSQLExprItem* pItem) { } } -int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric) { +int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable) { assert(pSelection != NULL && pCmd != NULL); - const char* msg1 = "invalid column name/illegal column type in arithmetic expression"; + const char* msg1 = "invalid column name, or illegal column type"; const char* msg2 = "functions can not be mixed up"; const char* msg3 = "not support query expression"; + const char* msg4 = "columns from different table mixed up in arithmetic expression"; + const char* msg5 = "invalid function name"; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); for (int32_t i = 0; i < pSelection->nExpr; ++i) { - int32_t outputIndex = pCmd->fieldsInfo.numOfOutputCols; + int32_t outputIndex = pQueryInfo->fieldsInfo.numOfOutputCols; tSQLExprItem* pItem = &pSelection->a[i]; // project on all fields if (pItem->pNode->nSQLOptr == TK_ALL || pItem->pNode->nSQLOptr == TK_ID || pItem->pNode->nSQLOptr == TK_STRING) { // it is actually a function, but the function name is invalid if (pItem->pNode->nSQLOptr == TK_ID && (pItem->pNode->colInfo.z == NULL && pItem->pNode->colInfo.n == 0)) { - return TSDB_CODE_INVALID_SQL; + return 
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } // if the name of column is quoted, remove it and set the right information for later process extractColumnNameFromString(pItem); - pCmd->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; + pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; // select table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2 - if (addProjectionExprAndResultField(pCmd, pItem) != TSDB_CODE_SUCCESS) { + if (addProjectionExprAndResultField(pQueryInfo, pItem) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - } else if (pItem->pNode->nSQLOptr >= TK_COUNT && pItem->pNode->nSQLOptr <= TK_LAST_ROW) { + } else if (pItem->pNode->nSQLOptr >= TK_COUNT && pItem->pNode->nSQLOptr <= TK_AVG_IRATE) { // sql function in selection clause, append sql function info in pSqlCmd structure sequentially - if (addExprAndResultField(pCmd, outputIndex, pItem) != TSDB_CODE_SUCCESS) { + if (addExprAndResultField(pQueryInfo, outputIndex, pItem) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } } else if (pItem->pNode->nSQLOptr >= TK_PLUS && pItem->pNode->nSQLOptr <= TK_REM) { // arithmetic function in select - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); - - SColumnIdListRes columnList = {.pSchema = pSchema, .numOfCols = pMeterMetaInfo->pMeterMeta->numOfColumns}; - - int32_t ret = - validateArithmeticSQLExpr(pItem->pNode, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns, &columnList); - if (ret != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + SColumnList columnList = {0}; + if (validateArithmeticSQLExpr(pItem->pNode, pQueryInfo, &columnList) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); + } + + int32_t tableIndex = columnList.ids[0].tableIndex; + for(int32_t f = 1; f < columnList.num; ++f) { + if (columnList.ids[f].tableIndex != tableIndex) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); + } } char 
arithmeticExprStr[1024] = {0}; @@ -1586,10 +1150,10 @@ int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric } // expr string is set as the parameter of function - SColumnIndex index = {0}; - SSqlExpr* pExpr = tscSqlExprInsert(pCmd, outputIndex, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, + SColumnIndex index = {.tableIndex = tableIndex}; + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, outputIndex, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), sizeof(double)); - addExprParams(pExpr, arithmeticExprStr, TSDB_DATA_TYPE_BINARY, strlen(arithmeticExprStr), 0); + addExprParams(pExpr, arithmeticExprStr, TSDB_DATA_TYPE_BINARY, strlen(arithmeticExprStr), index.tableIndex); /* todo alias name should use the original sql string */ if (pItem->aliasName != NULL) { @@ -1598,40 +1162,40 @@ int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric strncpy(pExpr->aliasName, arithmeticExprStr, TSDB_COL_NAME_LEN); } - insertResultField(pCmd, i, &columnList.list, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName); + insertResultField(pQueryInfo, i, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName); } else { /* * not support such expression * e.g., select 12+5 from table_name */ - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - if (pCmd->fieldsInfo.numOfOutputCols > TSDB_MAX_COLUMNS) { + if (pQueryInfo->fieldsInfo.numOfOutputCols > TSDB_MAX_COLUMNS) { return TSDB_CODE_INVALID_SQL; } } - if (!functionCompatibleCheck(pCmd)) { - return invalidSqlErrMsg(pCmd, msg2); + if (!functionCompatibleCheck(pQueryInfo)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - if (isMetric) { - pCmd->type |= TSDB_QUERY_TYPE_STABLE_QUERY; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + if (isSTable) { + pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_QUERY; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); - if 
(tscQueryMetricTags(pCmd)) { // local handle the metric tag query - pCmd->command = TSDB_SQL_RETRIEVE_TAGS; + if (tscQueryMetricTags(pQueryInfo)) { // local handle the metric tag query pCmd->count = pMeterMetaInfo->pMeterMeta->numOfColumns; // the number of meter schema, tricky. + pQueryInfo->command = TSDB_SQL_RETRIEVE_TAGS; } /* * transfer sql functions that need secondary merge into another format * in dealing with metric queries such as: count/first/last */ - tscTansformSQLFunctionForMetricQuery(pCmd); + tscTansformSQLFunctionForSTableQuery(pQueryInfo); - if (hasUnsupportFunctionsForMetricQuery(pCmd)) { + if (hasUnsupportFunctionsForSTableQuery(pQueryInfo)) { return TSDB_CODE_INVALID_SQL; } } @@ -1639,18 +1203,19 @@ int32_t parseSelectClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric return TSDB_CODE_SUCCESS; } -int32_t insertResultField(SSqlCmd* pCmd, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type, +int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type, char* fieldName) { for (int32_t i = 0; i < pIdList->num; ++i) { - tscColumnBaseInfoInsert(pCmd, &(pIdList->ids[i])); + tscColumnBaseInfoInsert(pQueryInfo, &(pIdList->ids[i])); } - tscFieldInfoSetValue(&pCmd->fieldsInfo, outputIndex, type, fieldName, bytes); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, outputIndex, type, fieldName, bytes); return TSDB_CODE_SUCCESS; } -SSqlExpr* doAddProjectCol(SSqlCmd* pCmd, int32_t outputIndex, int32_t colIdx, int32_t tableIndex) { - SMeterMeta* pMeterMeta = tscGetMeterMetaInfo(pCmd, tableIndex)->pMeterMeta; +SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIdx, int32_t tableIndex) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); + SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; SSchema* pSchema = tsGetColumnSchema(pMeterMeta, colIdx); int32_t numOfCols = pMeterMeta->numOfColumns; @@ -1658,21 
+1223,21 @@ SSqlExpr* doAddProjectCol(SSqlCmd* pCmd, int32_t outputIndex, int32_t colIdx, in int16_t functionId = (int16_t)((colIdx >= numOfCols) ? TSDB_FUNC_TAGPRJ : TSDB_FUNC_PRJ); if (functionId == TSDB_FUNC_TAGPRJ) { - addRequiredTagColumn(pCmd, colIdx - numOfCols, tableIndex); - pCmd->type = TSDB_QUERY_TYPE_STABLE_QUERY; + addRequiredTagColumn(pQueryInfo, colIdx - numOfCols, tableIndex); + pQueryInfo->type = TSDB_QUERY_TYPE_STABLE_QUERY; } else { - pCmd->type = TSDB_QUERY_TYPE_PROJECTION_QUERY; + pQueryInfo->type = TSDB_QUERY_TYPE_PROJECTION_QUERY; } SColumnIndex index = {tableIndex, colIdx}; SSqlExpr* pExpr = - tscSqlExprInsert(pCmd, outputIndex, functionId, &index, pSchema->type, pSchema->bytes, pSchema->bytes); + tscSqlExprInsert(pQueryInfo, outputIndex, functionId, &index, pSchema->type, pSchema->bytes, pSchema->bytes); return pExpr; } -void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); +void addRequiredTagColumn(SQueryInfo* pQueryInfo, int32_t tagColIndex, int32_t tableIndex) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); if (pMeterMetaInfo->numOfTags == 0 || pMeterMetaInfo->tagColumnIndex[pMeterMetaInfo->numOfTags - 1] < tagColIndex) { pMeterMetaInfo->tagColumnIndex[pMeterMetaInfo->numOfTags++] = tagColIndex; @@ -1698,10 +1263,11 @@ void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex assert(tagColIndex >= -1 && tagColIndex < TSDB_MAX_TAGS && pMeterMetaInfo->numOfTags <= TSDB_MAX_TAGS + 1); } -static void addProjectQueryCol(SSqlCmd* pCmd, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) { - SSqlExpr* pExpr = doAddProjectCol(pCmd, startPos, pIndex->columnIndex, pIndex->tableIndex); +static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) { + SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos, 
pIndex->columnIndex, pIndex->tableIndex); - SMeterMeta* pMeterMeta = tscGetMeterMetaInfo(pCmd, pIndex->tableIndex)->pMeterMeta; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, pIndex->tableIndex); + SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; SSchema* pSchema = tsGetColumnSchema(pMeterMeta, pIndex->columnIndex); @@ -1715,42 +1281,42 @@ static void addProjectQueryCol(SSqlCmd* pCmd, int32_t startPos, SColumnIndex* pI ids.num = 0; } - insertResultField(pCmd, startPos, &ids, pExpr->resBytes, pExpr->resType, colName); + insertResultField(pQueryInfo, startPos, &ids, pExpr->resBytes, pExpr->resType, colName); } -void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, - SSchema* pColSchema, int16_t flag) { - SSqlExpr* pExpr = tscSqlExprInsert(pCmd, outputColIndex, functionId, pIndex, pColSchema->type, pColSchema->bytes, - pColSchema->bytes); +void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, + SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag) { + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type, + pColSchema->bytes, pColSchema->bytes); SColumnList ids = getColumnList(1, pIndex->tableIndex, pIndex->columnIndex); if (TSDB_COL_IS_TAG(flag)) { ids.num = 0; } - insertResultField(pCmd, outputColIndex, &ids, pColSchema->bytes, pColSchema->type, pColSchema->name); + insertResultField(pQueryInfo, outputColIndex, &ids, pColSchema->bytes, pColSchema->type, pColSchema->name); pExpr->colInfo.flag = flag; if (TSDB_COL_IS_TAG(flag)) { - addRequiredTagColumn(pCmd, pIndex->columnIndex, pIndex->tableIndex); + addRequiredTagColumn(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex); } } -static int32_t doAddProjectionExprAndResultFields(SSqlCmd* pCmd, SColumnIndex* pIndex, int32_t startPos) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pIndex->tableIndex); +static 
int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColumnIndex* pIndex, int32_t startPos) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, pIndex->tableIndex); int32_t numOfTotalColumns = 0; SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; SSchema* pSchema = tsGetSchema(pMeterMeta); - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { numOfTotalColumns = pMeterMeta->numOfColumns + pMeterMeta->numOfTags; } else { numOfTotalColumns = pMeterMeta->numOfColumns; } for (int32_t j = 0; j < numOfTotalColumns; ++j) { - doAddProjectCol(pCmd, startPos + j, j, pIndex->tableIndex); + doAddProjectCol(pQueryInfo, startPos + j, j, pIndex->tableIndex); pIndex->columnIndex = j; SColumnList ids = {0}; @@ -1759,57 +1325,56 @@ static int32_t doAddProjectionExprAndResultFields(SSqlCmd* pCmd, SColumnIndex* p // tag columns do not add to source list ids.num = (j >= pMeterMeta->numOfColumns) ? 0 : 1; - insertResultField(pCmd, startPos + j, &ids, pSchema[j].bytes, pSchema[j].type, pSchema[j].name); + insertResultField(pQueryInfo, startPos + j, &ids, pSchema[j].bytes, pSchema[j].type, pSchema[j].name); } return numOfTotalColumns; } -int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, tSQLExprItem* pItem) { +int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pItem) { const char* msg0 = "invalid column name"; const char* msg1 = "tag for table query is not allowed"; - int32_t startPos = pCmd->fieldsInfo.numOfOutputCols; + int32_t startPos = pQueryInfo->fieldsInfo.numOfOutputCols; if (pItem->pNode->nSQLOptr == TK_ALL) { // project on all fields SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getTableIndexByName(&pItem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_INVALID_SQL; + if (getTableIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } // all meters 
columns are required if (index.tableIndex == COLUMN_INDEX_INITIAL_VAL) { // all table columns are required. - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { index.tableIndex = i; - int32_t inc = doAddProjectionExprAndResultFields(pCmd, &index, startPos); + int32_t inc = doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos); startPos += inc; } } else { - doAddProjectionExprAndResultFields(pCmd, &index, startPos); + doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos); } } else if (pItem->pNode->nSQLOptr == TK_ID) { // simple column projection query SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pItem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg0); + if (getColumnIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - SColumnIndex index1 = {0, TSDB_TBNAME_COLUMN_INDEX}; - SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_METER_NAME_LEN}; + SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_METER_NAME_LEN}; strcpy(colSchema.name, TSQL_TBNAME_L); - pCmd->type = TSDB_QUERY_TYPE_STABLE_QUERY; - tscAddSpecialColumnForSelect(pCmd, startPos, TSDB_FUNC_TAGPRJ, &index1, &colSchema, true); + pQueryInfo->type = TSDB_QUERY_TYPE_STABLE_QUERY; + tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, true); } else { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; if (index.columnIndex >= pMeterMeta->numOfColumns && UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } - 
addProjectQueryCol(pCmd, startPos, &index, pItem); + addProjectQueryCol(pQueryInfo, startPos, &index, pItem); } } else { return TSDB_CODE_INVALID_SQL; @@ -1818,7 +1383,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, tSQLExprItem* pItem) { return TSDB_CODE_SUCCESS; } -static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SSchema* pSchema, int32_t functionID, char* aliasName, +static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema, int32_t functionID, char* aliasName, int32_t resColIdx, SColumnIndex* pColIndex) { int16_t type = 0; int16_t bytes = 0; @@ -1830,7 +1395,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SSchema* pSchema, int32_t if (pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BINARY || pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_NCHAR || pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BOOL) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(pQueryInfo->msg, msg1); return -1; } else { type = TSDB_DATA_TYPE_DOUBLE; @@ -1847,26 +1412,24 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SSchema* pSchema, int32_t getRevisedName(columnName, functionID, TSDB_COL_NAME_LEN, pSchema[pColIndex->columnIndex].name); } - tscSqlExprInsert(pCmd, resColIdx, functionID, pColIndex, type, bytes, bytes); + tscSqlExprInsert(pQueryInfo, resColIdx, functionID, pColIndex, type, bytes, bytes); // for point interpolation/last_row query, we need the timestamp column to be loaded SColumnIndex index = {.tableIndex = pColIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; if (functionID == TSDB_FUNC_INTERP || functionID == TSDB_FUNC_LAST_ROW) { - tscColumnBaseInfoInsert(pCmd, &index); + tscColumnBaseInfoInsert(pQueryInfo, &index); } SColumnList ids = getColumnList(1, pColIndex->tableIndex, pColIndex->columnIndex); - insertResultField(pCmd, resColIdx, &ids, bytes, type, columnName); + insertResultField(pQueryInfo, resColIdx, &ids, bytes, type, columnName); return TSDB_CODE_SUCCESS; } 
-int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem) { +int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIdx, tSQLExprItem* pItem) { SMeterMetaInfo* pMeterMetaInfo = NULL; int32_t optr = pItem->pNode->nSQLOptr; - int32_t numOfAddedColumn = 1; - const char* msg1 = "not support column types"; const char* msg2 = "invalid parameters"; const char* msg3 = "illegal column name"; @@ -1878,7 +1441,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem case TK_COUNT: { if (pItem->pNode->pParam != NULL && pItem->pNode->pParam->nExpr != 1) { /* more than one parameter for count() function */ - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } int16_t functionID = 0; @@ -1891,7 +1454,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem if (pItem->pNode->pParam != NULL) { SSQLToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo; if (pToken->z == NULL || pToken->n == 0) { - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0]; @@ -1900,20 +1463,20 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem // check if the table name is valid or not SSQLToken tmpToken = pParamElem->pNode->colInfo; - if (getTableIndexByName(&tmpToken, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg4); + if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); } index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - tscSqlExprInsert(pCmd, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size); + tscSqlExprInsert(pQueryInfo, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size); } else { // count the number of meters created according to the metric - if 
(getColumnIndexByNameEx(pToken, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg3); + if (getColumnIndexByName(pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); // count tag is equalled to count(tbname) if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) { @@ -1921,13 +1484,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem } int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - tscSqlExprInsert(pCmd, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size); + tscSqlExprInsert(pQueryInfo, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size); } } else { // count(*) is equalled to count(primary_timestamp_key) index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - tscSqlExprInsert(pCmd, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size); + tscSqlExprInsert(pQueryInfo, colIdx, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size); } char columnName[TSDB_COL_NAME_LEN] = {0}; @@ -1936,11 +1499,17 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem // count always use the primary timestamp key column, which is 0. 
SColumnList ids = getColumnList(1, index.tableIndex, index.columnIndex); - insertResultField(pCmd, colIdx, &ids, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, columnName); + insertResultField(pQueryInfo, colIdx, &ids, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, columnName); return TSDB_CODE_SUCCESS; } case TK_SUM: case TK_AVG: + case TK_RATE: + case TK_IRATE: + case TK_SUM_RATE: + case TK_SUM_IRATE: + case TK_AVG_RATE: + case TK_AVG_IRATE: case TK_TWA: case TK_MIN: case TK_MAX: @@ -1951,26 +1520,27 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem if (pItem->pNode->pParam == NULL || (optr != TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 1) || (optr == TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 3)) { /* no parameters or more than one parameter for function */ - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]); if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pParamElem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg3); + if ((getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) || + index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } // 2. 
check if sql function can be applied on this column data type - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); SSchema* pSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, index.columnIndex); int16_t colType = pSchema->type; - if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(pCmd, msg1); + if (colType <= TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } char columnName[TSDB_COL_NAME_LEN] = {0}; @@ -1994,18 +1564,19 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem if (optr == TK_DIFF) { colIdx += 1; SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; - tscSqlExprInsert(pCmd, 0, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE); + tscSqlExprInsert(pQueryInfo, 0, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, + TSDB_KEYSIZE); SColumnList ids = getColumnList(1, 0, 0); - insertResultField(pCmd, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].aName); + insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].aName); } // functions can not be applied to tags if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) { - return invalidSqlErrMsg(pCmd, msg6); + return invalidSqlErrMsg(pQueryInfo->msg, msg6); } - SSqlExpr* pExpr = tscSqlExprInsert(pCmd, colIdx, functionID, &index, resultType, resultSize, resultSize); + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, colIdx, functionID, &index, resultType, resultSize, resultSize); if (optr == TK_LEASTSQUARES) { /* set the leastsquares parameters */ @@ -2028,7 +1599,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem ids.num = 1; ids.ids[0] = index; - insertResultField(pCmd, colIdx, &ids, 
pExpr->resBytes, pExpr->resType, columnName); + insertResultField(pQueryInfo, colIdx, &ids, pExpr->resBytes, pExpr->resType, columnName); return TSDB_CODE_SUCCESS; } @@ -2044,7 +1615,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem if (!requireAllFields) { if (pItem->pNode->pParam->nExpr < 1) { - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } /* in first/last function, multiple columns can be add to resultset */ @@ -2052,7 +1623,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem for (int32_t i = 0; i < pItem->pNode->pParam->nExpr; ++i) { tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[i]); if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } SColumnIndex index = COLUMN_INDEX_INITIALIZER; @@ -2061,34 +1632,34 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem // select table.* SSQLToken tmpToken = pParamElem->pNode->colInfo; - if (getTableIndexByName(&tmpToken, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg4); + if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); } - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); for (int32_t j = 0; j < pMeterMetaInfo->pMeterMeta->numOfColumns; ++j) { index.columnIndex = j; - if (setExprInfoForFunctions(pCmd, pSchema, functionID, pItem->aliasName, colIdx++, &index) != 0) { + if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIdx++, &index) != 0) { return TSDB_CODE_INVALID_SQL; } } } else { - if (getColumnIndexByNameEx(&pParamElem->pNode->colInfo, pCmd, &index) != 
TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg3); + if (getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); // functions can not be applied to tags - if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) { - return invalidSqlErrMsg(pCmd, msg6); + if ((index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) || (index.columnIndex < 0)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg6); } - if (setExprInfoForFunctions(pCmd, pSchema, functionID, pItem->aliasName, colIdx + i, &index) != 0) { + if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIdx + i, &index) != 0) { return TSDB_CODE_INVALID_SQL; } } @@ -2098,13 +1669,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem } else { // select * from xxx int32_t numOfFields = 0; - for (int32_t j = 0; j < pCmd->numOfTables; ++j) { - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, j); + for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) { + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, j); SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); for (int32_t i = 0; i < pMeterMetaInfo->pMeterMeta->numOfColumns; ++i) { SColumnIndex index = {.tableIndex = j, .columnIndex = i}; - if (setExprInfoForFunctions(pCmd, pSchema, functionID, pItem->aliasName, colIdx + i + j, &index) != 0) { + if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIdx + i + j, &index) != + 0) { return TSDB_CODE_INVALID_SQL; } } @@ -2122,39 +1694,39 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem // 1. 
valid the number of parameters if (pItem->pNode->pParam == NULL || pItem->pNode->pParam->nExpr != 2) { /* no parameters or more than one parameter for function */ - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]); if (pParamElem->pNode->nSQLOptr != TK_ID) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } char columnName[TSDB_COL_NAME_LEN] = {0}; getColumnName(pItem, columnName, TSDB_COL_NAME_LEN); SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pParamElem->pNode->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg3); + if (getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); // functions can not be applied to tags if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) { - return invalidSqlErrMsg(pCmd, msg6); + return invalidSqlErrMsg(pQueryInfo->msg, msg6); } // 2. valid the column type int16_t colType = pSchema[index.columnIndex].type; if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } // 3. 
valid the parameters if (pParamElem[1].pNode->nSQLOptr == TK_ID) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } tVariant* pVariant = &pParamElem[1].pNode->val; @@ -2162,13 +1734,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem int8_t resultType = pSchema[index.columnIndex].type; int16_t resultSize = pSchema[index.columnIndex].bytes; - char val[8] = {0}; + char val[8] = {0}; + int32_t numOfAddedColumn = 1; if (optr == TK_PERCENTILE || optr == TK_APERCENTILE) { tVariantDump(pVariant, val, TSDB_DATA_TYPE_DOUBLE); - double dp = *((double*)val); + double dp = GET_DOUBLE_VAL(val); if (dp < 0 || dp > TOP_BOTTOM_QUERY_LIMIT) { - return invalidSqlErrMsg(pCmd, msg5); + return invalidSqlErrMsg(pQueryInfo->msg, msg5); } resultSize = sizeof(double); @@ -2184,14 +1757,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem return TSDB_CODE_INVALID_SQL; } - SSqlExpr* pExpr = tscSqlExprInsert(pCmd, colIdx, functionId, &index, resultType, resultSize, resultSize); + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, colIdx, functionId, &index, resultType, resultSize, resultSize); addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0); } else { tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT); int64_t nTop = *((int32_t*)val); if (nTop <= 0 || nTop > 100) { // todo use macro - return invalidSqlErrMsg(pCmd, msg5); + return invalidSqlErrMsg(pQueryInfo->msg, msg5); } int16_t functionId = 0; @@ -2201,22 +1774,22 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem // set the first column ts for top/bottom query SColumnIndex index1 = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscSqlExprInsert(pCmd, 0, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE); + tscSqlExprInsert(pQueryInfo, 0, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE); const int32_t TS_COLUMN_INDEX = 0; SColumnList ids = 
getColumnList(1, 0, TS_COLUMN_INDEX); - insertResultField(pCmd, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, + insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].aName); colIdx += 1; // the first column is ts numOfAddedColumn += 1; - SSqlExpr* pExpr = tscSqlExprInsert(pCmd, colIdx, functionId, &index, resultType, resultSize, resultSize); + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, colIdx, functionId, &index, resultType, resultSize, resultSize); addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t), 0); } SColumnList ids = getColumnList(1, 0, index.columnIndex); - insertResultField(pCmd, colIdx, &ids, resultSize, resultType, columnName); + insertResultField(pQueryInfo, colIdx, &ids, resultSize, resultType, columnName); return TSDB_CODE_SUCCESS; } @@ -2261,8 +1834,8 @@ static bool isTablenameToken(SSQLToken* token) { return (strncasecmp(TSQL_TBNAME_L, tmpToken.z, tmpToken.n) == 0 && tmpToken.n == strlen(TSQL_TBNAME_L)); } -static int16_t doGetColumnIndex(SSqlCmd* pCmd, int32_t index, SSQLToken* pToken) { - SMeterMeta* pMeterMeta = tscGetMeterMetaInfo(pCmd, index)->pMeterMeta; +static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SSQLToken* pToken) { + SMeterMeta* pMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index)->pMeterMeta; int32_t numOfCols = pMeterMeta->numOfColumns + pMeterMeta->numOfTags; SSchema* pSchema = tsGetSchema(pMeterMeta); @@ -2282,7 +1855,7 @@ static int16_t doGetColumnIndex(SSqlCmd* pCmd, int32_t index, SSQLToken* pToken) return columnIndex; } -int32_t doGetColumnIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex) { +int32_t doGetColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) { const char* msg0 = "ambiguous column name"; const char* msg1 = "invalid column name"; @@ -2293,12 +1866,12 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* p } else { 
// not specify the table name, try to locate the table index by column name if (pIndex->tableIndex == COLUMN_INDEX_INITIAL_VAL) { - for (int16_t i = 0; i < pCmd->numOfTables; ++i) { - int16_t colIndex = doGetColumnIndex(pCmd, i, pToken); + for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) { + int16_t colIndex = doGetColumnIndex(pQueryInfo, i, pToken); if (colIndex != COLUMN_INDEX_INITIAL_VAL) { if (pIndex->columnIndex != COLUMN_INDEX_INITIAL_VAL) { - return invalidSqlErrMsg(pCmd, msg0); + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } else { pIndex->tableIndex = i; pIndex->columnIndex = colIndex; @@ -2306,14 +1879,14 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* p } } } else { // table index is valid, get the column index - int16_t colIndex = doGetColumnIndex(pCmd, pIndex->tableIndex, pToken); + int16_t colIndex = doGetColumnIndex(pQueryInfo, pIndex->tableIndex, pToken); if (colIndex != COLUMN_INDEX_INITIAL_VAL) { pIndex->columnIndex = colIndex; } } if (pIndex->columnIndex == COLUMN_INDEX_INITIAL_VAL) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } @@ -2324,9 +1897,9 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* p } } -static int32_t getMeterIndex(SSQLToken* pTableToken, SSqlCmd* pCmd, SColumnIndex* pIndex) { +int32_t getMeterIndex(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) { if (pTableToken->n == 0) { // only one table and no table name prefix in column name - if (pCmd->numOfTables == 1) { + if (pQueryInfo->numOfTables == 1) { pIndex->tableIndex = 0; } @@ -2336,9 +1909,9 @@ static int32_t getMeterIndex(SSQLToken* pTableToken, SSqlCmd* pCmd, SColumnIndex pIndex->tableIndex = COLUMN_INDEX_INITIAL_VAL; char tableName[TSDB_METER_ID_LEN + 1] = {0}; - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); - extractMeterName(pMeterMetaInfo->name, tableName); + 
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); + extractTableName(pMeterMetaInfo->name, tableName); if (strncasecmp(tableName, pTableToken->z, pTableToken->n) == 0 && strlen(tableName) == pTableToken->n) { pIndex->tableIndex = i; @@ -2353,29 +1926,29 @@ static int32_t getMeterIndex(SSQLToken* pTableToken, SSqlCmd* pCmd, SColumnIndex return TSDB_CODE_SUCCESS; } -int32_t getTableIndexByName(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex) { +int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) { SSQLToken tableToken = {0}; extractTableNameFromToken(pToken, &tableToken); - if (getMeterIndex(&tableToken, pCmd, pIndex) != TSDB_CODE_SUCCESS) { + if (getMeterIndex(&tableToken, pQueryInfo, pIndex) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } return TSDB_CODE_SUCCESS; } -int32_t getColumnIndexByNameEx(SSQLToken* pToken, SSqlCmd* pCmd, SColumnIndex* pIndex) { - if (pCmd->pMeterInfo == NULL || pCmd->numOfTables == 0) { +int32_t getColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) { + if (pQueryInfo->pMeterInfo == NULL || pQueryInfo->numOfTables == 0) { return TSDB_CODE_INVALID_SQL; } SSQLToken tmpToken = *pToken; - if (getTableIndexByName(&tmpToken, pCmd, pIndex) != TSDB_CODE_SUCCESS) { + if (getTableIndexByName(&tmpToken, pQueryInfo, pIndex) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - return doGetColumnIndexByName(&tmpToken, pCmd, pIndex); + return doGetColumnIndexByName(&tmpToken, pQueryInfo, pIndex); } int32_t changeFunctionID(int32_t optr, int16_t* functionId) { @@ -2389,6 +1962,24 @@ int32_t changeFunctionID(int32_t optr, int16_t* functionId) { case TK_AVG: *functionId = TSDB_FUNC_AVG; break; + case TK_RATE: + *functionId = TSDB_FUNC_RATE; + break; + case TK_IRATE: + *functionId = TSDB_FUNC_IRATE; + break; + case TK_SUM_RATE: + *functionId = TSDB_FUNC_SUM_RATE; + break; + case 
TK_SUM_IRATE: + *functionId = TSDB_FUNC_SUM_IRATE; + break; + case TK_AVG_RATE: + *functionId = TSDB_FUNC_AVG_RATE; + break; + case TK_AVG_IRATE: + *functionId = TSDB_FUNC_AVG_IRATE; + break; case TK_MIN: *functionId = TSDB_FUNC_MIN; break; @@ -2441,125 +2032,76 @@ int32_t changeFunctionID(int32_t optr, int16_t* functionId) { return TSDB_CODE_SUCCESS; } -// TODO support like for showing metrics, there are show meters with like ops int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSqlCmd* pCmd = &pSql->cmd; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + assert(pCmd->numOfClause == 1); + pCmd->command = TSDB_SQL_SHOW; - int8_t type = pInfo->sqlType; - const char* msg = "database name too long"; - const char* msg1 = "invalid database name"; + const char* msg1 = "invalid name"; const char* msg2 = "pattern filter string too long"; - - switch (type) { - case SHOW_VGROUPS: - pCmd->showType = TSDB_MGMT_TABLE_VGROUP; - break; - case SHOW_TABLES: - pCmd->showType = TSDB_MGMT_TABLE_TABLE; - break; - case SHOW_STABLES: - pCmd->showType = TSDB_MGMT_TABLE_METRIC; - break; - - case SHOW_DATABASES: - pCmd->showType = TSDB_MGMT_TABLE_DB; - break; - case SHOW_MNODES: - pCmd->showType = TSDB_MGMT_TABLE_MNODE; - break; - case SHOW_DNODES: - pCmd->showType = TSDB_MGMT_TABLE_PNODE; - break; - case SHOW_ACCOUNTS: - pCmd->showType = TSDB_MGMT_TABLE_ACCT; - break; - case SHOW_USERS: - pCmd->showType = TSDB_MGMT_TABLE_USER; - break; - case SHOW_MODULES: - pCmd->showType = TSDB_MGMT_TABLE_MODULE; - break; - case SHOW_CONNECTIONS: - pCmd->showType = TSDB_MGMT_TABLE_CONNS; - break; - case SHOW_QUERIES: - pCmd->showType = TSDB_MGMT_TABLE_QUERIES; - break; - case SHOW_SCORES: - pCmd->showType = TSDB_MGMT_TABLE_SCORES; - break; - case SHOW_GRANTS: - pCmd->showType = TSDB_MGMT_TABLE_GRANTS; - break; - case SHOW_STREAMS: - pCmd->showType = TSDB_MGMT_TABLE_STREAMS; - break; - case 
SHOW_CONFIGS: - pCmd->showType = TSDB_MGMT_TABLE_CONFIGS; - break; - case SHOW_VNODES: - pCmd->showType = TSDB_MGMT_TABLE_VNODES; - break; - default: - return TSDB_CODE_INVALID_SQL; - } + const char* msg3 = "database name too long"; + const char* msg4 = "invalid ip address"; + const char* msg5 = "database name is empty"; + const char* msg6 = "pattern string is empty"; /* * database prefix in pInfo->pDCLInfo->a[0] * wildcard in like clause in pInfo->pDCLInfo->a[1] */ - if (type == SHOW_TABLES || type == SHOW_STABLES || type == SHOW_VGROUPS) { + SShowInfo* pShowInfo = &pInfo->pDCLInfo->showOpt; + int16_t showType = pShowInfo->showType; + if (showType == TSDB_MGMT_TABLE_TABLE || showType == TSDB_MGMT_TABLE_METRIC || showType == TSDB_MGMT_TABLE_VGROUP) { // db prefix in tagCond, show table conds in payload - if (pInfo->pDCLInfo->nTokens > 0) { - SSQLToken* pDbPrefixToken = &pInfo->pDCLInfo->a[0]; + SSQLToken* pDbPrefixToken = &pShowInfo->prefix; + if (pDbPrefixToken->type != 0) { + assert(pDbPrefixToken->n >= 0); if (pDbPrefixToken->n > TSDB_DB_NAME_LEN) { // db name is too long - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } - if (pDbPrefixToken->n > 0 && tscValidateName(pDbPrefixToken) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + if (pDbPrefixToken->n <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } - int32_t ret = 0; - if (pDbPrefixToken->n > 0) { // has db prefix - ret = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pDbPrefixToken, NULL, NULL); + if (tscValidateName(pDbPrefixToken) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } + int32_t ret = setObjFullName(pMeterMetaInfo->name, getAccountId(pSql), pDbPrefixToken, NULL, NULL); if (ret != TSDB_CODE_SUCCESS) { - return ret; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } + } - if (type != SHOW_VGROUPS && pInfo->pDCLInfo->nTokens == 2) { - // set the like 
conds for show tables - SSQLToken* likeToken = &pInfo->pDCLInfo->a[1]; + // show table/stable like 'xxxx', set the like pattern for show tables + SSQLToken* pPattern = &pShowInfo->pattern; + if (pPattern->type != 0) { + pPattern->n = strdequote(pPattern->z); - strncpy(pCmd->payload, likeToken->z, likeToken->n); - pCmd->payloadLen = strdequote(pCmd->payload); + if (pPattern->n <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); + } - if (pCmd->payloadLen > TSDB_METER_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg2); - } + if (pCmd->payloadLen > TSDB_METER_NAME_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } } - }else if (type == SHOW_VNODES) { - if (NULL == pInfo->pDCLInfo) { - return invalidSqlErrMsg(pCmd, "No specified ip of dnode"); + } else if (showType == TSDB_MGMT_TABLE_VNODES) { + if (pShowInfo->prefix.type == 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "No specified ip of dnode"); } // show vnodes may be ip addr of dnode in payload - if (pInfo->pDCLInfo->nTokens > 0) { - SSQLToken* pDnodeIp = &pInfo->pDCLInfo->a[0]; - - if (pDnodeIp->n > TSDB_IPv4ADDR_LEN) { // ip addr is too long - return invalidSqlErrMsg(pCmd, msg); - } + SSQLToken* pDnodeIp = &pShowInfo->prefix; + if (pDnodeIp->n > TSDB_IPv4ADDR_LEN) { // ip addr is too long + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } - strncpy(pCmd->payload, pDnodeIp->z, pDnodeIp->n); - pCmd->payloadLen = strdequote(pCmd->payload); + if (!validateIpAddress(pDnodeIp->z, pDnodeIp->n)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); } } @@ -2567,60 +2109,54 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { - SSqlCmd* pCmd = &pSql->cmd; + const char* msg1 = "invalid ip address"; + const char* msg2 = "invalid port"; - switch (pInfo->sqlType) { - case KILL_QUERY: - pCmd->command = TSDB_SQL_KILL_QUERY; - break; - case KILL_STREAM: - pCmd->command = 
TSDB_SQL_KILL_STREAM; - break; - case KILL_CONNECTION: - pCmd->command = TSDB_SQL_KILL_CONNECTION; - break; - default: - return TSDB_CODE_INVALID_SQL; - } + SSqlCmd* pCmd = &pSql->cmd; + pCmd->command = pInfo->type; - SSQLToken* pToken = &(pInfo->pDCLInfo->a[0]); - if (pToken->n > TSDB_KILL_MSG_LEN) { + SSQLToken* ip = &(pInfo->pDCLInfo->ip); + if (ip->n > TSDB_KILL_MSG_LEN) { return TSDB_CODE_INVALID_SQL; } - strncpy(pCmd->payload, pToken->z, pToken->n); + strncpy(pCmd->payload, ip->z, ip->n); const char delim = ':'; - char* ipStr = strtok(pToken->z, &delim); - char* portStr = strtok(NULL, &delim); - if (!validateIpAddress(ipStr)) { + char* ipStr = strtok(ip->z, &delim); + char* portStr = strtok(NULL, &delim); + + if (!validateIpAddress(ipStr, strlen(ipStr))) { memset(pCmd->payload, 0, tListLen(pCmd->payload)); - const char* msg = "invalid ip address"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } uint16_t port = (uint16_t)strtol(portStr, NULL, 10); if (port <= 0 || port > 65535) { memset(pCmd->payload, 0, tListLen(pCmd->payload)); - - const char* msg = "invalid port"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } return TSDB_CODE_SUCCESS; } -bool validateIpAddress(char* ip) { - in_addr_t ipAddr = inet_addr(ip); - return (ipAddr != 0) && (ipAddr != 0xffffffff); +bool validateIpAddress(const char* ip, size_t size) { + char tmp[128] = {0}; // buffer to build null-terminated string + assert(size < 128); + + strncpy(tmp, ip, size); + + in_addr_t ipAddr = inet_addr(tmp); + + return ipAddr != INADDR_NONE; } -int32_t tscTansformSQLFunctionForMetricQuery(SSqlCmd* pCmd) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +int32_t tscTansformSQLFunctionForSTableQuery(SQueryInfo* pQueryInfo) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); - if (pMeterMetaInfo->pMeterMeta == NULL || 
!UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (pMeterMetaInfo->pMeterMeta == NULL || !UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { return TSDB_CODE_INVALID_SQL; } @@ -2630,39 +2166,40 @@ int32_t tscTansformSQLFunctionForMetricQuery(SSqlCmd* pCmd) { int16_t type = 0; int16_t intermediateBytes = 0; - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, k); - TAOS_FIELD* pField = tscFieldInfoGetField(pCmd, k); + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); + TAOS_FIELD* pField = tscFieldInfoGetField(pQueryInfo, k); int16_t functionId = aAggs[pExpr->functionId].stableFuncId; if ((functionId >= TSDB_FUNC_SUM && functionId <= TSDB_FUNC_TWA) || - (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST)) { + (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) || + (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) { if (getResultDataInfo(pField->type, pField->bytes, functionId, pExpr->param[0].i64Key, &type, &bytes, &intermediateBytes, 0, true) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - tscSqlExprUpdate(pCmd, k, functionId, pExpr->colInfo.colIdx, TSDB_DATA_TYPE_BINARY, bytes); + tscSqlExprUpdate(pQueryInfo, k, functionId, pExpr->colInfo.colIdx, TSDB_DATA_TYPE_BINARY, bytes); // todo refactor pExpr->interResBytes = intermediateBytes; } } - tscFieldInfoUpdateOffset(pCmd); + tscFieldInfoUpdateOffsetForInterResult(pQueryInfo); return TSDB_CODE_SUCCESS; } /* transfer the field-info back to original input format */ -void tscRestoreSQLFunctionForMetricQuery(SSqlCmd* pCmd) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) { +void tscRestoreSQLFunctionForMetricQuery(SQueryInfo* pQueryInfo) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + if (!UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { 
return; } - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); - TAOS_FIELD* pField = tscFieldInfoGetField(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + TAOS_FIELD* pField = tscFieldInfoGetField(pQueryInfo, i); if ((pExpr->functionId >= TSDB_FUNC_FIRST_DST && pExpr->functionId <= TSDB_FUNC_LAST_DST) || (pExpr->functionId >= TSDB_FUNC_SUM && pExpr->functionId <= TSDB_FUNC_MAX)) { @@ -2672,27 +2209,29 @@ void tscRestoreSQLFunctionForMetricQuery(SSqlCmd* pCmd) { } } -bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd) { +bool hasUnsupportFunctionsForSTableQuery(SQueryInfo* pQueryInfo) { const char* msg1 = "TWA not allowed to apply to super table directly"; - const char* msg2 = "functions not supported for super table"; - const char* msg3 = "TWA only support group by tbname for super table query"; - + const char* msg2 = "TWA only support group by tbname for super table query"; + const char* msg3 = "function not support for super table query"; + // filter sql function not supported by metric query yet. 
- for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int32_t functionId = tscSqlExprGet(pCmd, i)->functionId; + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_METRIC) == 0) { + invalidSqlErrMsg(pQueryInfo->msg, msg3); return true; } } - if (tscIsTWAQuery(pCmd)) { - if (pCmd->groupbyExpr.numOfGroupCols == 0) { - invalidSqlErrMsg(pCmd, msg1); + if (tscIsTWAQuery(pQueryInfo)) { + if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) { + invalidSqlErrMsg(pQueryInfo->msg, msg1); return true; } - if (pCmd->groupbyExpr.numOfGroupCols != 1 || pCmd->groupbyExpr.columnInfo[0].colIdx != TSDB_TBNAME_COLUMN_INDEX) { - invalidSqlErrMsg(pCmd, msg3); + if (pQueryInfo->groupbyExpr.numOfGroupCols != 1 || + pQueryInfo->groupbyExpr.columnInfo[0].colIdx != TSDB_TBNAME_COLUMN_INDEX) { + invalidSqlErrMsg(pQueryInfo->msg, msg2); return true; } } @@ -2700,23 +2239,21 @@ bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd) { return false; } -static bool functionCompatibleCheck(SSqlCmd* pCmd) { - const char* msg1 = "column on select clause not allowed"; - +static bool functionCompatibleCheck(SQueryInfo* pQueryInfo) { int32_t startIdx = 0; - int32_t functionID = tscSqlExprGet(pCmd, startIdx)->functionId; + int32_t functionID = tscSqlExprGet(pQueryInfo, startIdx)->functionId; // ts function can be simultaneously used with any other functions. 
if (functionID == TSDB_FUNC_TS || functionID == TSDB_FUNC_TS_DUMMY) { startIdx++; } - int32_t factor = funcCompatDefList[tscSqlExprGet(pCmd, startIdx)->functionId]; + int32_t factor = funcCompatDefList[tscSqlExprGet(pQueryInfo, startIdx)->functionId]; // diff function cannot be executed with other function // arithmetic function can be executed with other arithmetic functions - for (int32_t i = startIdx + 1; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = startIdx + 1; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); int16_t functionId = pExpr->functionId; if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS) { @@ -2735,29 +2272,40 @@ static bool functionCompatibleCheck(SSqlCmd* pCmd) { return true; } -void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); +void updateTagColumnIndex(SQueryInfo* pQueryInfo, int32_t tableIndex) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); - // update tags column index for group by tags - for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) { - int32_t index = pCmd->groupbyExpr.columnInfo[i].colIdx; - - for (int32_t j = 0; j < pMeterMetaInfo->numOfTags; ++j) { - int32_t tagColIndex = pMeterMetaInfo->tagColumnIndex[j]; - if (tagColIndex == index) { - pCmd->groupbyExpr.columnInfo[i].colIdx = j; - break; + /* + * update tags column index for group by tags + * group by columns belong to this table + */ + if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 && pQueryInfo->groupbyExpr.tableIndex == tableIndex) { + for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { + int32_t index = pQueryInfo->groupbyExpr.columnInfo[i].colIdx; + + for (int32_t j = 0; j < pMeterMetaInfo->numOfTags; ++j) { + int32_t tagColIndex = 
pMeterMetaInfo->tagColumnIndex[j]; + if (tagColIndex == index) { + pQueryInfo->groupbyExpr.columnInfo[i].colIdx = j; + break; + } } } } // update tags column index for expression - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + if (!TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { // not tags, continue continue; } + // not belongs to this table + if (pExpr->uid != pMeterMetaInfo->pMeterMeta->uid) { + continue; + } + for (int32_t j = 0; j < pMeterMetaInfo->numOfTags; ++j) { if (pExpr->colInfo.colIdx == pMeterMetaInfo->tagColumnIndex[j]) { pExpr->colInfo.colIdx = j; @@ -2765,14 +2313,37 @@ void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) { } } } + + // update join condition tag column index + SJoinInfo* pJoinInfo = &pQueryInfo->tagCond.joinInfo; + if (!pJoinInfo->hasJoin) { // not join query + return; + } + + assert(pJoinInfo->left.uid != pJoinInfo->right.uid); + + // the join condition expression node belongs to this table(super table) + if (pMeterMetaInfo->pMeterMeta->uid == pJoinInfo->left.uid) { + for (int32_t i = 0; i < pMeterMetaInfo->numOfTags; ++i) { + if (pJoinInfo->left.tagCol == pMeterMetaInfo->tagColumnIndex[i]) { + pJoinInfo->left.tagCol = i; + } + } + } + + if (pMeterMetaInfo->pMeterMeta->uid == pJoinInfo->right.uid) { + for (int32_t i = 0; i < pMeterMetaInfo->numOfTags; ++i) { + if (pJoinInfo->right.tagCol == pMeterMetaInfo->tagColumnIndex[i]) { + pJoinInfo->right.tagCol = i; + } + } + } } -int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { +int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd) { const char* msg1 = "too many columns in group by clause"; const char* msg2 = "invalid column name in group by clause"; - const char* msg4 = "group by only available for STable query"; - const char* msg5 = "group by columns must belong to 
one table"; - const char* msg6 = "only support group by one ordinary column"; + const char* msg3 = "group by columns must belong to one table"; const char* msg7 = "not support group by expression"; const char* msg8 = "not allowed column type for group by"; const char* msg9 = "tags not allowed for table query"; @@ -2784,16 +2355,15 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { return TSDB_CODE_SUCCESS; } - pCmd->groupbyExpr.numOfGroupCols = pList->nExpr; + pQueryInfo->groupbyExpr.numOfGroupCols = pList->nExpr; if (pList->nExpr > TSDB_MAX_TAGS) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } SMeterMeta* pMeterMeta = NULL; SSchema* pSchema = NULL; + SSchema s = tsGetTbnameColumnSchema(); - SSchema s = {0}; - int32_t numOfReqTags = 0; int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; for (int32_t i = 0; i < pList->nExpr; ++i) { @@ -2802,43 +2372,33 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&token, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg2); + if (getColumnIndexByName(&token, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } if (tableIndex != index.tableIndex && tableIndex >= 0) { - return invalidSqlErrMsg(pCmd, msg5); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } tableIndex = index.tableIndex; - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); pMeterMeta = pMeterMetaInfo->pMeterMeta; - // TODO refactor!!!!!!!!!!!!!!1 if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - s.colId = TSDB_TBNAME_COLUMN_INDEX; - s.type = TSDB_DATA_TYPE_BINARY; - s.bytes = TSDB_METER_NAME_LEN; - strcpy(s.name, TSQL_TBNAME_L); - pSchema = &s; } else { pSchema = tsGetColumnSchema(pMeterMeta, index.columnIndex); } - int16_t type = 0; - int16_t bytes = 
0; - char* name = NULL; - bool groupTag = false; if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= pMeterMeta->numOfColumns) { groupTag = true; } if (groupTag) { - if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - return invalidSqlErrMsg(pCmd, msg9); + if (!UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg9); } int32_t relIndex = index.columnIndex; @@ -2846,36 +2406,36 @@ int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { relIndex -= pMeterMeta->numOfColumns; } - pCmd->groupbyExpr.columnInfo[i] = + pQueryInfo->groupbyExpr.columnInfo[i] = (SColIndexEx){.colIdx = relIndex, .flag = TSDB_COL_TAG, .colId = pSchema->colId}; // relIndex; - addRequiredTagColumn(pCmd, pCmd->groupbyExpr.columnInfo[i].colIdx, index.tableIndex); + addRequiredTagColumn(pQueryInfo, pQueryInfo->groupbyExpr.columnInfo[i].colIdx, index.tableIndex); } else { // check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by - if (pSchema->type > TSDB_DATA_TYPE_BIGINT) { - return invalidSqlErrMsg(pCmd, msg8); + if (pSchema->type > TSDB_DATA_TYPE_BINARY) { + return invalidSqlErrMsg(pQueryInfo->msg, msg8); } - tscColumnBaseInfoInsert(pCmd, &index); - pCmd->groupbyExpr.columnInfo[i] = + tscColumnBaseInfoInsert(pQueryInfo, &index); + pQueryInfo->groupbyExpr.columnInfo[i] = (SColIndexEx){.colIdx = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId}; // relIndex; - pCmd->groupbyExpr.orderType = TSQL_SO_ASC; + pQueryInfo->groupbyExpr.orderType = TSQL_SO_ASC; if (i == 0 && pList->nExpr > 1) { - return invalidSqlErrMsg(pCmd, msg7); + return invalidSqlErrMsg(pQueryInfo->msg, msg7); } } } - pCmd->groupbyExpr.tableIndex = tableIndex; + pQueryInfo->groupbyExpr.tableIndex = tableIndex; return TSDB_CODE_SUCCESS; } -void setColumnOffsetValueInResultset(SSqlCmd* pCmd) { - if (QUERY_IS_STABLE_QUERY(pCmd->type)) { - tscFieldInfoUpdateOffset(pCmd); +void setColumnOffsetValueInResultset(SQueryInfo* 
pQueryInfo) { + if (QUERY_IS_STABLE_QUERY(pQueryInfo->type)) { + tscFieldInfoUpdateOffsetForInterResult(pQueryInfo); } else { - tscFieldInfoCalOffset(pCmd); + tscFieldInfoCalOffset(pQueryInfo); } } @@ -2885,7 +2445,7 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumnBase* pColumn) { } int32_t size = pColumn->numOfFilters + 1; - char* tmp = realloc(pColumn->filterInfo, sizeof(SColumnFilterInfo) * (size)); + char* tmp = (char*)realloc((void*)(pColumn->filterInfo), sizeof(SColumnFilterInfo) * (size)); if (tmp != NULL) { pColumn->filterInfo = (SColumnFilterInfo*)tmp; } @@ -2898,12 +2458,12 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumnBase* pColumn) { return pColFilterInfo; } -static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SColumnFilterInfo* pColumnFilter, SColumnIndex* columnIndex, - tSQLExpr* pExpr) { +static int32_t doExtractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter, + SColumnIndex* columnIndex, tSQLExpr* pExpr) { const char* msg = "not supported filter condition"; tSQLExpr* pRight = pExpr->pRight; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, columnIndex->tableIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, columnIndex->tableIndex); SSchema* pSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, columnIndex->columnIndex); @@ -2913,7 +2473,7 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SColumnFilterInfo* pColu } else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) { colType = TSDB_DATA_TYPE_DOUBLE; } else if ((colType == TSDB_DATA_TYPE_TIMESTAMP) && (TSDB_DATA_TYPE_BINARY == pRight->val.nType)) { - int retVal = setColumnFilterInfoForTimestamp(pCmd, &pRight->val); + int retVal = setColumnFilterInfoForTimestamp(pQueryInfo, &pRight->val); if (TSDB_CODE_SUCCESS != retVal) { return retVal; } @@ -2963,7 +2523,7 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SColumnFilterInfo* pColu pColumnFilter->lowerRelOptr = 
TSDB_RELATION_LIKE; break; default: - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg); } return TSDB_CODE_SUCCESS; @@ -2986,8 +2546,6 @@ typedef struct SCondExpr { static int32_t getTimeRange(int64_t* stime, int64_t* etime, tSQLExpr* pRight, int32_t optr, int16_t timePrecision); -static int32_t doParseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr, SCondExpr* condExpr); - static int32_t tSQLExprNodeToString(tSQLExpr* pExpr, char** str) { if (pExpr->nSQLOptr == TK_ID) { // column name strncpy(*str, pExpr->colInfo.z, pExpr->colInfo.n); @@ -3103,26 +2661,22 @@ static int32_t optrToString(tSQLExpr* pExpr, char** exprString) { return TSDB_CODE_SUCCESS; } -static int32_t tablenameListToString(tSQLExpr* pExpr, char* str) { +static int32_t tablenameListToString(tSQLExpr* pExpr, /*char* str*/ SStringBuilder* sb) { tSQLExprList* pList = pExpr->pParam; if (pList->nExpr <= 0) { return TSDB_CODE_INVALID_SQL; } if (pList->nExpr > 0) { - strcpy(str, QUERY_COND_REL_PREFIX_IN); - str += QUERY_COND_REL_PREFIX_IN_LEN; + taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); } - int32_t len = 0; for (int32_t i = 0; i < pList->nExpr; ++i) { tSQLExpr* pSub = pList->a[i].pNode; - strncpy(str + len, pSub->val.pz, pSub->val.nLen); - - len += pSub->val.nLen; + taosStringBuilderAppendStringLen(sb, pSub->val.pz, pSub->val.nLen); if (i < pList->nExpr - 1) { - str[len++] = TBNAME_LIST_SEP[0]; + taosStringBuilderAppendString(sb, TBNAME_LIST_SEP); } if (pSub->val.nLen <= 0 || pSub->val.nLen > TSDB_METER_NAME_LEN) { @@ -3133,11 +2687,9 @@ static int32_t tablenameListToString(tSQLExpr* pExpr, char* str) { return TSDB_CODE_SUCCESS; } -static int32_t tablenameCondToString(tSQLExpr* pExpr, char* str) { - strcpy(str, QUERY_COND_REL_PREFIX_LIKE); - str += strlen(QUERY_COND_REL_PREFIX_LIKE); - - strcpy(str, pExpr->val.pz); +static int32_t tablenameCondToString(tSQLExpr* pExpr, /*char* str*/ SStringBuilder* sb) { + 
taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN); + taosStringBuilderAppendString(sb, pExpr->val.pz); return TSDB_CODE_SUCCESS; } @@ -3149,17 +2701,16 @@ enum { TSQL_EXPR_TBNAME = 3, }; -static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SColumnIndex* pIndex, tSQLExpr* pExpr, int32_t sqlOptr) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pIndex->tableIndex); +static int32_t extractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnIndex* pIndex, tSQLExpr* pExpr, int32_t sqlOptr) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, pIndex->tableIndex); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; SSchema* pSchema = tsGetColumnSchema(pMeterMeta, pIndex->columnIndex); const char* msg1 = "non binary column not support like operator"; const char* msg2 = "binary column not support this operator"; - const char* msg3 = "OR is not supported on different column filter"; - SColumnBase* pColumn = tscColumnBaseInfoInsert(pCmd, pIndex); + SColumnBase* pColumn = tscColumnBaseInfoInsert(pQueryInfo, pIndex); SColumnFilterInfo* pColFilter = NULL; /* @@ -3185,25 +2736,25 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SColumnIndex* pIndex, tSQL if (pColFilter->filterOnBinary) { if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE && pExpr->nSQLOptr != TK_LIKE) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } } else { if (pExpr->nSQLOptr == TK_LIKE) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } pColumn->colIndex = *pIndex; - return doExtractColumnFilterInfo(pCmd, pColFilter, pIndex, pExpr); + return doExtractColumnFilterInfo(pQueryInfo, pColFilter, pIndex, pExpr); } -static void relToString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) { +static void relToString(tSQLExpr* pExpr, char** str) { assert(pExpr->nSQLOptr == TK_AND || pExpr->nSQLOptr == TK_OR); const char* or = "OR"; const 
char*and = "AND"; - // if (pCmd->tagCond.relType == TSQL_STABLE_QTYPE_COND) { + // if (pQueryInfo->tagCond.relType == TSQL_STABLE_QTYPE_COND) { if (pExpr->nSQLOptr == TK_AND) { strcpy(*str, and); *str += strlen(and); @@ -3211,10 +2762,9 @@ static void relToString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) { strcpy(*str, or); *str += strlen(or); } - // } } -static int32_t getTagCondString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) { +static int32_t getTagCondString(tSQLExpr* pExpr, char** str) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } @@ -3223,14 +2773,14 @@ static int32_t getTagCondString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) { *(*str) = '('; *str += 1; - int32_t ret = getTagCondString(pCmd, pExpr->pLeft, str); + int32_t ret = getTagCondString(pExpr->pLeft, str); if (ret != TSDB_CODE_SUCCESS) { return ret; } - relToString(pCmd, pExpr, str); + relToString(pExpr, str); - ret = getTagCondString(pCmd, pExpr->pRight, str); + ret = getTagCondString(pExpr->pRight, str); *(*str) = ')'; *str += 1; @@ -3241,7 +2791,7 @@ static int32_t getTagCondString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) { return tSQLExprLeafToString(pExpr, true, str); } -static int32_t getTablenameCond(SSqlCmd* pCmd, tSQLExpr* pTableCond, char* str) { +static int32_t getTablenameCond(SQueryInfo* pQueryInfo, tSQLExpr* pTableCond, SStringBuilder* sb) { const char* msg0 = "invalid table name list"; if (pTableCond == NULL) { @@ -3258,63 +2808,61 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, tSQLExpr* pTableCond, char* str) int32_t ret = TSDB_CODE_SUCCESS; if (pTableCond->nSQLOptr == TK_IN) { - ret = tablenameListToString(pRight, str); + ret = tablenameListToString(pRight, sb); } else if (pTableCond->nSQLOptr == TK_LIKE) { - ret = tablenameCondToString(pRight, str); + ret = tablenameCondToString(pRight, sb); } if (ret != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg0); + invalidSqlErrMsg(pQueryInfo->msg, msg0); } return ret; } -static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, 
tSQLExpr* pExpr, int32_t relOptr) { +static int32_t getColumnQueryCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t relOptr) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } if (!isExprDirectParentOfLeaftNode(pExpr)) { // internal node - int32_t ret = getColumnQueryCondInfo(pCmd, pExpr->pLeft, pExpr->nSQLOptr); + int32_t ret = getColumnQueryCondInfo(pQueryInfo, pExpr->pLeft, pExpr->nSQLOptr); if (ret != TSDB_CODE_SUCCESS) { return ret; } - return getColumnQueryCondInfo(pCmd, pExpr->pRight, pExpr->nSQLOptr); + return getColumnQueryCondInfo(pQueryInfo, pExpr->pRight, pExpr->nSQLOptr); } else { // handle leaf node SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pExpr->pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - return extractColumnFilterInfo(pCmd, &index, pExpr, relOptr); + return extractColumnFilterInfo(pQueryInfo, &index, pExpr, relOptr); } } -static int32_t getJoinCondInfo(SSqlObj* pSql, tSQLExpr* pExpr) { +static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) { const char* msg = "invalid join query condition"; if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } - SSqlCmd* pCmd = &pSql->cmd; - if (!isExprDirectParentOfLeaftNode(pExpr)) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg); } - STagCond* pTagCond = &pCmd->tagCond; + STagCond* pTagCond = &pQueryInfo->tagCond; SJoinNode* pLeft = &pTagCond->joinInfo.left; SJoinNode* pRight = &pTagCond->joinInfo.right; SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pExpr->pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + SMeterMetaInfo* pMeterMetaInfo = 
tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); int16_t tagColIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; pLeft->uid = pMeterMetaInfo->pMeterMeta->uid; @@ -3322,11 +2870,11 @@ static int32_t getJoinCondInfo(SSqlObj* pSql, tSQLExpr* pExpr) { strcpy(pLeft->meterId, pMeterMetaInfo->name); index = (SColumnIndex)COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pExpr->pRight->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); tagColIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; pRight->uid = pMeterMetaInfo->pMeterMeta->uid; @@ -3371,54 +2919,44 @@ int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString) { return TSDB_CODE_SUCCESS; } -static int32_t validateSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int32_t numOfCols, SColumnIdListRes* pList) { +static int32_t validateSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList) { if (pExpr->nSQLOptr == TK_ID) { - bool validColumnName = false; - - SColumnList* list = &pList->list; - - for (int32_t i = 0; i < numOfCols; ++i) { - if (strncasecmp(pExpr->colInfo.z, pSchema[i].name, pExpr->colInfo.n) == 0 && - pExpr->colInfo.n == strlen(pSchema[i].name)) { - if (pSchema[i].type < TSDB_DATA_TYPE_TINYINT || pSchema[i].type > TSDB_DATA_TYPE_DOUBLE) { - return TSDB_CODE_INVALID_SQL; - } - - if (pList != NULL) { - list->ids[list->num++].columnIndex = (int16_t)i; - } - - validColumnName = true; + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(&pExpr->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; } - } - - if (!validColumnName) { - return TSDB_CODE_INVALID_SQL; - } + // if column is timestamp, bool, binary, 
nchar, not support arithmetic, so return invalid sql + SMeterMeta* pMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex)->pMeterMeta; + SSchema* pSchema = tsGetSchema(pMeterMeta) + index.columnIndex; + if ((pSchema->type == TSDB_DATA_TYPE_TIMESTAMP) || (pSchema->type == TSDB_DATA_TYPE_BOOL) + || (pSchema->type == TSDB_DATA_TYPE_BINARY) || (pSchema->type == TSDB_DATA_TYPE_NCHAR)){ + return TSDB_CODE_INVALID_SQL; + } + + pList->ids[pList->num++] = index; } else if (pExpr->nSQLOptr == TK_FLOAT && (isnan(pExpr->val.dKey) || isinf(pExpr->val.dKey))) { return TSDB_CODE_INVALID_SQL; - } else if (pExpr->nSQLOptr >= TK_MIN && pExpr->nSQLOptr <= TK_LAST_ROW) { + } else if (pExpr->nSQLOptr >= TK_MIN && pExpr->nSQLOptr <= TK_AVG_IRATE) { return TSDB_CODE_INVALID_SQL; } return TSDB_CODE_SUCCESS; } -static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int32_t numOfCols, - SColumnIdListRes* pList) { +static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } tSQLExpr* pLeft = pExpr->pLeft; if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) { - int32_t ret = validateArithmeticSQLExpr(pLeft, pSchema, numOfCols, pList); + int32_t ret = validateArithmeticSQLExpr(pLeft, pQueryInfo, pList); if (ret != TSDB_CODE_SUCCESS) { return ret; } } else { - int32_t ret = validateSQLExpr(pLeft, pSchema, numOfCols, pList); + int32_t ret = validateSQLExpr(pLeft, pQueryInfo, pList); if (ret != TSDB_CODE_SUCCESS) { return ret; } @@ -3426,12 +2964,12 @@ static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int3 tSQLExpr* pRight = pExpr->pRight; if (pRight->nSQLOptr >= TK_PLUS && pRight->nSQLOptr <= TK_REM) { - int32_t ret = validateArithmeticSQLExpr(pRight, pSchema, numOfCols, pList); + int32_t ret = validateArithmeticSQLExpr(pRight, pQueryInfo, pList); if (ret != TSDB_CODE_SUCCESS) { return ret; } } else { - int32_t ret = 
validateSQLExpr(pRight, pSchema, numOfCols, pList); + int32_t ret = validateSQLExpr(pRight, pQueryInfo, pList); if (ret != TSDB_CODE_SUCCESS) { return ret; } @@ -3453,8 +2991,8 @@ static bool isValidExpr(tSQLExpr* pLeft, tSQLExpr* pRight, int32_t optr) { * * However, columnA < 4+12 is valid */ - if ((pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_LAST_ROW) || - (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_LAST_ROW) || + if ((pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE) || + (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_AVG_IRATE) || (pLeft->nSQLOptr >= TK_BOOL && pLeft->nSQLOptr <= TK_BINARY && pRight->nSQLOptr >= TK_BOOL && pRight->nSQLOptr <= TK_BINARY)) { return false; @@ -3498,7 +3036,7 @@ static void exchangeExpr(tSQLExpr* pExpr) { } } -static bool validateJoinExprNode(SSqlCmd* pCmd, tSQLExpr* pExpr, SColumnIndex* pLeftIndex) { +static bool validateJoinExprNode(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColumnIndex* pLeftIndex) { const char* msg1 = "illegal column name"; const char* msg2 = "= is expected in join expression"; const char* msg3 = "join column must have same type"; @@ -3513,40 +3051,40 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, tSQLExpr* pExpr, SColumnIndex* p } if (pExpr->nSQLOptr != TK_EQ) { - invalidSqlErrMsg(pCmd, msg2); + invalidSqlErrMsg(pQueryInfo->msg, msg2); return false; } SColumnIndex rightIndex = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pRight->colInfo, pCmd, &rightIndex) != TSDB_CODE_SUCCESS) { - invalidSqlErrMsg(pCmd, msg1); + if (getColumnIndexByName(&pRight->colInfo, pQueryInfo, &rightIndex) != TSDB_CODE_SUCCESS) { + invalidSqlErrMsg(pQueryInfo->msg, msg1); return false; } // todo extract function - SMeterMetaInfo* pLeftMeterMeta = tscGetMeterMetaInfo(pCmd, pLeftIndex->tableIndex); + SMeterMetaInfo* pLeftMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, pLeftIndex->tableIndex); SSchema* pLeftSchema = tsGetSchema(pLeftMeterMeta->pMeterMeta); int16_t leftType 
= pLeftSchema[pLeftIndex->columnIndex].type; - SMeterMetaInfo* pRightMeterMeta = tscGetMeterMetaInfo(pCmd, rightIndex.tableIndex); + SMeterMetaInfo* pRightMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, rightIndex.tableIndex); SSchema* pRightSchema = tsGetSchema(pRightMeterMeta->pMeterMeta); int16_t rightType = pRightSchema[rightIndex.columnIndex].type; if (leftType != rightType) { - invalidSqlErrMsg(pCmd, msg3); + invalidSqlErrMsg(pQueryInfo->msg, msg3); return false; } else if (pLeftIndex->tableIndex == rightIndex.tableIndex) { - invalidSqlErrMsg(pCmd, msg4); + invalidSqlErrMsg(pQueryInfo->msg, msg4); return false; } else if (leftType == TSDB_DATA_TYPE_BINARY || leftType == TSDB_DATA_TYPE_NCHAR) { - invalidSqlErrMsg(pCmd, msg6); + invalidSqlErrMsg(pQueryInfo->msg, msg6); return false; } // table to table/ super table to super table are allowed - if (UTIL_METER_IS_METRIC(pLeftMeterMeta) != UTIL_METER_IS_METRIC(pRightMeterMeta)) { - invalidSqlErrMsg(pCmd, msg5); + if (UTIL_METER_IS_SUPERTABLE(pLeftMeterMeta) != UTIL_METER_IS_SUPERTABLE(pRightMeterMeta)) { + invalidSqlErrMsg(pQueryInfo->msg, msg5); return false; } @@ -3565,10 +3103,10 @@ static bool validTableNameOptr(tSQLExpr* pExpr) { return false; } -static int32_t setExprToCond(SSqlCmd* pCmd, tSQLExpr** parent, tSQLExpr* pExpr, const char* msg, int32_t parentOptr) { +static int32_t setExprToCond(tSQLExpr** parent, tSQLExpr* pExpr, const char* msg, int32_t parentOptr, char* msgBuf) { if (*parent != NULL) { if (parentOptr == TK_OR && msg != NULL) { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(msgBuf, msg); } *parent = tSQLExprCreate((*parent), pExpr, parentOptr); @@ -3579,16 +3117,15 @@ static int32_t setExprToCond(SSqlCmd* pCmd, tSQLExpr** parent, tSQLExpr* pExpr, return TSDB_CODE_SUCCESS; } -static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type, +static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, 
SCondExpr* pCondExpr, int32_t* type, int32_t parentOptr) { const char* msg1 = "meter query cannot use tags filter"; const char* msg2 = "illegal column name"; const char* msg3 = "only one query time range allowed"; const char* msg4 = "only one join condition allowed"; - const char* msg5 = "AND is allowed to filter on different ordinary columns"; - const char* msg6 = "not support ordinary column join"; - const char* msg7 = "only one query condition on tbname allowed"; - const char* msg8 = "only in/like allowed in filter table name"; + const char* msg5 = "not support ordinary column join"; + const char* msg6 = "only one query condition on tbname allowed"; + const char* msg7 = "only in/like allowed in filter table name"; tSQLExpr* pLeft = (*pExpr)->pLeft; tSQLExpr* pRight = (*pExpr)->pRight; @@ -3596,23 +3133,23 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* int32_t ret = TSDB_CODE_SUCCESS; SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg2); + if (getColumnIndexByName(&pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } assert(isExprDirectParentOfLeaftNode(*pExpr)); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range - if (!validateJoinExprNode(pCmd, *pExpr, &index)) { + if (!validateJoinExprNode(pQueryInfo, *pExpr, &index)) { return TSDB_CODE_INVALID_SQL; } // set join query condition if (pRight->nSQLOptr == TK_ID) { // no need to keep the timestamp join condition - pCmd->type |= TSDB_QUERY_TYPE_JOIN_QUERY; + pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; pCondExpr->tsJoin = true; /* @@ -3621,7 
+3158,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* */ tSQLExprDestroy(*pExpr); } else { - ret = setExprToCond(pCmd, &pCondExpr->pTimewindow, *pExpr, msg3, parentOptr); + ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pQueryInfo->msg); } *pExpr = NULL; // remove this expression @@ -3630,7 +3167,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // query on tags // check for tag query condition if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } // check for like expression @@ -3643,14 +3180,14 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* if ((!isTablenameToken(&pLeft->colInfo)) && pSchema[index.columnIndex].type != TSDB_DATA_TYPE_BINARY && pSchema[index.columnIndex].type != TSDB_DATA_TYPE_NCHAR) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } } // in case of in operator, keep it in a seperate attribute if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { if (!validTableNameOptr(*pExpr)) { - return invalidSqlErrMsg(pCmd, msg8); + return invalidSqlErrMsg(pQueryInfo->msg, msg7); } if (pCondExpr->pTableCond == NULL) { @@ -3658,23 +3195,23 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr->relType = parentOptr; pCondExpr->tableCondIndex = index.tableIndex; } else { - return invalidSqlErrMsg(pCmd, msg7); + return invalidSqlErrMsg(pQueryInfo->msg, msg6); } *type = TSQL_EXPR_TBNAME; *pExpr = NULL; } else { if (pRight->nSQLOptr == TK_ID) { // join on tag columns for stable query - if (!validateJoinExprNode(pCmd, *pExpr, &index)) { + if (!validateJoinExprNode(pQueryInfo, *pExpr, &index)) { return TSDB_CODE_INVALID_SQL; } if (pCondExpr->pJoinExpr != NULL) { - return invalidSqlErrMsg(pCmd, msg4); + return 
invalidSqlErrMsg(pQueryInfo->msg, msg4); } - pCmd->type |= TSDB_QUERY_TYPE_JOIN_QUERY; - ret = setExprToCond(pCmd, &pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr); + pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; + ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg); *pExpr = NULL; } else { // do nothing @@ -3689,17 +3226,18 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* *type = TSQL_EXPR_COLUMN; if (pRight->nSQLOptr == TK_ID) { // other column cannot be served as the join column - return invalidSqlErrMsg(pCmd, msg6); + return invalidSqlErrMsg(pQueryInfo->msg, msg5); } - ret = setExprToCond(pCmd, &pCondExpr->pColumnCond, *pExpr, NULL, parentOptr); + ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pQueryInfo->msg); *pExpr = NULL; // remove it from expr tree } return ret; } -int32_t getQueryCondExpr(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type, int32_t parentOptr) { +int32_t getQueryCondExpr(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type, + int32_t parentOptr) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } @@ -3717,12 +3255,12 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t rightType = -1; if (!isExprDirectParentOfLeaftNode(*pExpr)) { - int32_t ret = getQueryCondExpr(pCmd, &(*pExpr)->pLeft, pCondExpr, &leftType, (*pExpr)->nSQLOptr); + int32_t ret = getQueryCondExpr(pQueryInfo, &(*pExpr)->pLeft, pCondExpr, &leftType, (*pExpr)->nSQLOptr); if (ret != TSDB_CODE_SUCCESS) { return ret; } - ret = getQueryCondExpr(pCmd, &(*pExpr)->pRight, pCondExpr, &rightType, (*pExpr)->nSQLOptr); + ret = getQueryCondExpr(pQueryInfo, &(*pExpr)->pRight, pCondExpr, &rightType, (*pExpr)->nSQLOptr); if (ret != TSDB_CODE_SUCCESS) { return ret; } @@ -3733,7 +3271,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr, */ if (leftType != rightType) { if ((*pExpr)->nSQLOptr 
== TK_OR && (leftType + rightType != TSQL_EXPR_TBNAME + TSQL_EXPR_TAG)) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } @@ -3743,7 +3281,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, tSQLExpr** pExpr, SCondExpr* pCondExpr, exchangeExpr(*pExpr); - return handleExprInQueryCond(pCmd, pExpr, pCondExpr, type, parentOptr); + return handleExprInQueryCond(pQueryInfo, pExpr, pCondExpr, type, parentOptr); } static void doCompactQueryExpr(tSQLExpr** pExpr) { @@ -3777,12 +3315,12 @@ static void doCompactQueryExpr(tSQLExpr** pExpr) { } } -static void doExtractExprForSTable(tSQLExpr** pExpr, SSqlCmd* pCmd, tSQLExpr** pOut, int32_t tableIndex) { +static void doExtractExprForSTable(tSQLExpr** pExpr, SQueryInfo* pQueryInfo, tSQLExpr** pOut, int32_t tableIndex) { if (isExprDirectParentOfLeaftNode(*pExpr)) { tSQLExpr* pLeft = (*pExpr)->pLeft; SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return; } @@ -3799,16 +3337,16 @@ static void doExtractExprForSTable(tSQLExpr** pExpr, SSqlCmd* pCmd, tSQLExpr** p } else { *pOut = tSQLExprCreate(NULL, NULL, (*pExpr)->nSQLOptr); - doExtractExprForSTable(&(*pExpr)->pLeft, pCmd, &((*pOut)->pLeft), tableIndex); - doExtractExprForSTable(&(*pExpr)->pRight, pCmd, &((*pOut)->pRight), tableIndex); + doExtractExprForSTable(&(*pExpr)->pLeft, pQueryInfo, &((*pOut)->pLeft), tableIndex); + doExtractExprForSTable(&(*pExpr)->pRight, pQueryInfo, &((*pOut)->pRight), tableIndex); } } -static tSQLExpr* extractExprForSTable(tSQLExpr** pExpr, SSqlCmd* pCmd, int32_t tableIndex) { +static tSQLExpr* extractExprForSTable(tSQLExpr** pExpr, SQueryInfo* pQueryInfo, int32_t tableIndex) { tSQLExpr* pResExpr = NULL; if (*pExpr != NULL) { - doExtractExprForSTable(pExpr, pCmd, &pResExpr, tableIndex); + doExtractExprForSTable(pExpr, pQueryInfo, &pResExpr, 
tableIndex); doCompactQueryExpr(&pResExpr); } @@ -3828,40 +3366,38 @@ int tableNameCompar(const void* lhs, const void* rhs) { return ret > 0 ? 1 : -1; } -static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_t tableCondIndex, - char* tmpTableCondBuf) { - SSqlCmd* pCmd = &pSql->cmd; - const char* msg = "meter name too long"; +static int32_t setTableCondForMetricQuery(SQueryInfo* pQueryInfo, const char* account, tSQLExpr* pExpr, + int16_t tableCondIndex, SStringBuilder* sb) { + const char* msg = "table name too long"; if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableCondIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableCondIndex); - STagCond* pTagCond = &pSql->cmd.tagCond; + STagCond* pTagCond = &pQueryInfo->tagCond; pTagCond->tbnameCond.uid = pMeterMetaInfo->pMeterMeta->uid; - SString* pTableCond = &pCmd->tagCond.tbnameCond.cond; - SStringAlloc(pTableCond, 4096); - assert(pExpr->nSQLOptr == TK_LIKE || pExpr->nSQLOptr == TK_IN); if (pExpr->nSQLOptr == TK_LIKE) { - strcpy(pTableCond->z, tmpTableCondBuf); - pTableCond->n = strlen(pTableCond->z); + char* str = taosStringBuilderGetResult(sb, NULL); + pQueryInfo->tagCond.tbnameCond.cond = strdup(str); return TSDB_CODE_SUCCESS; } - strcpy(pTableCond->z, QUERY_COND_REL_PREFIX_IN); - pTableCond->n += strlen(QUERY_COND_REL_PREFIX_IN); + SStringBuilder sb1 = {0}; + taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); char db[TSDB_METER_ID_LEN] = {0}; // remove the duplicated input table names int32_t num = 0; - char** segments = strsplit(tmpTableCondBuf + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num); - qsort(segments, num, sizeof(void*), tableNameCompar); + char* tableNameString = taosStringBuilderGetResult(sb, NULL); + + char** segments = strsplit(tableNameString + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num); + qsort(segments, num, 
POINTER_BYTES, tableNameCompar); int32_t j = 1; for (int32_t i = 1; i < num; ++i) { @@ -3872,35 +3408,39 @@ static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_ num = j; SSQLToken dbToken = extractDBName(pMeterMetaInfo->name, db); - char* acc = getAccountId(pSql); for (int32_t i = 0; i < num; ++i) { - SStringEnsureRemain(pTableCond, TSDB_METER_ID_LEN); - if (i >= 1) { - pTableCond->z[pTableCond->n++] = TBNAME_LIST_SEP[0]; + taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1); } + char idBuf[TSDB_METER_ID_LEN + 1] = {0}; int32_t xlen = strlen(segments[i]); SSQLToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; - int32_t ret = setObjFullName(pTableCond->z + pTableCond->n, acc, &dbToken, &t, &xlen); + int32_t ret = setObjFullName(idBuf, account, &dbToken, &t, &xlen); if (ret != TSDB_CODE_SUCCESS) { + taosStringBuilderDestroy(&sb1); tfree(segments); - invalidSqlErrMsg(pCmd, msg); + + invalidSqlErrMsg(pQueryInfo->msg, msg); return ret; } - pTableCond->n += xlen; + taosStringBuilderAppendString(&sb1, idBuf); } + char* str = taosStringBuilderGetResult(&sb1, NULL); + pQueryInfo->tagCond.tbnameCond.cond = strdup(str); + + taosStringBuilderDestroy(&sb1); tfree(segments); return TSDB_CODE_SUCCESS; } -static bool validateFilterExpr(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) { - SColumnBase* pColBase = &pCmd->colList.pColList[i]; +static bool validateFilterExpr(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->colList.numOfCols; ++i) { + SColumnBase* pColBase = &pQueryInfo->colList.pColList[i]; for (int32_t j = 0; j < pColBase->numOfFilters; ++j) { SColumnFilterInfo* pColFilter = &pColBase->filterInfo[j]; @@ -3922,7 +3462,7 @@ static bool validateFilterExpr(SSqlCmd* pCmd) { return true; } -static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, tSQLExpr* pExpr) { +static int32_t getTimeRangeFromExpr(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) { const char* msg0 = "invalid timestamp"; const char* 
msg1 = "only one time stamp window allowed"; @@ -3932,19 +3472,19 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, tSQLExpr* pExpr) { if (!isExprDirectParentOfLeaftNode(pExpr)) { if (pExpr->nSQLOptr == TK_OR) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } - getTimeRangeFromExpr(pCmd, pExpr->pLeft); + getTimeRangeFromExpr(pQueryInfo, pExpr->pLeft); - return getTimeRangeFromExpr(pCmd, pExpr->pRight); + return getTimeRangeFromExpr(pQueryInfo, pExpr->pRight); } else { SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByNameEx(&pExpr->pLeft->colInfo, pCmd, &index) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; tSQLExpr* pRight = pExpr->pRight; @@ -3953,45 +3493,45 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, tSQLExpr* pExpr) { TSKEY etime = INT64_MAX; if (getTimeRange(&stime, &etime, pRight, pExpr->nSQLOptr, pMeterMeta->precision) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg0); + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } // update the timestamp query range - if (pCmd->stime < stime) { - pCmd->stime = stime; + if (pQueryInfo->stime < stime) { + pQueryInfo->stime = stime; } - if (pCmd->etime > etime) { - pCmd->etime = etime; + if (pQueryInfo->etime > etime) { + pQueryInfo->etime = etime; } } return TSDB_CODE_SUCCESS; } -static int32_t validateJoinExpr(SSqlCmd* pCmd, SCondExpr* pCondExpr) { +static int32_t validateJoinExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) { const char* msg1 = "super table join requires tags column"; const char* msg2 = "timestamp join condition missing"; const char* msg3 = "condition missing for join query"; - if 
(!QUERY_IS_JOIN_QUERY(pCmd->type)) { - if (pCmd->numOfTables == 1) { + if (!QUERY_IS_JOIN_QUERY(pQueryInfo->type)) { + if (pQueryInfo->numOfTables == 1) { return TSDB_CODE_SUCCESS; } else { - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // for stable join, tag columns - // must be present for join + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { // for stable join, tag columns + // must be present for join if (pCondExpr->pJoinExpr == NULL) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } if (!pCondExpr->tsJoin) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } return TSDB_CODE_SUCCESS; @@ -4019,127 +3559,126 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) { } } -int32_t parseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr) { - SSqlCmd* pCmd = &pSql->cmd; - - if (pExpr == NULL) { - return TSDB_CODE_SUCCESS; - } +static void doAddJoinTagsColumnsIntoTagList(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { + SColumnIndex index = {0}; - pCmd->stime = 0; - pCmd->etime = INT64_MAX; + getColumnIndexByName(&pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index); + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); - int32_t ret = TSDB_CODE_SUCCESS; + int32_t columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; + addRequiredTagColumn(pQueryInfo, columnInfo, index.tableIndex); - const char* msg1 = "invalid expression"; - SCondExpr condExpr = {0}; + getColumnIndexByName(&pCondExpr->pJoinExpr->pRight->colInfo, pQueryInfo, 
&index); + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index.tableIndex); - if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) { - return invalidSqlErrMsg(pCmd, msg1); + columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; + addRequiredTagColumn(pQueryInfo, columnInfo, index.tableIndex); } +} - ret = doParseWhereClause(pSql, pExpr, &condExpr); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } +static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr, tSQLExpr** pExpr) { + int32_t ret = TSDB_CODE_SUCCESS; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - if (QUERY_IS_JOIN_QUERY(pCmd->type) && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - SColumnIndex index = {0}; + if (pCondExpr->pTagCond != NULL) { + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + tSQLExpr* p1 = extractExprForSTable(pExpr, pQueryInfo, i); - getColumnIndexByNameEx(&condExpr.pJoinExpr->pLeft->colInfo, pCmd, &index); - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); - int32_t columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - addRequiredTagColumn(pCmd, columnInfo, index.tableIndex); + char c[TSDB_MAX_TAGS_LEN] = {0}; + char* str = c; + + if ((ret = getTagCondString(p1, &str)) != TSDB_CODE_SUCCESS) { + return ret; + } - getColumnIndexByNameEx(&condExpr.pJoinExpr->pRight->colInfo, pCmd, &index); - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + tsSetMetricQueryCond(&pQueryInfo->tagCond, pMeterMetaInfo->pMeterMeta->uid, c); - columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - addRequiredTagColumn(pCmd, columnInfo, index.tableIndex); + doCompactQueryExpr(pExpr); + tSQLExprDestroy(p1); + } + + pCondExpr->pTagCond = NULL; } - cleanQueryExpr(&condExpr); return ret; } +int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql) { + 
if (pExpr == NULL) { + return TSDB_CODE_SUCCESS; + } -int32_t doParseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr, SCondExpr* condExpr) { const char* msg = "invalid filter expression"; + const char* msg1 = "invalid expression"; - int32_t type = 0; - SSqlCmd* pCmd = &pSql->cmd; + int32_t ret = TSDB_CODE_SUCCESS; - /* - * tags query condition may be larger than 512bytes, - * therefore, we need to prepare enough large space - */ - char tableNameCond[TSDB_MAX_SQL_LEN] = {0}; + pQueryInfo->stime = 0; + pQueryInfo->etime = INT64_MAX; - int32_t ret = TSDB_CODE_SUCCESS; - if ((ret = getQueryCondExpr(pCmd, pExpr, condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) { + // tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space + SStringBuilder sb = {0}; + SCondExpr condExpr = {0}; + + if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); + } + + int32_t type = 0; + if ((ret = getQueryCondExpr(pQueryInfo, pExpr, &condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) { return ret; } doCompactQueryExpr(pExpr); // after expression compact, the expression tree is only include tag query condition - condExpr->pTagCond = (*pExpr); + condExpr.pTagCond = (*pExpr); // 1. check if it is a join query - if ((ret = validateJoinExpr(pCmd, condExpr)) != TSDB_CODE_SUCCESS) { + if ((ret = validateJoinExpr(pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) { return ret; } // 2. get the query time range - if ((ret = getTimeRangeFromExpr(pCmd, condExpr->pTimewindow)) != TSDB_CODE_SUCCESS) { + if ((ret = getTimeRangeFromExpr(pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { return ret; } // 3. 
get the tag query condition - if (condExpr->pTagCond != NULL) { - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - tSQLExpr* p1 = extractExprForSTable(pExpr, pCmd, i); - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); - - char c[TSDB_MAX_TAGS_LEN] = {0}; - char* str = c; - if ((ret = getTagCondString(pCmd, p1, &str)) != TSDB_CODE_SUCCESS) { - return ret; - } - - tsSetMetricQueryCond(&pCmd->tagCond, pMeterMetaInfo->pMeterMeta->uid, c); - - doCompactQueryExpr(pExpr); - tSQLExprDestroy(p1); - } - - condExpr->pTagCond = NULL; + if ((ret = getTagQueryCondExpr(pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) { + return ret; } // 4. get the table name query condition - if ((ret = getTablenameCond(pCmd, condExpr->pTableCond, tableNameCond)) != TSDB_CODE_SUCCESS) { + if ((ret = getTablenameCond(pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) { return ret; } // 5. other column query condition - if ((ret = getColumnQueryCondInfo(pCmd, condExpr->pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { + if ((ret = getColumnQueryCondInfo(pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { return ret; } // 6. join condition - if ((ret = getJoinCondInfo(pSql, condExpr->pJoinExpr)) != TSDB_CODE_SUCCESS) { + if ((ret = getJoinCondInfo(pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) { return ret; } // 7. query condition for table name - pCmd->tagCond.relType = (condExpr->relType == TK_AND) ? TSDB_RELATION_AND : TSDB_RELATION_OR; - ret = setTableCondForMetricQuery(pSql, condExpr->pTableCond, condExpr->tableCondIndex, tableNameCond); - if (!validateFilterExpr(pCmd)) { - return invalidSqlErrMsg(pCmd, msg); + pQueryInfo->tagCond.relType = (condExpr.relType == TK_AND) ? 
TSDB_RELATION_AND : TSDB_RELATION_OR; + + ret = setTableCondForMetricQuery(pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb); + taosStringBuilderDestroy(&sb); + + if (!validateFilterExpr(pQueryInfo)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg); } + doAddJoinTagsColumnsIntoTagList(pQueryInfo, &condExpr); + + cleanQueryExpr(&condExpr); return ret; } @@ -4232,11 +3771,11 @@ int32_t getTimeRange(int64_t* stime, int64_t* etime, tSQLExpr* pRight, int32_t o return TSDB_CODE_SUCCESS; } -int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd) { +int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo) { const char rep[] = {'(', ')', '*', ',', '.', '/', '\\', '+', '-', '%', ' '}; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - char* fieldName = tscFieldInfoGetField(pCmd, i)->name; + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + char* fieldName = tscFieldInfoGetField(pQueryInfo, i)->name; for (int32_t j = 0; j < TSDB_COL_NAME_LEN && fieldName[j] != 0; ++j) { for (int32_t k = 0; k < tListLen(rep); ++k) { if (fieldName[j] == rep[k]) { @@ -4250,12 +3789,12 @@ int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd) { } // the column name may be identical, here check again - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - char* fieldName = tscFieldInfoGetField(pCmd, i)->name; - for (int32_t j = i + 1; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { - if (strncasecmp(fieldName, tscFieldInfoGetField(pCmd, j)->name, TSDB_COL_NAME_LEN) == 0) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + char* fieldName = tscFieldInfoGetField(pQueryInfo, i)->name; + for (int32_t j = i + 1; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) { + if (strncasecmp(fieldName, tscFieldInfoGetField(pQueryInfo, j)->name, TSDB_COL_NAME_LEN) == 0) { const char* msg = "duplicated column name in new table"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, 
msg); } } } @@ -4263,115 +3802,124 @@ int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd) { return TSDB_CODE_SUCCESS; } -int32_t parseFillClause(SSqlCmd* pCmd, SQuerySQL* pQuerySQL) { +int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) { tVariantList* pFillToken = pQuerySQL->fillType; tVariantListItem* pItem = &pFillToken->a[0]; const int32_t START_INTERPO_COL_IDX = 1; - const char* msg = "illegal value or data overflow"; - const char* msg1 = "value is expected"; - const char* msg2 = "invalid fill option"; + + const char* msg = "illegal value or data overflow"; + const char* msg1 = "value is expected"; + const char* msg2 = "invalid fill option"; if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); + } + + if (pQueryInfo->defaultVal == NULL) { + pQueryInfo->defaultVal = calloc(pQueryInfo->fieldsInfo.numOfOutputCols, sizeof(int64_t)); + if (pQueryInfo->defaultVal == NULL) { + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } } if (strncasecmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) { - pCmd->interpoType = TSDB_INTERPO_NONE; + pQueryInfo->interpoType = TSDB_INTERPO_NONE; } else if (strncasecmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4) { - pCmd->interpoType = TSDB_INTERPO_NULL; - for (int32_t i = START_INTERPO_COL_IDX; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD* pFields = tscFieldInfoGetField(pCmd, i); - setNull((char*)&pCmd->defaultVal[i], pFields->type, pFields->bytes); + pQueryInfo->interpoType = TSDB_INTERPO_NULL; + for (int32_t i = START_INTERPO_COL_IDX; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD* pFields = tscFieldInfoGetField(pQueryInfo, i); + setNull((char*)&pQueryInfo->defaultVal[i], pFields->type, pFields->bytes); } } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { - pCmd->interpoType = TSDB_INTERPO_PREV; + pQueryInfo->interpoType = TSDB_INTERPO_PREV; } else if 
(strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) { - // not support yet - pCmd->interpoType = TSDB_INTERPO_LINEAR; + pQueryInfo->interpoType = TSDB_INTERPO_LINEAR; } else if (strncasecmp(pItem->pVar.pz, "value", 5) == 0 && pItem->pVar.nLen == 5) { - pCmd->interpoType = TSDB_INTERPO_SET_VALUE; + pQueryInfo->interpoType = TSDB_INTERPO_SET_VALUE; if (pFillToken->nExpr == 1) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } int32_t startPos = 1; int32_t numOfFillVal = pFillToken->nExpr - 1; /* for point interpolation query, we do not have the timestamp column */ - if (tscIsPointInterpQuery(pCmd)) { + if (tscIsPointInterpQuery(pQueryInfo)) { startPos = 0; - if (numOfFillVal > pCmd->fieldsInfo.numOfOutputCols) { - numOfFillVal = pCmd->fieldsInfo.numOfOutputCols; + if (numOfFillVal > pQueryInfo->fieldsInfo.numOfOutputCols) { + numOfFillVal = pQueryInfo->fieldsInfo.numOfOutputCols; } } else { - numOfFillVal = - (pFillToken->nExpr > pCmd->fieldsInfo.numOfOutputCols) ? pCmd->fieldsInfo.numOfOutputCols : pFillToken->nExpr; + numOfFillVal = (pFillToken->nExpr > pQueryInfo->fieldsInfo.numOfOutputCols) + ? 
pQueryInfo->fieldsInfo.numOfOutputCols + : pFillToken->nExpr; } int32_t j = 1; for (int32_t i = startPos; i < numOfFillVal; ++i, ++j) { - TAOS_FIELD* pFields = tscFieldInfoGetField(pCmd, i); - - int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pCmd->defaultVal[i], pFields->type); - if (ret != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg); - } - + TAOS_FIELD* pFields = tscFieldInfoGetField(pQueryInfo, i); + if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { - setNull((char*)(&pCmd->defaultVal[i]), pFields->type, pFields->bytes); + setNull((char*)(&pQueryInfo->defaultVal[i]), pFields->type, pFields->bytes); + continue; + } + + int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pQueryInfo->defaultVal[i], pFields->type); + if (ret != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg); } } - if ((pFillToken->nExpr < pCmd->fieldsInfo.numOfOutputCols) || - ((pFillToken->nExpr - 1 < pCmd->fieldsInfo.numOfOutputCols) && (tscIsPointInterpQuery(pCmd)))) { + if ((pFillToken->nExpr < pQueryInfo->fieldsInfo.numOfOutputCols) || + ((pFillToken->nExpr - 1 < pQueryInfo->fieldsInfo.numOfOutputCols) && (tscIsPointInterpQuery(pQueryInfo)))) { tVariantListItem* lastItem = &pFillToken->a[pFillToken->nExpr - 1]; - for (int32_t i = numOfFillVal; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD* pFields = tscFieldInfoGetField(pCmd, i); - tVariantDump(&lastItem->pVar, (char*)&pCmd->defaultVal[i], pFields->type); + for (int32_t i = numOfFillVal; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD* pFields = tscFieldInfoGetField(pQueryInfo, i); + tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->defaultVal[i], pFields->type); if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { - setNull((char*)(&pCmd->defaultVal[i]), pFields->type, pFields->bytes); + setNull((char*)(&pQueryInfo->defaultVal[i]), pFields->type, pFields->bytes); } } } } else { - return 
invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } return TSDB_CODE_SUCCESS; } -static void setDefaultOrderInfo(SSqlCmd* pCmd) { +static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { /* set default timestamp order information for all queries */ - pCmd->order.order = TSQL_SO_ASC; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + pQueryInfo->order.order = TSQL_SO_ASC; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); - if (isTopBottomQuery(pCmd)) { - pCmd->order.order = TSQL_SO_ASC; - pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + if (isTopBottomQuery(pQueryInfo)) { + pQueryInfo->order.order = TSQL_SO_ASC; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } else { - pCmd->order.orderColId = -1; + pQueryInfo->order.orderColId = -1; } /* for metric query, set default ascending order for group output */ - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - pCmd->groupbyExpr.orderType = TSQL_SO_ASC; + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { + pQueryInfo->groupbyExpr.orderType = TSQL_SO_ASC; } } -int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema, int32_t numOfCols) { +int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema) { const char* msg0 = "only support order by primary timestamp"; const char* msg1 = "invalid column name"; const char* msg2 = "only support order by primary timestamp and queried column"; const char* msg3 = "only support order by primary timestamp and first tag in groupby clause"; - setDefaultOrderInfo(pCmd); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + setDefaultOrderInfo(pQueryInfo); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); if (pQuerySql->pSortOrder == NULL) { return TSDB_CODE_SUCCESS; @@ -4387,11 +3935,11 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema */ if 
(UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { if (pSortorder->nExpr > 1) { - return invalidSqlErrMsg(pCmd, msg0); + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } } else { if (pSortorder->nExpr > 2) { - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } } @@ -4406,18 +3954,17 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema SSQLToken columnName = {pVar->nLen, pVar->nType, pVar->pz}; SColumnIndex index = {0}; - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // metric query - if (getColumnIndexByNameEx(&columnName, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { // metric query + if (getColumnIndexByName(&columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } bool orderByTags = false; bool orderByTS = false; - bool orderByCol = false; if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) { int32_t relTagIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - if (relTagIndex == pCmd->groupbyExpr.columnInfo[0].colIdx) { + if (relTagIndex == pQueryInfo->groupbyExpr.columnInfo[0].colIdx) { orderByTags = true; } } else if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { @@ -4428,83 +3975,83 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema orderByTS = true; } - if (!(orderByTags || orderByTS) && !isTopBottomQuery(pCmd)) { - return invalidSqlErrMsg(pCmd, msg3); + if (!(orderByTags || orderByTS) && !isTopBottomQuery(pQueryInfo)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } else { assert(!(orderByTags && orderByTS)); } if (pSortorder->nExpr == 1) { if (orderByTags) { - pCmd->groupbyExpr.orderIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - pCmd->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; - } else if (isTopBottomQuery(pCmd)) { + 
pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; + pQueryInfo->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; + } else if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ - SSqlExpr* pExpr = tscSqlExprGet(pCmd, 0); + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); assert(pExpr->functionId == TSDB_FUNC_TS); - pExpr = tscSqlExprGet(pCmd, 1); + pExpr = tscSqlExprGet(pQueryInfo, 1); if (pExpr->colInfo.colIdx != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - pCmd->order.order = pQuerySql->pSortOrder->a[0].sortOrder; - pCmd->order.orderColId = pSchema[index.columnIndex].colId; + pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } else { - pCmd->order.order = pSortorder->a[0].sortOrder; - pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + pQueryInfo->order.order = pSortorder->a[0].sortOrder; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } if (pSortorder->nExpr == 2) { if (orderByTags) { - pCmd->groupbyExpr.orderIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - pCmd->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; + pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; + pQueryInfo->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; } else { - pCmd->order.order = pSortorder->a[0].sortOrder; - pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + pQueryInfo->order.order = pSortorder->a[0].sortOrder; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } tVariant* pVar2 = &pSortorder->a[1].pVar; SSQLToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; - if (getColumnIndexByNameEx(&cname, pCmd, &index) != 
TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + if (getColumnIndexByName(&cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } else { - pCmd->order.order = pSortorder->a[1].sortOrder; - pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + pQueryInfo->order.order = pSortorder->a[1].sortOrder; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } } else { // meter query - if (getColumnIndexByNameEx(&columnName, pCmd, &index) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + if (getColumnIndexByName(&columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } - if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pCmd)) { - return invalidSqlErrMsg(pCmd, msg2); + if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - if (isTopBottomQuery(pCmd)) { + if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ - SSqlExpr* pExpr = tscSqlExprGet(pCmd, 0); + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); assert(pExpr->functionId == TSDB_FUNC_TS); - pExpr = tscSqlExprGet(pCmd, 1); + pExpr = tscSqlExprGet(pQueryInfo, 1); if (pExpr->colInfo.colIdx != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - pCmd->order.order = pQuerySql->pSortOrder->a[0].sortOrder; - pCmd->order.orderColId = pSchema[index.columnIndex].colId; + pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } - pCmd->order.order = 
pQuerySql->pSortOrder->a[0].sortOrder; + pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; } return TSDB_CODE_SUCCESS; @@ -4513,64 +4060,58 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const int32_t DEFAULT_TABLE_INDEX = 0; - SSqlCmd* pCmd = &pSql->cmd; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, DEFAULT_TABLE_INDEX); + const char* msg1 = "invalid table name"; + const char* msg2 = "table name too long"; + const char* msg3 = "manipulation of tag available for super table"; + const char* msg4 = "set tag value only available for table"; + const char* msg5 = "only support add one tag"; + const char* msg6 = "column can only be modified by super table"; + SSqlCmd* pCmd = &pSql->cmd; SAlterTableSQL* pAlterSQL = pInfo->pAlterInfo; - pCmd->command = TSDB_SQL_ALTER_TABLE; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, DEFAULT_TABLE_INDEX); if (tscValidateName(&(pAlterSQL->name)) != TSDB_CODE_SUCCESS) { - const char* msg = "invalid table name"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } - if (setMeterID(pSql, &(pAlterSQL->name), 0) != TSDB_CODE_SUCCESS) { - const char* msg = "table name too long"; - return invalidSqlErrMsg(pCmd, msg); + if (setMeterID(pMeterMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo->name, DEFAULT_TABLE_INDEX); + int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo); if (ret != TSDB_CODE_SUCCESS) { return ret; } SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; - SSchema* pSchema = tsGetSchema(pMeterMeta); - if (pInfo->sqlType == ALTER_TABLE_TAGS_ADD || pInfo->sqlType == ALTER_TABLE_TAGS_DROP || - pInfo->sqlType == ALTER_TABLE_TAGS_CHG) { + if (pAlterSQL->type == 
TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN || + pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { - const char* msg = "manipulation of tag available for metric"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - } else if ((pInfo->sqlType == ALTER_TABLE_TAGS_SET) && (UTIL_METER_IS_METRIC(pMeterMetaInfo))) { - const char* msg = "set tag value only available for table"; - return invalidSqlErrMsg(pCmd, msg); - } else if ((pInfo->sqlType == ALTER_TABLE_ADD_COLUMN || pInfo->sqlType == ALTER_TABLE_DROP_COLUMN) && + } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo))) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); + } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) && UTIL_METER_IS_CREATE_FROM_METRIC(pMeterMetaInfo)) { - const char* msg = "column can only be modified by metric"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg6); } - if (pInfo->sqlType == ALTER_TABLE_TAGS_ADD) { - pCmd->count = TSDB_ALTER_TABLE_ADD_TAG_COLUMN; - + if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) { tFieldList* pFieldList = pAlterSQL->pAddColumns; if (pFieldList->nField > 1) { - const char* msg = "only support add one tag"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg5); } if (!validateOneTags(pCmd, &pFieldList->p[0])) { return TSDB_CODE_INVALID_SQL; } - tscFieldInfoSetValFromField(&pCmd->fieldsInfo, 0, &pFieldList->p[0]); - pCmd->numOfCols = 1; // only one column - - } else if (pInfo->sqlType == ALTER_TABLE_TAGS_DROP) { - pCmd->count = TSDB_ALTER_TABLE_DROP_TAG_COLUMN; - + tscFieldInfoSetValFromField(&pQueryInfo->fieldsInfo, 0, &pFieldList->p[0]); + } else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) { const char* msg1 = "no tags can 
be dropped"; const char* msg2 = "only support one tag"; const char* msg3 = "tag name too long"; @@ -4578,48 +4119,40 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg5 = "primary tag cannot be dropped"; if (pMeterMeta->numOfTags == 1) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } // numOfTags == 1 if (pAlterSQL->varList->nExpr > 1) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } tVariantListItem* pItem = &pAlterSQL->varList->a[0]; if (pItem->pVar.nLen > TSDB_COL_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg3); + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - int32_t idx = -1; - for (int32_t i = 0; i < pMeterMeta->numOfTags; ++i) { - int32_t tagIdx = i + pMeterMeta->numOfColumns; - char* tagName = pSchema[tagIdx].name; - size_t nLen = strlen(tagName); + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SSQLToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen, .type = TK_STRING}; - if ((strncasecmp(tagName, pItem->pVar.pz, nLen) == 0) && (pItem->pVar.nLen == nLen)) { - idx = i; - break; - } + if (getColumnIndexByName(&name, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; } - if (idx == -1) { - return invalidSqlErrMsg(pCmd, msg4); - } else if (idx == 0) { - return invalidSqlErrMsg(pCmd, msg5); + if (index.columnIndex < pMeterMeta->numOfColumns) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); + } else if (index.columnIndex == 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg5); } - char name[128] = {0}; - strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); - - pCmd->numOfCols = 1; // only one column - - } else if (pInfo->sqlType == ALTER_TABLE_TAGS_CHG) { + char name1[128] = {0}; + strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, 
TSDB_DATA_TYPE_INT, name1, + tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { const char* msg1 = "tag name too long"; const char* msg2 = "invalid tag name"; - pCmd->count = TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN; tVariantList* pVarList = pAlterSQL->varList; if (pVarList->nExpr > 2) { return TSDB_CODE_INVALID_SQL; @@ -4629,178 +4162,148 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pDstItem = &pAlterSQL->varList->a[1]; if (pSrcItem->pVar.nLen >= TSDB_COL_NAME_LEN || pDstItem->pVar.nLen >= TSDB_COL_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } if (pSrcItem->pVar.nType != TSDB_DATA_TYPE_BINARY || pDstItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } SColumnIndex srcIndex = COLUMN_INDEX_INITIALIZER; SColumnIndex destIndex = COLUMN_INDEX_INITIALIZER; SSQLToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING}; - if (getColumnIndexByNameEx(&srcToken, pCmd, &srcIndex) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } SSQLToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING}; - if (getColumnIndexByNameEx(&destToken, pCmd, &destIndex) == TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } char name[128] = {0}; strncpy(name, pVarList->a[0].pVar.pz, pVarList->a[0].pVar.nLen); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); memset(name, 0, tListLen(name)); strncpy(name, pVarList->a[1].pVar.pz, 
pVarList->a[1].pVar.nLen); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 1, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); - - pCmd->numOfCols = 2; - } else if (pInfo->sqlType == ALTER_TABLE_TAGS_SET) { - const char* msg0 = "tag name too long"; + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 1, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + } else if (pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) { const char* msg1 = "invalid tag value"; - const char* msg2 = "invalid tag name"; + const char* msg2 = "update normal column not supported"; const char* msg3 = "tag value too long"; - pCmd->count = TSDB_ALTER_TABLE_UPDATE_TAG_VAL; - - // Note: update can only be applied to meter not metric. - // the following is handle display tags value for meters created according to metric - + // Note: update can only be applied to table not super table. + // the following is handle display tags value for meters created according to super table tVariantList* pVarList = pAlterSQL->varList; tVariant* pTagName = &pVarList->a[0].pVar; - if (pTagName->nLen > TSDB_COL_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg0); - } - - int32_t tagsIndex = -1; - SSchema* pTagsSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); - - for (int32_t i = 0; i < pMeterMetaInfo->pMeterMeta->numOfTags; ++i) { - if (strcmp(pTagName->pz, pTagsSchema[i].name) == 0 && strlen(pTagsSchema[i].name) == pTagName->nLen) { - tagsIndex = i; - break; - } + SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; + SSQLToken name = {.type = TK_STRING, .z = pTagName->pz, .n = pTagName->nLen}; + if (getColumnIndexByName(&name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; } - if (tagsIndex == -1) { - return invalidSqlErrMsg(pCmd, msg2); + if (columnIndex.columnIndex < pMeterMeta->numOfColumns) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } - if (tVariantDump(&pVarList->a[1].pVar, pCmd->payload, pTagsSchema[tagsIndex].type) != 
TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(pCmd, msg1); + SSchema* pTagsSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, columnIndex.columnIndex); + if (tVariantDump(&pVarList->a[1].pVar, pAlterSQL->tagData.data /*pCmd->payload*/, pTagsSchema->type) != + TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } // validate the length of binary - if ((pTagsSchema[tagsIndex].type == TSDB_DATA_TYPE_BINARY || pTagsSchema[tagsIndex].type == TSDB_DATA_TYPE_NCHAR) && - pVarList->a[1].pVar.nLen > pTagsSchema[tagsIndex].bytes) { - return invalidSqlErrMsg(pCmd, msg3); + if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) && + pVarList->a[1].pVar.nLen > pTagsSchema->bytes) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - char name[128] = {0}; - strncpy(name, pTagName->pz, pTagName->nLen); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); - - pCmd->numOfCols = 1; - } else if (pInfo->sqlType == ALTER_TABLE_ADD_COLUMN) { - pCmd->count = TSDB_ALTER_TABLE_ADD_COLUMN; + char name1[128] = {0}; + strncpy(name1, pTagName->pz, pTagName->nLen); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name1, + tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + } else if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN) { tFieldList* pFieldList = pAlterSQL->pAddColumns; if (pFieldList->nField > 1) { const char* msg = "only support add one column"; - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(pQueryInfo->msg, msg); } if (!validateOneColumn(pCmd, &pFieldList->p[0])) { return TSDB_CODE_INVALID_SQL; } - tscFieldInfoSetValFromField(&pCmd->fieldsInfo, 0, &pFieldList->p[0]); - pCmd->numOfCols = 1; // only one column - } else if (pInfo->sqlType == ALTER_TABLE_DROP_COLUMN) { - pCmd->count = TSDB_ALTER_TABLE_DROP_COLUMN; - + tscFieldInfoSetValFromField(&pQueryInfo->fieldsInfo, 0, &pFieldList->p[0]); + } else if (pAlterSQL->type == 
TSDB_ALTER_TABLE_DROP_COLUMN) { const char* msg1 = "no columns can be dropped"; const char* msg2 = "only support one column"; - const char* msg3 = "column name too long"; const char* msg4 = "illegal column name"; - const char* msg5 = "primary timestamp column cannot be dropped"; + const char* msg3 = "primary timestamp column cannot be dropped"; if (pMeterMeta->numOfColumns == TSDB_MIN_COLUMNS) { // - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } if (pAlterSQL->varList->nExpr > 1) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } tVariantListItem* pItem = &pAlterSQL->varList->a[0]; - if (pItem->pVar.nLen > TSDB_COL_NAME_LEN) { - return invalidSqlErrMsg(pCmd, msg3); - } - - int32_t idx = -1; - for (int32_t i = 0; i < pMeterMeta->numOfColumns; ++i) { - char* colName = pSchema[i].name; - size_t len = strlen(colName); - if ((strncasecmp(colName, pItem->pVar.pz, len) == 0) && (len == pItem->pVar.nLen)) { - idx = i; - break; - } + SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; + SSQLToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen}; + if (getColumnIndexByName(&name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); } - if (idx == -1) { - return invalidSqlErrMsg(pCmd, msg4); - } else if (idx == 0) { - return invalidSqlErrMsg(pCmd, msg5); + if (columnIndex.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } - char name[128] = {0}; - strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); - tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); - - pCmd->numOfCols = 1; // only one column + char name1[128] = {0}; + strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen); + tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name1, + tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); } 
return TSDB_CODE_SUCCESS; } -int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd) { +int32_t validateSqlFunctionInStreamSql(SQueryInfo* pQueryInfo) { const char* msg0 = "sample interval can not be less than 10ms."; const char* msg1 = "functions not allowed in select clause"; - if (pCmd->nAggTimeInterval != 0 && pCmd->nAggTimeInterval < 10) { - return invalidSqlErrMsg(pCmd, msg0); + if (pQueryInfo->nAggTimeInterval != 0 && pQueryInfo->nAggTimeInterval < 10) { + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int32_t functId = tscSqlExprGet(pCmd, i)->functionId; + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int32_t functId = tscSqlExprGet(pQueryInfo, i)->functionId; if (!IS_STREAM_QUERY_VALID(aAggs[functId].nStatus)) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } return TSDB_CODE_SUCCESS; } -int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd) { +int32_t validateFunctionsInIntervalOrGroupbyQuery(SQueryInfo* pQueryInfo) { bool isProjectionFunction = false; const char* msg1 = "column projection is not compatible with interval"; - const char* msg2 = "interval not allowed for tag queries"; // multi-output set/ todo refactor - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, k); + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); // projection query on primary timestamp, the selectivity function needs to be present. 
if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { bool hasSelectivity = false; - for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { - SSqlExpr* pEx = tscSqlExprGet(pCmd, j); + for (int32_t j = 0; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) { + SSqlExpr* pEx = tscSqlExprGet(pQueryInfo, j); if ((aAggs[pEx->functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) { hasSelectivity = true; break; @@ -4819,7 +4322,7 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd) { } if (isProjectionFunction) { - invalidSqlErrMsg(pCmd, msg1); + invalidSqlErrMsg(pQueryInfo->msg, msg1); } return isProjectionFunction == true ? TSDB_CODE_INVALID_SQL : TSDB_CODE_SUCCESS; @@ -4952,62 +4455,80 @@ int32_t validateColumnName(char* name) { return TSDB_CODE_SUCCESS; } -bool hasTimestampForPointInterpQuery(SSqlCmd* pCmd) { - if (!tscIsPointInterpQuery(pCmd)) { +bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo) { + if (!tscIsPointInterpQuery(pQueryInfo)) { return true; } - return (pCmd->stime == pCmd->etime) && (pCmd->stime != 0); + return (pQueryInfo->stime == pQueryInfo->etime) && (pQueryInfo->stime != 0); } -int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { - SSqlCmd* pCmd = &pSql->cmd; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL* pQuerySql, SSqlObj* pSql) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); const char* msg0 = "soffset/offset can not be less than 0"; const char* msg1 = "slimit/soffset only available for STable query"; const char* msg2 = "function not supported on table"; const char* msg3 = "slimit/soffset can not apply to projection query"; - + // handle the limit offset value, validate the limit - pCmd->limit = pQuerySql->limit; - pCmd->slimit = pQuerySql->slimit; + pQueryInfo->limit = pQuerySql->limit; + 
pQueryInfo->clauseLimit = pQueryInfo->limit.limit; + + pQueryInfo->slimit = pQuerySql->slimit; - if (pCmd->slimit.offset < 0 || pCmd->limit.offset < 0) { - return invalidSqlErrMsg(pCmd, msg0); + if (pQueryInfo->slimit.offset < 0 || pQueryInfo->limit.offset < 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg0); } - if (pCmd->limit.limit == 0) { + if (pQueryInfo->limit.limit == 0) { tscTrace("%p limit 0, no output result", pSql); - pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + return TSDB_CODE_SUCCESS; } - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { bool queryOnTags = false; - if (tscQueryOnlyMetricTags(pCmd, &queryOnTags) != TSDB_CODE_SUCCESS) { + if (tscQueryOnlyMetricTags(pQueryInfo, &queryOnTags) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } if (queryOnTags == true) { // local handle the metric tag query - pCmd->command = TSDB_SQL_RETRIEVE_TAGS; + pQueryInfo->command = TSDB_SQL_RETRIEVE_TAGS; } else { - if (tscProjectionQueryOnMetric(pCmd) && (pCmd->slimit.limit > 0 || pCmd->slimit.offset > 0)) { - return invalidSqlErrMsg(pCmd, msg3); + if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) { + if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); + } + + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { +// if (pQueryInfo->order.orderColId >= 0) { +// if (pQueryInfo->limit.limit == -1) { +// return invalidSqlErrMsg(pQueryInfo->msg, msg4); +// } else if (pQueryInfo->limit.limit > 10000) { // the result set can not be larger than 10000 +// //todo use global config parameter +// return invalidSqlErrMsg(pQueryInfo->msg, msg5); +// } +// } + + pQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY; // for projection query on super table, all queries are subqueries + } } } - if (pCmd->slimit.limit == 0) { + if (pQueryInfo->slimit.limit == 0) { tscTrace("%p limit 0, no output result", pSql); 
- pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; return TSDB_CODE_SUCCESS; } /* - * get the distribution of all tables among available virtual nodes that satisfy query condition and - * created according to this super table from management node. - * And then launching multiple async-queries on required virtual nodes, which is the first-stage query operation. + * Get the distribution of all tables among all available virtual nodes that are qualified for the query condition + * and created according to this super table from management node. + * And then launching multiple async-queries against all qualified virtual nodes, during the first-stage + * query operation. */ - int32_t code = tscGetMetricMeta(pSql); + int32_t code = tscGetMetricMeta(pSql, clauseIndex); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -5016,21 +4537,35 @@ int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta; if (pMeterMetaInfo->pMeterMeta == NULL || pMetricMeta == NULL || pMetricMeta->numOfMeters == 0) { tscTrace("%p no table in metricmeta, no output result", pSql); - pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; } // keep original limitation value in globalLimit - pCmd->globalLimit = pCmd->limit.limit; + pQueryInfo->clauseLimit = pQueryInfo->limit.limit; + pQueryInfo->prjOffset = pQueryInfo->limit.offset; + + if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + /* + * the limitation/offset value should be removed during retrieve data from virtual node, + * since the global order are done in client side, so the limitation should also + * be done at the client side. 
+ */ + if (pQueryInfo->limit.limit > 0) { + pQueryInfo->limit.limit = -1; + } + + pQueryInfo->limit.offset = 0; + } } else { - if (pCmd->slimit.limit != -1 || pCmd->slimit.offset != 0) { - return invalidSqlErrMsg(pCmd, msg1); + if (pQueryInfo->slimit.limit != -1 || pQueryInfo->slimit.offset != 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } // filter the query functions operating on "tbname" column that are not supported by normal columns. - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->colInfo.colIdx == TSDB_TBNAME_COLUMN_INDEX) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } } } @@ -5040,11 +4575,11 @@ int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { const char* msg = "invalid number of options"; - + pMsg->daysToKeep = htonl(-1); pMsg->daysToKeep1 = htonl(-1); pMsg->daysToKeep2 = htonl(-1); - + tVariantList* pKeep = pCreateDb->keep; if (pKeep != NULL) { switch (pKeep->nExpr) { @@ -5062,36 +4597,34 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* p pMsg->daysToKeep2 = htonl(pKeep->a[2].pVar.i64Key); break; } - default: { - return invalidSqlErrMsg(pCmd, msg); - } + default: { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } } } - + return TSDB_CODE_SUCCESS; } static int32_t setTimePrecisionOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDbInfo) { const char* msg = "invalid time precision"; - + pMsg->precision = TSDB_TIME_PRECISION_MILLI; // millisecond by default - + SSQLToken* pToken = &pCreateDbInfo->precision; if (pToken->n > 0) { pToken->n = strdequote(pToken->z); - + if (strncmp(pToken->z, TSDB_TIME_PRECISION_MILLI_STR, pToken->n) == 0 && 
strlen(TSDB_TIME_PRECISION_MILLI_STR) == pToken->n) { // time precision for this db: million second pMsg->precision = TSDB_TIME_PRECISION_MILLI; } else if (strncmp(pToken->z, TSDB_TIME_PRECISION_MICRO_STR, pToken->n) == 0 && - strlen(TSDB_TIME_PRECISION_MICRO_STR) == pToken->n) { + strlen(TSDB_TIME_PRECISION_MICRO_STR) == pToken->n) { pMsg->precision = TSDB_TIME_PRECISION_MICRO; } else { - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } } - + return TSDB_CODE_SUCCESS; } @@ -5099,7 +4632,7 @@ static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { pMsg->blocksPerMeter = htons(pCreateDb->numOfBlocksPerTable); pMsg->compression = pCreateDb->compressionLevel; - pMsg->commitLog = (char) pCreateDb->commitLog; + pMsg->commitLog = (char)pCreateDb->commitLog; pMsg->commitTime = htonl(pCreateDb->commitTime); pMsg->maxSessions = htonl(pCreateDb->tablesPerVnode); pMsg->cacheNumOfBlocks.fraction = pCreateDb->numOfAvgCacheBlocks; @@ -5112,51 +4645,54 @@ static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) { SCreateDbMsg* pMsg = (SCreateDbMsg*)(pCmd->payload + tsRpcHeadSize + sizeof(SMgmtHead)); setCreateDBOption(pMsg, pCreateDbSql); - + if (setKeepOption(pCmd, pMsg, pCreateDbSql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - + if (setTimePrecisionOption(pCmd, pMsg, pCreateDbSql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - + if (tscCheckCreateDbParams(pCmd, pMsg) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } - + return TSDB_CODE_SUCCESS; } -void tscAddTimestampColumn(SSqlCmd* pCmd, int16_t functionId, int16_t tableIndex) { +void tscAddTimestampColumn(SQueryInfo* pQueryInfo, int16_t functionId, int16_t tableIndex) { // the first column not timestamp column, add it SSqlExpr* pExpr = NULL; - if (pCmd->exprsInfo.numOfExprs > 0) { - pExpr = tscSqlExprGet(pCmd, 0); + if 
(pQueryInfo->exprsInfo.numOfExprs > 0) { + pExpr = tscSqlExprGet(pQueryInfo, 0); } if (pExpr == NULL || pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX || pExpr->functionId != functionId) { SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - pExpr = tscSqlExprInsert(pCmd, 0, functionId, &index, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE); + pExpr = tscSqlExprInsert(pQueryInfo, 0, functionId, &index, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE); pExpr->colInfo.flag = TSDB_COL_NORMAL; // NOTE: tag column does not add to source column list SColumnList ids = getColumnList(1, tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX); - insertResultField(pCmd, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, "ts"); + insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, "ts"); } } -void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex) { - if (pParentObj->cmd.groupbyExpr.numOfGroupCols > 0) { - int32_t num = pSql->cmd.exprsInfo.numOfExprs; - SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, num - 1); - SSqlCmd* pCmd = &pSql->cmd; +void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex) { + SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentObj->cmd, subClauseIndex); + + if (pParentQueryInfo->groupbyExpr.numOfGroupCols > 0) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, subClauseIndex); + int32_t num = pQueryInfo->exprsInfo.numOfExprs; + + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, num - 1); if (pExpr->functionId != TSDB_FUNC_TAG) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - int16_t columnInfo = tscGetJoinTagColIndexByUid(pCmd, pMeterMetaInfo->pMeterMeta->uid); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); + int16_t columnInfo = tscGetJoinTagColIndexByUid(&pQueryInfo->tagCond, pMeterMetaInfo->pMeterMeta->uid); SColumnIndex index = 
{.tableIndex = 0, .columnIndex = columnInfo}; SSchema* pSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta); @@ -5164,51 +4700,59 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIn int16_t bytes = pSchema[index.columnIndex].bytes; char* name = pSchema[index.columnIndex].name; - pExpr = tscSqlExprInsert(pCmd, pCmd->fieldsInfo.numOfOutputCols, TSDB_FUNC_TAG, &index, type, bytes, bytes); + pExpr = tscSqlExprInsert(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, TSDB_FUNC_TAG, &index, type, bytes, + bytes); pExpr->colInfo.flag = TSDB_COL_TAG; // NOTE: tag column does not add to source column list SColumnList ids = {0}; - insertResultField(pCmd, pCmd->fieldsInfo.numOfOutputCols, &ids, bytes, type, name); + insertResultField(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, &ids, bytes, type, name); int32_t relIndex = index.columnIndex; pExpr->colInfo.colIdx = relIndex; - pCmd->groupbyExpr.columnInfo[0].colIdx = relIndex; + pQueryInfo->groupbyExpr.columnInfo[0].colIdx = relIndex; - addRequiredTagColumn(pCmd, pCmd->groupbyExpr.columnInfo[0].colIdx, 0); + addRequiredTagColumn(pQueryInfo, pQueryInfo->groupbyExpr.columnInfo[0].colIdx, 0); } } } -void doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex) { - int32_t index = pCmd->groupbyExpr.columnInfo[tagIndex].colIdx; +// limit the output to be 1 for each state value +static void doLimitOutputNormalColOfGroupby(SSqlExpr* pExpr) { + int32_t outputRow = 1; + tVariantCreateFromBinary(&pExpr->param[0], (char*) &outputRow, sizeof(int32_t), TSDB_DATA_TYPE_INT); + pExpr->numOfParams = 1; +} + +void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex) { + int32_t index = pQueryInfo->groupbyExpr.columnInfo[tagIndex].colIdx; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SSchema* pSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, index); SColumnIndex colIndex = 
{.tableIndex = 0, .columnIndex = index}; - SSqlExpr* pExpr = tscSqlExprInsert(pCmd, pCmd->fieldsInfo.numOfOutputCols, TSDB_FUNC_PRJ, &colIndex, pSchema->type, - pSchema->bytes, pSchema->bytes); + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, TSDB_FUNC_PRJ, &colIndex, + pSchema->type, pSchema->bytes, pSchema->bytes); pExpr->colInfo.flag = TSDB_COL_NORMAL; - pExpr->param[0].i64Key = 1; - pExpr->numOfParams = 1; - + doLimitOutputNormalColOfGroupby(pExpr); + // NOTE: tag column does not add to source column list SColumnList list = {0}; list.num = 1; list.ids[0] = colIndex; - insertResultField(pCmd, pCmd->fieldsInfo.numOfOutputCols, &list, pSchema->bytes, pSchema->type, pSchema->name); - tscFieldInfoUpdateVisible(&pCmd->fieldsInfo, pCmd->fieldsInfo.numOfOutputCols - 1, false); + insertResultField(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, &list, pSchema->bytes, pSchema->type, + pSchema->name); + tscFieldInfoUpdateVisible(&pQueryInfo->fieldsInfo, pQueryInfo->fieldsInfo.numOfOutputCols - 1, false); } -static void doUpdateSqlFunctionForTagPrj(SSqlCmd* pCmd) { +static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) { int32_t tagLength = 0; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == TSDB_FUNC_TAGPRJ || pExpr->functionId == TSDB_FUNC_TAG) { pExpr->functionId = TSDB_FUNC_TAG_DUMMY; tagLength += pExpr->resBytes; @@ -5218,14 +4762,11 @@ static void doUpdateSqlFunctionForTagPrj(SSqlCmd* pCmd) { } } - int16_t resType = 0; - int16_t resBytes = 0; - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); - for (int32_t i = 0; i < 
pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId != TSDB_FUNC_TAG_DUMMY && pExpr->functionId != TSDB_FUNC_TS_DUMMY) { SSchema* pColSchema = &pSchema[pExpr->colInfo.colIdx]; getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->functionId, pExpr->param[0].i64Key, &pExpr->resType, @@ -5234,16 +4775,15 @@ static void doUpdateSqlFunctionForTagPrj(SSqlCmd* pCmd) { } } -static void doUpdateSqlFunctionForColPrj(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); +static void doUpdateSqlFunctionForColPrj(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == TSDB_FUNC_PRJ) { bool qualifiedCol = false; - for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) { - if (pExpr->colInfo.colId == pCmd->groupbyExpr.columnInfo[j].colId) { + for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) { + if (pExpr->colInfo.colId == pQueryInfo->groupbyExpr.columnInfo[j].colId) { qualifiedCol = true; - - pExpr->param[0].i64Key = 1; // limit the output to be 1 for each state value + doLimitOutputNormalColOfGroupby(pExpr); pExpr->numOfParams = 1; break; } @@ -5264,12 +4804,12 @@ static bool tagColumnInGroupby(SSqlGroupbyExpr* pGroupbyExpr, int16_t columnId) return false; } -static bool onlyTagPrjFunction(SSqlCmd* pCmd) { +static bool onlyTagPrjFunction(SQueryInfo* pQueryInfo) { bool hasTagPrj = false; bool hasColumnPrj = false; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == 
TSDB_FUNC_PRJ) { hasColumnPrj = true; } else if (pExpr->functionId == TSDB_FUNC_TAGPRJ) { @@ -5281,16 +4821,16 @@ static bool onlyTagPrjFunction(SSqlCmd* pCmd) { } // check if all the tags prj columns belongs to the group by columns -static bool allTagPrjInGroupby(SSqlCmd* pCmd) { +static bool allTagPrjInGroupby(SQueryInfo* pQueryInfo) { bool allInGroupby = true; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId != TSDB_FUNC_TAGPRJ) { continue; } - if (!tagColumnInGroupby(&pCmd->groupbyExpr, pExpr->colInfo.colId)) { + if (!tagColumnInGroupby(&pQueryInfo->groupbyExpr, pExpr->colInfo.colId)) { allInGroupby = false; break; } @@ -5300,9 +4840,9 @@ static bool allTagPrjInGroupby(SSqlCmd* pCmd) { return allInGroupby; } -static void updateTagPrjFunction(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); +static void updateTagPrjFunction(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == TSDB_FUNC_TAGPRJ) { pExpr->functionId = TSDB_FUNC_TAG; } @@ -5315,18 +4855,16 @@ static void updateTagPrjFunction(SSqlCmd* pCmd) { * 2. if selectivity function and tagprj function both exist, there should be only * one selectivity function exists. 
*/ -static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) { +static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) { const char* msg1 = "only one selectivity function allowed in presence of tags function"; - const char* msg2 = "functions not allowed"; const char* msg3 = "aggregation function should not be mixed up with projection"; bool tagColExists = false; - int16_t numOfTimestamp = 0; // primary timestamp column int16_t numOfSelectivity = 0; int16_t numOfAggregation = 0; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == TSDB_FUNC_TAGPRJ || (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX)) { tagColExists = true; @@ -5334,9 +4872,9 @@ static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) { } } - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int16_t functionId = tscSqlExprGet(pCmd, i)->functionId; - if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS || + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int16_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; + if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_ARITHM) { continue; } @@ -5352,44 +4890,44 @@ static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) { // When the tag projection function on tag column that is not in the group by clause, aggregation function and // selectivity function exist in select clause is not allowed. 
if (numOfAggregation > 0) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } /* * if numOfSelectivity equals to 0, it is a super table projection query */ if (numOfSelectivity == 1) { - doUpdateSqlFunctionForTagPrj(pCmd); - doUpdateSqlFunctionForColPrj(pCmd); + doUpdateSqlFunctionForTagPrj(pQueryInfo); + doUpdateSqlFunctionForColPrj(pQueryInfo); } else if (numOfSelectivity > 1) { /* * If more than one selectivity functions exist, all the selectivity functions must be last_row. * Otherwise, return with error code. */ - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int16_t functionId = tscSqlExprGet(pCmd, i)->functionId; + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int16_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; if (functionId == TSDB_FUNC_TAGPRJ) { continue; } if (((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) && (functionId != TSDB_FUNC_LAST_ROW)) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } - doUpdateSqlFunctionForTagPrj(pCmd); - doUpdateSqlFunctionForColPrj(pCmd); + doUpdateSqlFunctionForTagPrj(pQueryInfo); + doUpdateSqlFunctionForColPrj(pQueryInfo); } } else { - if ((pCmd->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) == TSDB_QUERY_TYPE_PROJECTION_QUERY) { - if (numOfAggregation > 0 && pCmd->groupbyExpr.numOfGroupCols == 0) { - return invalidSqlErrMsg(pCmd, msg3); + if ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) == TSDB_QUERY_TYPE_PROJECTION_QUERY) { + if (numOfAggregation > 0 && pQueryInfo->groupbyExpr.numOfGroupCols == 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); } if (numOfAggregation > 0 || numOfSelectivity > 0) { // clear the projection type flag - pCmd->type &= (~TSDB_QUERY_TYPE_PROJECTION_QUERY); - doUpdateSqlFunctionForColPrj(pCmd); + pQueryInfo->type &= (~TSDB_QUERY_TYPE_PROJECTION_QUERY); + doUpdateSqlFunctionForColPrj(pQueryInfo); } } } @@ -5397,18 +4935,18 @@ 
static int32_t checkUpdateTagPrjFunctions(SSqlCmd* pCmd) { return TSDB_CODE_SUCCESS; } -static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd) { +static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) { const char* msg2 = "interval not allowed in group by normal column"; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); int16_t bytes = 0; int16_t type = 0; char* name = NULL; - for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) { - SColIndexEx* pColIndex = &pCmd->groupbyExpr.columnInfo[i]; + for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { + SColIndexEx* pColIndex = &pQueryInfo->groupbyExpr.columnInfo[i]; int16_t colIndex = pColIndex->colIdx; if (pColIndex->colIdx == TSDB_TBNAME_COLUMN_INDEX) { @@ -5425,25 +4963,25 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd) { } if (TSDB_COL_IS_TAG(pColIndex->flag)) { - SColumnIndex index = {.tableIndex = pCmd->groupbyExpr.tableIndex, .columnIndex = colIndex}; + SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex}; - SSqlExpr* pExpr = - tscSqlExprInsert(pCmd, pCmd->fieldsInfo.numOfOutputCols, TSDB_FUNC_TAG, &index, type, bytes, bytes); + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, TSDB_FUNC_TAG, &index, + type, bytes, bytes); pExpr->colInfo.flag = TSDB_COL_TAG; // NOTE: tag column does not add to source column list SColumnList ids = {0}; - insertResultField(pCmd, pCmd->fieldsInfo.numOfOutputCols, &ids, bytes, type, name); + insertResultField(pQueryInfo, pQueryInfo->fieldsInfo.numOfOutputCols, &ids, bytes, type, name); } else { // if this query is "group by" normal column, interval is not allowed - if (pCmd->nAggTimeInterval > 0) { - return invalidSqlErrMsg(pCmd, msg2); + if (pQueryInfo->nAggTimeInterval > 0) { + return 
invalidSqlErrMsg(pQueryInfo->msg, msg2); } bool hasGroupColumn = false; - for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, j); + for (int32_t j = 0; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, j); if (pExpr->colInfo.colId == pColIndex->colId) { break; } @@ -5454,7 +4992,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd) { * but invisible to user */ if (!hasGroupColumn) { - doAddGroupColumnForSubquery(pCmd, i); + doAddGroupColumnForSubquery(pQueryInfo, i); } } } @@ -5462,40 +5000,31 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd) { return TSDB_CODE_SUCCESS; } -int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) { +int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { const char* msg1 = "functions/columns not allowed in group by query"; - const char* msg2 = "interval not allowed in group by normal column"; + const char* msg2 = "projection query on columns not allowed"; const char* msg3 = "group by not allowed on projection query"; - const char* msg4 = "tags retrieve not compatible with group by"; - const char* msg5 = "retrieve tags not compatible with group by or interval query"; - - SSqlCmd* pCmd = &pSql->cmd; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + const char* msg4 = "retrieve tags not compatible with group by or interval query"; // only retrieve tags, group by is not supportted if (pCmd->command == TSDB_SQL_RETRIEVE_TAGS) { - if (pCmd->groupbyExpr.numOfGroupCols > 0 || pCmd->nAggTimeInterval > 0) { - return invalidSqlErrMsg(pCmd, msg5); + if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || pQueryInfo->nAggTimeInterval > 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); } else { return TSDB_CODE_SUCCESS; } } - if (pCmd->groupbyExpr.numOfGroupCols > 0) { - SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); - int16_t bytes = 0; - int16_t type = 0; - char* name = 
NULL; - + if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { // check if all the tags prj columns belongs to the group by columns - if (onlyTagPrjFunction(pCmd) && allTagPrjInGroupby(pCmd)) { - updateTagPrjFunction(pCmd); - return doAddGroupbyColumnsOnDemand(pCmd); + if (onlyTagPrjFunction(pQueryInfo) && allTagPrjInGroupby(pQueryInfo)) { + updateTagPrjFunction(pQueryInfo); + return doAddGroupbyColumnsOnDemand(pQueryInfo); } // check all query functions in selection clause, multi-output functions are not allowed - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); int32_t functId = pExpr->functionId; /* @@ -5504,8 +5033,8 @@ int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) { */ if (functId == TSDB_FUNC_PRJ && pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) { bool qualified = false; - for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) { - SColIndexEx* pColIndex = &pCmd->groupbyExpr.columnInfo[j]; + for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) { + SColIndexEx* pColIndex = &pQueryInfo->groupbyExpr.columnInfo[j]; if (pColIndex->colId == pExpr->colInfo.colId) { qualified = true; break; @@ -5513,21 +5042,21 @@ int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) { } if (!qualified) { - return TSDB_CODE_INVALID_SQL; + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } } if (IS_MULTIOUTPUT(aAggs[functId].nStatus) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM && functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } if (functId == TSDB_FUNC_COUNT && pExpr->colInfo.colIdx == TSDB_TBNAME_COLUMN_INDEX) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } } - if (checkUpdateTagPrjFunctions(pCmd) != TSDB_CODE_SUCCESS) 
{ + if (checkUpdateTagPrjFunctions(pQueryInfo) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } @@ -5535,34 +5064,34 @@ int32_t doFunctionsCompatibleCheck(SSqlObj* pSql) { * group by tag function must be not changed the function name, otherwise, the group operation may fail to * divide the subset of final result. */ - if (doAddGroupbyColumnsOnDemand(pCmd) != TSDB_CODE_SUCCESS) { + if (doAddGroupbyColumnsOnDemand(pQueryInfo) != TSDB_CODE_SUCCESS) { return TSDB_CODE_INVALID_SQL; } // projection query on metric does not compatible with "group by" syntax - if (tscProjectionQueryOnMetric(pCmd)) { - return invalidSqlErrMsg(pCmd, msg3); + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } return TSDB_CODE_SUCCESS; } else { - return checkUpdateTagPrjFunctions(pCmd); + return checkUpdateTagPrjFunctions(pQueryInfo); } } -int32_t doLocalQueryProcess(SQuerySQL* pQuerySql, SSqlCmd* pCmd) { +int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { const char* msg1 = "only one expression allowed"; const char* msg2 = "invalid expression in select clause"; const char* msg3 = "invalid function"; tSQLExprList* pExprList = pQuerySql->pSelection; if (pExprList->nExpr != 1) { - return invalidSqlErrMsg(pCmd, msg1); + return invalidSqlErrMsg(pQueryInfo->msg, msg1); } tSQLExpr* pExpr = pExprList->a[0].pNode; if (pExpr->operand.z == NULL) { - return invalidSqlErrMsg(pCmd, msg2); + return invalidSqlErrMsg(pQueryInfo->msg, msg2); } // TODO redefine the function @@ -5581,7 +5110,7 @@ int32_t doLocalQueryProcess(SQuerySQL* pQuerySql, SSqlCmd* pCmd) { } } - SSqlExpr* pExpr1 = tscSqlExprInsertEmpty(pCmd, 0, TSDB_FUNC_TAG_DUMMY); + SSqlExpr* pExpr1 = tscSqlExprInsertEmpty(pQueryInfo, 0, TSDB_FUNC_TAG_DUMMY); if (pExprList->a[0].aliasName != NULL) { strncpy(pExpr1->aliasName, pExprList->a[0].aliasName, tListLen(pExpr1->aliasName)); } else { @@ -5590,97 +5119,548 @@ int32_t 
doLocalQueryProcess(SQuerySQL* pQuerySql, SSqlCmd* pCmd) { switch (index) { case 0: - pCmd->command = TSDB_SQL_CURRENT_DB; + pQueryInfo->command = TSDB_SQL_CURRENT_DB; return TSDB_CODE_SUCCESS; case 1: - pCmd->command = TSDB_SQL_SERV_VERSION; + pQueryInfo->command = TSDB_SQL_SERV_VERSION; return TSDB_CODE_SUCCESS; case 2: - pCmd->command = TSDB_SQL_SERV_STATUS; + pQueryInfo->command = TSDB_SQL_SERV_STATUS; return TSDB_CODE_SUCCESS; case 3: - pCmd->command = TSDB_SQL_CLI_VERSION; + pQueryInfo->command = TSDB_SQL_CLI_VERSION; return TSDB_CODE_SUCCESS; case 4: - pCmd->command = TSDB_SQL_CURRENT_USER; + pQueryInfo->command = TSDB_SQL_CURRENT_USER; return TSDB_CODE_SUCCESS; - default: { - return invalidSqlErrMsg(pCmd, msg3); - } + default: { return invalidSqlErrMsg(pQueryInfo->msg, msg3); } } } // can only perform the parameters based on the macro definitation -int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg *pCreate) { +int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) { char msg[512] = {0}; - + if (pCreate->commitLog != -1 && (pCreate->commitLog < 0 || pCreate->commitLog > 1)) { snprintf(msg, tListLen(msg), "invalid db option commitLog: %d, only 0 or 1 allowed", pCreate->commitLog); - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - + if (pCreate->replications != -1 && (pCreate->replications < TSDB_REPLICA_MIN_NUM || pCreate->replications > TSDB_REPLICA_MAX_NUM)) { - snprintf(msg, tListLen(msg), "invalid db option replications: %d valid range: [%d, %d]", pCreate->replications, TSDB_REPLICA_MIN_NUM, - TSDB_REPLICA_MAX_NUM); - return invalidSqlErrMsg(pCmd, msg); + snprintf(msg, tListLen(msg), "invalid db option replications: %d valid range: [%d, %d]", pCreate->replications, + TSDB_REPLICA_MIN_NUM, TSDB_REPLICA_MAX_NUM); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - + int32_t val = htonl(pCreate->daysPerFile); if (val != -1 && (val < TSDB_FILE_MIN_PARTITION_RANGE || val > 
TSDB_FILE_MAX_PARTITION_RANGE)) { snprintf(msg, tListLen(msg), "invalid db option daysPerFile: %d valid range: [%d, %d]", val, TSDB_FILE_MIN_PARTITION_RANGE, TSDB_FILE_MAX_PARTITION_RANGE); - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - + val = htonl(pCreate->rowsInFileBlock); if (val != -1 && (val < TSDB_MIN_ROWS_IN_FILEBLOCK || val > TSDB_MAX_ROWS_IN_FILEBLOCK)) { snprintf(msg, tListLen(msg), "invalid db option rowsInFileBlock: %d valid range: [%d, %d]", val, TSDB_MIN_ROWS_IN_FILEBLOCK, TSDB_MAX_ROWS_IN_FILEBLOCK); - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - + val = htonl(pCreate->cacheBlockSize); if (val != -1 && (val < TSDB_MIN_CACHE_BLOCK_SIZE || val > TSDB_MAX_CACHE_BLOCK_SIZE)) { snprintf(msg, tListLen(msg), "invalid db option cacheBlockSize: %d valid range: [%d, %d]", val, TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MAX_CACHE_BLOCK_SIZE); - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - + val = htonl(pCreate->maxSessions); if (val != -1 && (val < TSDB_MIN_TABLES_PER_VNODE || val > TSDB_MAX_TABLES_PER_VNODE)) { - snprintf(msg, tListLen(msg), "invalid db option maxSessions: %d valid range: [%d, %d]", val, TSDB_MIN_TABLES_PER_VNODE, - TSDB_MAX_TABLES_PER_VNODE); - return invalidSqlErrMsg(pCmd, msg); + snprintf(msg, tListLen(msg), "invalid db option maxSessions: %d valid range: [%d, %d]", val, + TSDB_MIN_TABLES_PER_VNODE, TSDB_MAX_TABLES_PER_VNODE); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - - if (pCreate->precision != -1 && - (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO)) { - snprintf(msg, tListLen(msg), "invalid db option timePrecision: %d valid value: [%d, %d]", pCreate->precision, TSDB_TIME_PRECISION_MILLI, - TSDB_TIME_PRECISION_MICRO); - return invalidSqlErrMsg(pCmd, msg); + + if (pCreate->precision != 
TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO) { + snprintf(msg, tListLen(msg), "invalid db option timePrecision: %d valid value: [%d, %d]", pCreate->precision, + TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - + if (pCreate->cacheNumOfBlocks.fraction != -1 && (pCreate->cacheNumOfBlocks.fraction < TSDB_MIN_AVG_BLOCKS || - pCreate->cacheNumOfBlocks.fraction > TSDB_MAX_AVG_BLOCKS)) { - snprintf(msg, tListLen(msg), "invalid db option ablocks: %f valid value: [%d, %d]", pCreate->cacheNumOfBlocks.fraction, - TSDB_MIN_AVG_BLOCKS, TSDB_MAX_AVG_BLOCKS); - return invalidSqlErrMsg(pCmd, msg); + pCreate->cacheNumOfBlocks.fraction > TSDB_MAX_AVG_BLOCKS)) { + snprintf(msg, tListLen(msg), "invalid db option ablocks: %f valid value: [%d, %d]", + pCreate->cacheNumOfBlocks.fraction, TSDB_MIN_AVG_BLOCKS, TSDB_MAX_AVG_BLOCKS); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - + val = htonl(pCreate->commitTime); if (val != -1 && (val < TSDB_MIN_COMMIT_TIME_INTERVAL || val > TSDB_MAX_COMMIT_TIME_INTERVAL)) { snprintf(msg, tListLen(msg), "invalid db option commitTime: %d valid range: [%d, %d]", val, TSDB_MIN_COMMIT_TIME_INTERVAL, TSDB_MAX_COMMIT_TIME_INTERVAL); - return invalidSqlErrMsg(pCmd, msg); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - + if (pCreate->compression != -1 && (pCreate->compression < TSDB_MIN_COMPRESSION_LEVEL || pCreate->compression > TSDB_MAX_COMPRESSION_LEVEL)) { - snprintf(msg, tListLen(msg), "invalid db option compression: %d valid range: [%d, %d]", pCreate->compression, TSDB_MIN_COMPRESSION_LEVEL, - TSDB_MAX_COMPRESSION_LEVEL); - return invalidSqlErrMsg(pCmd, msg); + snprintf(msg, tListLen(msg), "invalid db option compression: %d valid range: [%d, %d]", pCreate->compression, + TSDB_MIN_COMPRESSION_LEVEL, TSDB_MAX_COMPRESSION_LEVEL); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); + } + + return TSDB_CODE_SUCCESS; 
+} + +// for debug purpose +void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, subClauseIndex); + + if (pQueryInfo->exprsInfo.numOfExprs == 0) { + return; } + + int32_t totalBufSize = 1024; + char str[1024] = {0}; + int32_t offset = 0; + + offset += sprintf(str, "num:%d [", pQueryInfo->exprsInfo.numOfExprs); + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + + char tmpBuf[1024] = {0}; + int32_t tmpLen = 0; + tmpLen = sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].aName, pExpr->uid, pExpr->colInfo.colId); + if (tmpLen + offset > totalBufSize) break; + + offset += sprintf(str + offset, "%s", tmpBuf); + + if (i < pQueryInfo->exprsInfo.numOfExprs - 1) { + str[offset++] = ','; + } + } + + str[offset] = ']'; + tscTrace("%p select clause:%s", pSql, str); +} + +int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo) { + const char* msg1 = "invalid table name"; + const char* msg2 = "table name too long"; + + SSqlCmd* pCmd = &pSql->cmd; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + + SCreateTableSQL* pCreateTable = pInfo->pCreateTableInfo; + + tFieldList* pFieldList = pCreateTable->colInfo.pColumns; + tFieldList* pTagList = pCreateTable->colInfo.pTagColumns; + + assert(pFieldList != NULL); + + // if sql specifies db, use it, otherwise use default db + SSQLToken* pzTableName = &(pCreateTable->name); + + if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (setMeterID(pMeterMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + if (!validateTableColumnInfo(pFieldList, pCmd) || + (pTagList != NULL && !validateTagParams(pTagList, 
pFieldList, pCmd))) { + return TSDB_CODE_INVALID_SQL; + } + + int32_t col = 0; + for (; col < pFieldList->nField; ++col) { + tscFieldInfoSetValFromField(&pQueryInfo->fieldsInfo, col, &pFieldList->p[col]); + } + + pCmd->numOfCols = (int16_t)pFieldList->nField; + + if (pTagList != NULL) { // create metric[optional] + for (int32_t i = 0; i < pTagList->nField; ++i) { + tscFieldInfoSetValFromField(&pQueryInfo->fieldsInfo, col++, &pTagList->p[i]); + } + + pCmd->count = pTagList->nField; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { + const char* msg1 = "invalid table name"; + const char* msg3 = "tag value too long"; + const char* msg4 = "illegal value or data overflow"; + const char* msg5 = "tags number not matched"; + + SSqlCmd* pCmd = &pSql->cmd; + + SCreateTableSQL* pCreateTable = pInfo->pCreateTableInfo; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + // two table: the first one is for current table, and the secondary is for the super table. 
+ tscAddEmptyMeterMetaInfo(pQueryInfo); + assert(pQueryInfo->numOfTables == 2); + + const int32_t TABLE_INDEX = 0; + const int32_t STABLE_INDEX = 1; + + SMeterMetaInfo* pStableMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, STABLE_INDEX); + + // super table name, create table by using dst + SSQLToken* pToken = &(pCreateTable->usingInfo.stableName); + + if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (setMeterID(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + // get meter meta from mnode + strncpy(pCreateTable->usingInfo.tagdata.name, pStableMeterMetaInfo->name, TSDB_METER_ID_LEN); + tVariantList* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals; + + int32_t code = tscGetMeterMeta(pSql, pStableMeterMetaInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + if (pStableMeterMetaInfo->pMeterMeta->numOfTags != pList->nExpr) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + + // too long tag values will return invalid sql, not be truncated automatically + SSchema* pTagSchema = tsGetTagSchema(pStableMeterMetaInfo->pMeterMeta); + + char* tagVal = pCreateTable->usingInfo.tagdata.data; + for (int32_t i = 0; i < pList->nExpr; ++i) { + int32_t ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type); + if (ret != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + + // validate the length of binary + if ((pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) && + pList->a[i].pVar.nLen > pTagSchema[i].bytes) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + + tagVal += pTagSchema[i].bytes; + } + + // table name + if (tscValidateName(&pInfo->pCreateTableInfo->name) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + SMeterMetaInfo* 
pTableMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, TABLE_INDEX); + int32_t ret = setMeterID(pTableMeterMetaInfo, &pInfo->pCreateTableInfo->name, pSql); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { + const char* msg1 = "invalid table name"; + const char* msg2 = "table name too long"; + const char* msg3 = "fill only available for interval query"; + const char* msg4 = "fill option not supported in stream computing"; + const char* msg5 = "sql too long"; // todo ADD support + + SSqlCmd* pCmd = &pSql->cmd; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + assert(pQueryInfo->numOfTables == 1); + + SCreateTableSQL* pCreateTable = pInfo->pCreateTableInfo; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + + // if sql specifies db, use it, otherwise use default db + SSQLToken* pzTableName = &(pCreateTable->name); + SQuerySQL* pQuerySql = pCreateTable->pSelect; + + if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + tVariantList* pSrcMeterName = pInfo->pCreateTableInfo->pSelect->from; + tVariant* pVar = &pSrcMeterName->a[0].pVar; + + SSQLToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING}; + if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); + } + + if (setMeterID(pMeterMetaInfo, &srcToken, pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); + } + + int32_t code = tscGetMeterMeta(pSql, pMeterMetaInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + bool isSTable = UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo); + if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + if (pQuerySql->pWhere != NULL) { // query condition in stream computing + if 
(parseWhereClause(pQueryInfo, &pQuerySql->pWhere, pSql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + } + + // set interval value + if (parseIntervalClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } else { + if ((pQueryInfo->nAggTimeInterval > 0) && + (validateFunctionsInIntervalOrGroupbyQuery(pQueryInfo) != TSDB_CODE_SUCCESS)) { + return TSDB_CODE_INVALID_SQL; + } + } + + // set the created table[stream] name + if (setMeterID(pMeterMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(pQueryInfo->msg, msg1); + } + + if (pQuerySql->selectToken.n > TSDB_MAX_SAVED_SQL_LEN) { + return invalidSqlErrMsg(pQueryInfo->msg, msg5); + } + + if (tsRewriteFieldNameIfNecessary(pQueryInfo) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutputCols; + + if (validateSqlFunctionInStreamSql(pQueryInfo) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + /* + * check if fill operation is available, the fill operation is parsed and executed during query execution, + * not here. 
+ */ + if (pQuerySql->fillType != NULL) { + if (pQueryInfo->nAggTimeInterval == 0) { + return invalidSqlErrMsg(pQueryInfo->msg, msg3); + } + + tVariantListItem* pItem = &pQuerySql->fillType->a[0]; + if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) { + if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) || + (strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) { + return invalidSqlErrMsg(pQueryInfo->msg, msg4); + } + } + } + + // set the number of stream table columns + pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutputCols; return TSDB_CODE_SUCCESS; } + +int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { + assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); + + const char* msg0 = "invalid table name"; + const char* msg1 = "table name too long"; + const char* msg2 = "point interpolation query needs timestamp"; + const char* msg5 = "fill only available for interval query"; + const char* msg6 = "start(end) time of query range required or time range too large"; + const char* msg7 = "illegal number of tables in from clause"; + const char* msg8 = "too many columns in selection clause"; + const char* msg9 = "TWA query requires both the start and end time"; + + int32_t code = TSDB_CODE_SUCCESS; + + SSqlCmd* pCmd = &pSql->cmd; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, index); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + if (pMeterMetaInfo == NULL) { + pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo); + } + + // too many result columns not support order by in query + if (pQuerySql->pSelection->nExpr > TSDB_MAX_COLUMNS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8); + } + + /* + * handle the sql expression without from subclause + * select current_database(); + * select server_version(); + * select client_version(); + * select server_state(); + */ + if (pQuerySql->from == NULL) { + 
assert(pQuerySql->fillType == NULL && pQuerySql->pGroupby == NULL && pQuerySql->pWhere == NULL && + pQuerySql->pSortOrder == NULL); + return doLocalQueryProcess(pQueryInfo, pQuerySql); + } + + if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); + } + + pQueryInfo->command = TSDB_SQL_SELECT; + + // set all query tables, which are maybe more than one. + for (int32_t i = 0; i < pQuerySql->from->nExpr; ++i) { + tVariant* pTableItem = &pQuerySql->from->a[i].pVar; + + if (pTableItem->nType != TSDB_DATA_TYPE_BINARY) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); + } + + pTableItem->nLen = strdequote(pTableItem->pz); + + SSQLToken tableName = {.z = pTableItem->pz, .n = pTableItem->nLen, .type = TK_STRING}; + if (tscValidateName(&tableName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); + } + + if (pQueryInfo->numOfTables <= i) { // more than one table + tscAddEmptyMeterMetaInfo(pQueryInfo); + } + + SMeterMetaInfo* pMeterInfo1 = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); + + SSQLToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; + if (setMeterID(pMeterInfo1, &t, pSql) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + code = tscGetMeterMeta(pSql, pMeterInfo1); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + + assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr); + + // parse the group by clause in the first place + if (parseGroupbyClause(pQueryInfo, pQuerySql->pGroupby, pCmd) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + bool isSTable = UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo); + if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + // set interval value + if (parseIntervalClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + 
} else { + if ((pQueryInfo->nAggTimeInterval > 0) && + (validateFunctionsInIntervalOrGroupbyQuery(pQueryInfo) != TSDB_CODE_SUCCESS)) { + return TSDB_CODE_INVALID_SQL; + } + } + + // set order by info + if (parseOrderbyClause(pQueryInfo, pQuerySql, tsGetSchema(pMeterMetaInfo->pMeterMeta)) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + // set where info + if (pQuerySql->pWhere != NULL) { + if (parseWhereClause(pQueryInfo, &pQuerySql->pWhere, pSql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + pQuerySql->pWhere = NULL; + + if (pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { + pQueryInfo->stime = pQueryInfo->stime / 1000; + pQueryInfo->etime = pQueryInfo->etime / 1000; + } + } else { // set the time rang + pQueryInfo->stime = 0; + pQueryInfo->etime = INT64_MAX; + } + + // user does not specified the query time window, twa is not allowed in such case. + if ((pQueryInfo->stime == 0 || pQueryInfo->etime == INT64_MAX || + (pQueryInfo->etime == INT64_MAX / 1000 && pMeterMetaInfo->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI)) && + tscIsTWAQuery(pQueryInfo)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9); + } + + // no result due to invalid query time range + if (pQueryInfo->stime > pQueryInfo->etime) { + pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + return TSDB_CODE_SUCCESS; + } + + if (!hasTimestampForPointInterpQuery(pQueryInfo)) { + return invalidSqlErrMsg(pQueryInfo->msg, msg2); + } + + // in case of join query, time range is required. 
+ if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) { + int64_t timeRange = labs(pQueryInfo->stime - pQueryInfo->etime); + + if (timeRange == 0 && pQueryInfo->stime == 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); + } + } + + if ((code = parseLimitClause(pQueryInfo, index, pQuerySql, pSql)) != TSDB_CODE_SUCCESS) { + return code; + } + + if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo)) != TSDB_CODE_SUCCESS) { + return code; + } + + setColumnOffsetValueInResultset(pQueryInfo); + + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + updateTagColumnIndex(pQueryInfo, i); + } + + /* + * fill options are set at the end position, when all columns are set properly + * the columns may be increased due to group by operation + */ + if (pQuerySql->fillType != NULL) { + if (pQueryInfo->nAggTimeInterval == 0 && (!tscIsPointInterpQuery(pQueryInfo))) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + + if (pQueryInfo->nAggTimeInterval > 0) { + int64_t timeRange = labs(pQueryInfo->stime - pQueryInfo->etime); + // number of result is not greater than 10,000,000 + if ((timeRange == 0) || (timeRange / pQueryInfo->nAggTimeInterval) > MAX_RETRIEVE_ROWS_IN_INTERVAL_QUERY) { + return invalidSqlErrMsg(pQueryInfo->msg, msg6); + } + } + + int32_t ret = parseFillClause(pQueryInfo, pQuerySql); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + } + + return TSDB_CODE_SUCCESS; // Does not build query message here +} + +bool hasDefaultQueryTimeRange(SQueryInfo *pQueryInfo) { + return (pQueryInfo->stime == 0 && pQueryInfo->etime == INT64_MAX) || + (pQueryInfo->stime == INT64_MAX && pQueryInfo->etime == 0); +} diff --git a/src/client/src/tscSQLParserImpl.c b/src/client/src/tscSQLParserImpl.c index cc4375fb03896b240bc57fdabcf2728ade996329..17e1c6f45790acc36c35111aec75e00be227044f 100644 --- a/src/client/src/tscSQLParserImpl.c +++ b/src/client/src/tscSQLParserImpl.c @@ -24,7 +24,7 @@ int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) { void 
*pParser = ParseAlloc(malloc); - pSQLInfo->validSql = true; + pSQLInfo->valid = true; int32_t i = 0; while (1) { @@ -50,12 +50,12 @@ int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) { } case TK_ILLEGAL: { snprintf(pSQLInfo->pzErrMsg, tListLen(pSQLInfo->pzErrMsg), "unrecognized token: \"%s\"", t0.z); - pSQLInfo->validSql = false; + pSQLInfo->valid = false; goto abort_parse; } default: Parse(pParser, t0.type, t0, pSQLInfo); - if (pSQLInfo->validSql == false) { + if (pSQLInfo->valid == false) { goto abort_parse; } } @@ -554,58 +554,64 @@ tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExp return pList; } -void tSetInsertSQLElems(SSqlInfo *pInfo, SSQLToken *pName, tSQLExprListList *pList) { - SInsertSQL *pInsert = calloc(1, sizeof(SInsertSQL)); - - pInsert->name = *pName; - pInsert->pValue = pList; - - pInfo->pInsertInfo = pInsert; - pInfo->sqlType = TSQL_INSERT; -} - -void destroyQuerySql(SQuerySQL *pSql) { - if (pSql == NULL) return; - - tSQLExprListDestroy(pSql->pSelection); - pSql->pSelection = NULL; - - tSQLExprDestroy(pSql->pWhere); - pSql->pWhere = NULL; - - tVariantListDestroy(pSql->pSortOrder); - pSql->pSortOrder = NULL; - - tVariantListDestroy(pSql->pGroupby); - pSql->pGroupby = NULL; - - tVariantListDestroy(pSql->from); - pSql->from = NULL; - - tVariantListDestroy(pSql->fillType); +void doDestroyQuerySql(SQuerySQL *pQuerySql) { + if (pQuerySql == NULL) { + return; + } + + tSQLExprListDestroy(pQuerySql->pSelection); + + pQuerySql->pSelection = NULL; + + tSQLExprDestroy(pQuerySql->pWhere); + pQuerySql->pWhere = NULL; + + tVariantListDestroy(pQuerySql->pSortOrder); + pQuerySql->pSortOrder = NULL; + + tVariantListDestroy(pQuerySql->pGroupby); + pQuerySql->pGroupby = NULL; + + tVariantListDestroy(pQuerySql->from); + pQuerySql->from = NULL; + + tVariantListDestroy(pQuerySql->fillType); + + free(pQuerySql); +} + +void destroyAllSelectClause(SSubclauseInfo *pClause) { + if (pClause == NULL || pClause->numOfClause == 0) { + return; 
+ } - free(pSql); + for(int32_t i = 0; i < pClause->numOfClause; ++i) { + SQuerySQL *pQuerySql = pClause->pClause[i]; + doDestroyQuerySql(pQuerySql); + } + + tfree(pClause->pClause); } -SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pMetricName, +SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pStableName, tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type) { SCreateTableSQL *pCreate = calloc(1, sizeof(SCreateTableSQL)); switch (type) { - case TSQL_CREATE_NORMAL_METER: { + case TSQL_CREATE_TABLE: { pCreate->colInfo.pColumns = pCols; assert(pTagVals == NULL && pTags == NULL); break; } - case TSQL_CREATE_NORMAL_METRIC: { + case TSQL_CREATE_STABLE: { pCreate->colInfo.pColumns = pCols; pCreate->colInfo.pTagColumns = pTags; assert(pTagVals == NULL && pTags != NULL && pCols != NULL); break; } - case TSQL_CREATE_METER_FROM_METRIC: { + case TSQL_CREATE_TABLE_FROM_STABLE: { pCreate->usingInfo.pTagVals = pTagVals; - pCreate->usingInfo.metricName = *pMetricName; + pCreate->usingInfo.stableName = *pStableName; break; } case TSQL_CREATE_STREAM: { @@ -616,19 +622,24 @@ SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLTo assert(false); } + pCreate->type = type; return pCreate; } SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type) { SAlterTableSQL *pAlterTable = calloc(1, sizeof(SAlterTableSQL)); + pAlterTable->name = *pMeterName; + pAlterTable->type = type; - if (type == ALTER_TABLE_ADD_COLUMN || type == ALTER_TABLE_TAGS_ADD) { + if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) { pAlterTable->pAddColumns = pCols; assert(pVals == NULL); } else { - /* ALTER_TABLE_TAGS_CHG, ALTER_TABLE_TAGS_SET, ALTER_TABLE_TAGS_DROP, - * ALTER_TABLE_DROP_COLUMN */ + /* + * ALTER_TABLE_TAGS_CHG, ALTER_TABLE_TAGS_SET, ALTER_TABLE_TAGS_DROP, + * ALTER_TABLE_DROP_COLUMN + */ pAlterTable->varList = 
pVals; assert(pCols == NULL); } @@ -639,27 +650,28 @@ SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tV void SQLInfoDestroy(SSqlInfo *pInfo) { if (pInfo == NULL) return; - if (pInfo->sqlType == TSQL_QUERY_METER) { - destroyQuerySql(pInfo->pQueryInfo); - } else if (pInfo->sqlType >= TSQL_CREATE_NORMAL_METER && pInfo->sqlType <= TSQL_CREATE_STREAM) { + if (pInfo->type == TSDB_SQL_SELECT) { + destroyAllSelectClause(&pInfo->subclauseInfo); + } else if (pInfo->type == TSDB_SQL_CREATE_TABLE) { SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo; - destroyQuerySql(pCreateTableInfo->pSelect); + doDestroyQuerySql(pCreateTableInfo->pSelect); tFieldListDestroy(pCreateTableInfo->colInfo.pColumns); tFieldListDestroy(pCreateTableInfo->colInfo.pTagColumns); tVariantListDestroy(pCreateTableInfo->usingInfo.pTagVals); tfree(pInfo->pCreateTableInfo); - } else if (pInfo->sqlType >= ALTER_TABLE_TAGS_ADD && pInfo->sqlType <= ALTER_TABLE_DROP_COLUMN) { + } else if (pInfo->type == TSDB_SQL_ALTER_TABLE) { tVariantListDestroy(pInfo->pAlterInfo->varList); tFieldListDestroy(pInfo->pAlterInfo->pAddColumns); + tfree(pInfo->pAlterInfo); } else { if (pInfo->pDCLInfo != NULL && pInfo->pDCLInfo->nAlloc > 0) { free(pInfo->pDCLInfo->a); } - if (pInfo->sqlType == CREATE_DATABASE) { + if (pInfo->type == TSDB_SQL_CREATE_DB) { tVariantListDestroy(pInfo->pDCLInfo->dbOpt.keep); } @@ -667,13 +679,52 @@ void SQLInfoDestroy(SSqlInfo *pInfo) { } } -void setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type) { - pInfo->sqlType = type; - pInfo->pCreateTableInfo = pSqlExprInfo; +SSubclauseInfo* setSubclause(SSubclauseInfo* pSubclause, void *pSqlExprInfo) { + if (pSubclause == NULL) { + pSubclause = calloc(1, sizeof(SSubclauseInfo)); + } + + int32_t newSize = pSubclause->numOfClause + 1; + char* tmp = realloc(pSubclause->pClause, newSize * POINTER_BYTES); + if (tmp == NULL) { + return pSubclause; + } + + pSubclause->pClause = (SQuerySQL**) tmp; + 
+ pSubclause->pClause[newSize - 1] = pSqlExprInfo; + pSubclause->numOfClause++; + + return pSubclause; +} +SSqlInfo* setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type) { + pInfo->type = type; + + if (type == TSDB_SQL_SELECT) { + pInfo->subclauseInfo = *(SSubclauseInfo*) pSqlExprInfo; + free(pSqlExprInfo); + } else { + pInfo->pCreateTableInfo = pSqlExprInfo; + } + if (pMeterName != NULL) { pInfo->pCreateTableInfo->name = *pMeterName; } + + return pInfo; +} + +SSubclauseInfo* appendSelectClause(SSubclauseInfo *pQueryInfo, void *pSubclause) { + char* tmp = realloc(pQueryInfo->pClause, (pQueryInfo->numOfClause + 1) * POINTER_BYTES); + if (tmp == NULL) { // out of memory + return pQueryInfo; + } + + pQueryInfo->pClause = (SQuerySQL**) tmp; + pQueryInfo->pClause[pQueryInfo->numOfClause++] = pSubclause; + + return pQueryInfo; } void setCreatedMeterName(SSqlInfo *pInfo, SSQLToken *pMeterName, SSQLToken *pIfNotExists) { @@ -703,23 +754,57 @@ tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken) { } void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) 
{ - pInfo->sqlType = type; + pInfo->type = type; if (nParam == 0) return; - if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); + if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = (tDCLSQL *)calloc(1, sizeof(tDCLSQL)); va_list va; va_start(va, nParam); while (nParam-- > 0) { SSQLToken *pToken = va_arg(va, SSQLToken *); - tTokenListAppend(pInfo->pDCLInfo, pToken); + (void)tTokenListAppend(pInfo->pDCLInfo, pToken); } va_end(va); } +void setDropDBTableInfo(SSqlInfo *pInfo, int32_t type, SSQLToken* pToken, SSQLToken* existsCheck) { + pInfo->type = type; + + if (pInfo->pDCLInfo == NULL) { + pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); + } + + tTokenListAppend(pInfo->pDCLInfo, pToken); + pInfo->pDCLInfo->existsCheck = (existsCheck->n == 1); +} + +void setShowOptions(SSqlInfo *pInfo, int32_t type, SSQLToken* prefix, SSQLToken* pPatterns) { + if (pInfo->pDCLInfo == NULL) { + pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); + } + + pInfo->type = TSDB_SQL_SHOW; + + SShowInfo* pShowInfo = &pInfo->pDCLInfo->showOpt; + pShowInfo->showType = type; + + if (prefix != NULL && prefix->type != 0) { + pShowInfo->prefix = *prefix; + } else { + pShowInfo->prefix.type = 0; + } + + if (pPatterns != NULL && pPatterns->type != 0) { + pShowInfo->pattern = *pPatterns; + } else { + pShowInfo->pattern.type = 0; + } +} + void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBInfo *pDB, SSQLToken *pIgExists) { - pInfo->sqlType = type; + pInfo->type = type; if (pInfo->pDCLInfo == NULL) { pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); } @@ -731,18 +816,67 @@ void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBI } void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo) { - pInfo->sqlType = type; + pInfo->type = type; if (pInfo->pDCLInfo == NULL) { pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); } pInfo->pDCLInfo->acctOpt = *pAcctInfo; + + assert(pName != NULL); + 
pInfo->pDCLInfo->user.user = *pName; + + if (pPwd != NULL) { + pInfo->pDCLInfo->user.passwd = *pPwd; + } +} + +void setCreateUserSQL(SSqlInfo *pInfo, SSQLToken *pName, SSQLToken *pPasswd) { + pInfo->type = TSDB_SQL_CREATE_USER; + if (pInfo->pDCLInfo == NULL) { + pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); + } + + assert(pName != NULL && pPasswd != NULL); + + pInfo->pDCLInfo->user.user = *pName; + pInfo->pDCLInfo->user.passwd = *pPasswd; +} - tTokenListAppend(pInfo->pDCLInfo, pName); +void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SSQLToken *pName, SSQLToken* pPwd, SSQLToken *pPrivilege) { + pInfo->type = TSDB_SQL_ALTER_USER; + if (pInfo->pDCLInfo == NULL) { + pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); + } + + assert(pName != NULL); + + SUserInfo* pUser = &pInfo->pDCLInfo->user; + pUser->type = type; + pUser->user = *pName; + + if (pPwd != NULL) { + pUser->passwd = *pPwd; + } else { + pUser->passwd.type = TSDB_DATA_TYPE_NULL; + } + + if (pPrivilege != NULL) { + pUser->privilege = *pPrivilege; + } else { + pUser->privilege.type = TSDB_DATA_TYPE_NULL; + } +} - if (pPwd->n > 0) { - tTokenListAppend(pInfo->pDCLInfo, pPwd); +void setKillSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *ip) { + pInfo->type = type; + if (pInfo->pDCLInfo == NULL) { + pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); } + + assert(ip != NULL); + + pInfo->pDCLInfo->ip = *ip; } void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo) { diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c index fdbad2bbf8250ef21a1577ef37fefb528c40fea8..648c25657cf4e60549ed9a60b702aea9d8f1445a 100644 --- a/src/client/src/tscSchemaUtil.c +++ b/src/client/src/tscSchemaUtil.c @@ -83,6 +83,13 @@ struct SSchema* tsGetColumnSchema(SMeterMeta* pMeta, int32_t startCol) { return (SSchema*)(((char*)pMeta + sizeof(SMeterMeta)) + startCol * sizeof(SSchema)); } +struct SSchema tsGetTbnameColumnSchema() { + struct SSchema s = {.colId = TSDB_TBNAME_COLUMN_INDEX, .type = TSDB_DATA_TYPE_BINARY, .bytes = 
TSDB_METER_NAME_LEN}; + strcpy(s.name, TSQL_TBNAME_L); + + return s; +} + /** * the MeterMeta data format in memory is as follows: * @@ -123,36 +130,40 @@ bool tsMeterMetaIdentical(SMeterMeta* p1, SMeterMeta* p2) { return memcmp(p1, p2, size) == 0; } -//todo refactor -static FORCE_INLINE char* skipSegments(char* input, char delimiter, int32_t num) { +// todo refactor +static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) { for (int32_t i = 0; i < num; ++i) { - while (*input != 0 && *input++ != delimiter) { + while (*input != 0 && *input++ != delim) { }; } return input; } -static FORCE_INLINE void copySegment(char* dst, char* src, char delimiter) { +static FORCE_INLINE size_t copy(char* dst, const char* src, char delimiter) { + size_t len = 0; while (*src != delimiter && *src != 0) { *dst++ = *src++; + len++; } + + return len; } /** - * extract meter name from meterid, which the format of userid.dbname.metername + * extract table name from meterid, which the format of userid.dbname.metername * @param meterId * @return */ -void extractMeterName(char* meterId, char* name) { +void extractTableName(char* meterId, char* name) { char* r = skipSegments(meterId, TS_PATH_DELIMITER[0], 2); - copySegment(name, r, TS_PATH_DELIMITER[0]); + copy(name, r, TS_PATH_DELIMITER[0]); } SSQLToken extractDBName(char* meterId, char* name) { char* r = skipSegments(meterId, TS_PATH_DELIMITER[0], 1); - copySegment(name, r, TS_PATH_DELIMITER[0]); + size_t len = copy(name, r, TS_PATH_DELIMITER[0]); - SSQLToken token = {.z = name, .n = strlen(name), .type = TK_STRING}; + SSQLToken token = {.z = name, .n = len, .type = TK_STRING}; return token; } diff --git a/src/client/src/tscSecondaryMerge.c b/src/client/src/tscSecondaryMerge.c index 5eebddca7319920e6252ffc279f3e15cdd73225f..ca57030539a451d4967ace21fc688a1e44ffea76 100644 --- a/src/client/src/tscSecondaryMerge.c +++ b/src/client/src/tscSecondaryMerge.c @@ -58,21 +58,25 @@ static void tscInitSqlContext(SSqlCmd *pCmd, 
SSqlRes *pRes, SLocalReducer *pRedu * the fields and offset attributes in pCmd and pModel may be different due to * merge requirement. So, the final result in pRes structure is formatted in accordance with the pCmd object. */ - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { SQLFunctionCtx *pCtx = &pReducer->pCtx[i]; - pCtx->aOutputBuf = pReducer->pResultBuf->data + tscFieldInfoGetOffset(pCmd, i) * pReducer->resColModel->maxCapacity; - pCtx->order = pCmd->order.order; - pCtx->functionId = pCmd->exprsInfo.pExprs[i].functionId; + pCtx->aOutputBuf = pReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pReducer->resColModel->capacity; + pCtx->order = pQueryInfo->order.order; + pCtx->functionId = pQueryInfo->exprsInfo.pExprs[i].functionId; // input buffer hold only one point data - pCtx->aInputElemBuf = pReducer->pTempBuffer->data + pDesc->pSchema->colOffset[i]; + int16_t offset = getColumnModelOffset(pDesc->pColumnModel, i); + SSchema* pSchema = getColumnModelSchema(pDesc->pColumnModel, i); + + pCtx->aInputElemBuf = pReducer->pTempBuffer->data + offset; // input data format comes from pModel - pCtx->inputType = pDesc->pSchema->pFields[i].type; - pCtx->inputBytes = pDesc->pSchema->pFields[i].bytes; + pCtx->inputType = pSchema->type; + pCtx->inputBytes = pSchema->bytes; - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); // output data format yet comes from pCmd. 
pCtx->outputBytes = pField->bytes; pCtx->outputType = pField->type; @@ -84,15 +88,15 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu pRes->bytes[i] = pField->bytes; - SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); // for top/bottom function, the output of timestamp is the first column int32_t functionId = pExpr->functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { pCtx->ptsOutputBuf = pReducer->pCtx[0].aOutputBuf; - pCtx->param[2].i64Key = pCmd->order.order; + pCtx->param[2].i64Key = pQueryInfo->order.order; pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; - pCtx->param[1].i64Key = pCmd->order.orderColId; + pCtx->param[1].i64Key = pQueryInfo->order.orderColId; } SResultInfo *pResInfo = &pReducer->pResInfo[i]; @@ -105,11 +109,11 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu int16_t n = 0; int16_t tagLen = 0; - SQLFunctionCtx** pTagCtx = calloc(pCmd->fieldsInfo.numOfOutputCols, POINTER_BYTES); + SQLFunctionCtx** pTagCtx = calloc(pQueryInfo->fieldsInfo.numOfOutputCols, POINTER_BYTES); SQLFunctionCtx* pCtx = NULL; - for(int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + for(int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == TSDB_FUNC_TAG_DUMMY || pExpr->functionId == TSDB_FUNC_TS_DUMMY) { tagLen += pExpr->resBytes; pTagCtx[n++] = &pReducer->pCtx[i]; @@ -131,11 +135,11 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu * todo release allocated memory process with async process */ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, - tColModel *finalmodel, SSqlCmd *pCmd, SSqlRes *pRes) { + SColumnModel *finalmodel, SSqlCmd *pCmd, SSqlRes *pRes) { // offset of cmd in SSqlObj structure char *pSqlObjAddr = 
(char *)pCmd - offsetof(SSqlObj, cmd); - if (pMemBuffer == NULL || pDesc->pSchema == NULL) { + if (pMemBuffer == NULL || pDesc->pColumnModel == NULL) { tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); tscError("%p no local buffer or intermediate result format model", pSqlObjAddr); @@ -161,9 +165,9 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd return; } - if (pDesc->pSchema->maxCapacity >= pMemBuffer[0]->nPageSize) { - tscError("%p Invalid value of buffer capacity %d and page size %d ", pSqlObjAddr, pDesc->pSchema->maxCapacity, - pMemBuffer[0]->nPageSize); + if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) { + tscError("%p Invalid value of buffer capacity %d and page size %d ", pSqlObjAddr, pDesc->pColumnModel->capacity, + pMemBuffer[0]->pageSize); tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); pRes->code = TSDB_CODE_APP_ERROR; @@ -195,7 +199,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd int32_t numOfFlushoutInFile = pMemBuffer[i]->fileMeta.flushoutData.nLength; for (int32_t j = 0; j < numOfFlushoutInFile; ++j) { - SLocalDataSource *pDS = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->nPageSize); + SLocalDataSource *pDS = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize); if (pDS == NULL) { tscError("%p failed to create merge structure", pSqlObjAddr); pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; @@ -212,11 +216,13 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd tscTrace("%p load data from disk into memory, orderOfVnode:%d, total:%d", pSqlObjAddr, i + 1, idx + 1); tExtMemBufferLoadData(pMemBuffer[i], &(pDS->filePage), j, 0); #ifdef _DEBUG_VIEW - printf("load data page into mem for build loser tree: %ld rows\n", pDS->filePage.numOfElems); + printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", pDS->filePage.numOfElems); 
SSrcColumnInfo colInfo[256] = {0}; - tscGetSrcColumnInfo(colInfo, pCmd); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - tColModelDisplayEx(pDesc->pSchema, pDS->filePage.data, pDS->filePage.numOfElems, pMemBuffer[0]->numOfElemsPerPage, + tscGetSrcColumnInfo(colInfo, pQueryInfo); + + tColModelDisplayEx(pDesc->pColumnModel, pDS->filePage.data, pDS->filePage.numOfElems, pMemBuffer[0]->numOfElemsPerPage, colInfo); #endif if (pDS->filePage.numOfElems == 0) { // no data in this flush @@ -238,7 +244,9 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd param->pLocalData = pReducer->pLocalDataSrc; param->pDesc = pReducer->pDesc; param->numOfElems = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage; - param->groupOrderType = pCmd->groupbyExpr.orderType; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + param->groupOrderType = pQueryInfo->groupbyExpr.orderType; pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator); if (pReducer->pLoserTree == NULL || pRes->code != 0) { @@ -247,14 +255,14 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd // the input data format follows the old format, but output in a new format. 
// so, all the input must be parsed as old format - pReducer->pCtx = (SQLFunctionCtx *)calloc(pCmd->fieldsInfo.numOfOutputCols, sizeof(SQLFunctionCtx)); + pReducer->pCtx = (SQLFunctionCtx *)calloc(pQueryInfo->fieldsInfo.numOfOutputCols, sizeof(SQLFunctionCtx)); pReducer->rowSize = pMemBuffer[0]->nElemSize; - tscRestoreSQLFunctionForMetricQuery(pCmd); - tscFieldInfoCalOffset(pCmd); + tscRestoreSQLFunctionForMetricQuery(pQueryInfo); + tscFieldInfoCalOffset(pQueryInfo); - if (pReducer->rowSize > pMemBuffer[0]->nPageSize) { + if (pReducer->rowSize > pMemBuffer[0]->pageSize) { assert(false); // todo fixed row size is larger than the minimum page size; } @@ -269,15 +277,15 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd pReducer->discardData = (tFilePage *)calloc(1, pReducer->rowSize + sizeof(tFilePage)); pReducer->discard = false; - pReducer->nResultBufSize = pMemBuffer[0]->nPageSize * 16; + pReducer->nResultBufSize = pMemBuffer[0]->pageSize * 16; pReducer->pResultBuf = (tFilePage *)calloc(1, pReducer->nResultBufSize + sizeof(tFilePage)); - int32_t finalRowLength = tscGetResRowLength(pCmd); + int32_t finalRowLength = tscGetResRowLength(pQueryInfo); pReducer->resColModel = finalmodel; - pReducer->resColModel->maxCapacity = pReducer->nResultBufSize / finalRowLength; + pReducer->resColModel->capacity = pReducer->nResultBufSize / finalRowLength; assert(finalRowLength <= pReducer->rowSize); - pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->maxCapacity); + pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity); pReducer->pBufForInterpo = calloc(1, pReducer->nResultBufSize); if (pReducer->pTempBuffer == NULL|| pReducer->discardData == NULL || pReducer->pResultBuf == NULL || @@ -294,34 +302,42 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd } pReducer->pTempBuffer->numOfElems = 0; - pReducer->pResInfo = calloc((size_t)pCmd->fieldsInfo.numOfOutputCols, 
sizeof(SResultInfo)); + pReducer->pResInfo = calloc((size_t)pQueryInfo->fieldsInfo.numOfOutputCols, sizeof(SResultInfo)); - tscCreateResPointerInfo(pCmd, pRes); + tscCreateResPointerInfo(pRes, pQueryInfo); tscInitSqlContext(pCmd, pRes, pReducer, pDesc); - // we change the maxCapacity of schema to denote that there is only one row in temp buffer - pReducer->pDesc->pSchema->maxCapacity = 1; - pReducer->offset = pCmd->limit.offset; - + // we change the capacity of schema to denote that there is only one row in temp buffer + pReducer->pDesc->pColumnModel->capacity = 1; + + //restore the limitation value at the last stage + if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + pQueryInfo->limit.limit = pQueryInfo->clauseLimit; + pQueryInfo->limit.offset = pQueryInfo->prjOffset; + } + + pReducer->offset = pQueryInfo->limit.offset; + pRes->pLocalReducer = pReducer; pRes->numOfGroups = 0; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); int16_t prec = pMeterMetaInfo->pMeterMeta->precision; - int64_t stime = (pCmd->stime < pCmd->etime) ? pCmd->stime : pCmd->etime; - int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, prec); + int64_t stime = (pQueryInfo->stime < pQueryInfo->etime) ? 
pQueryInfo->stime : pQueryInfo->etime; + int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, prec); SInterpolationInfo *pInterpoInfo = &pReducer->interpolationInfo; - taosInitInterpoInfo(pInterpoInfo, pCmd->order.order, revisedSTime, pCmd->groupbyExpr.numOfGroupCols, + taosInitInterpoInfo(pInterpoInfo, pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, pReducer->rowSize); - int32_t startIndex = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupCols; + int32_t startIndex = pQueryInfo->fieldsInfo.numOfOutputCols - pQueryInfo->groupbyExpr.numOfGroupCols; - if (pCmd->groupbyExpr.numOfGroupCols > 0) { - pInterpoInfo->pTags[0] = (char *)pInterpoInfo->pTags + POINTER_BYTES * pCmd->groupbyExpr.numOfGroupCols; - for (int32_t i = 1; i < pCmd->groupbyExpr.numOfGroupCols; ++i) { - pInterpoInfo->pTags[i] = pReducer->resColModel->pFields[startIndex + i - 1].bytes + pInterpoInfo->pTags[i - 1]; + if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { + pInterpoInfo->pTags[0] = (char *)pInterpoInfo->pTags + POINTER_BYTES * pQueryInfo->groupbyExpr.numOfGroupCols; + for (int32_t i = 1; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { + SSchema* pSchema = getColumnModelSchema(pReducer->resColModel, startIndex + i - 1); + pInterpoInfo->pTags[i] = pSchema->bytes + pInterpoInfo->pTags[i - 1]; } } else { assert(pInterpoInfo->pTags == NULL); @@ -334,16 +350,16 @@ static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor return 0; } - assert(pPage->numOfElems <= pDesc->pSchema->maxCapacity); + assert(pPage->numOfElems <= pDesc->pColumnModel->capacity); // sort before flush to disk, the data must be consecutively put on tFilePage. 
- if (pDesc->orderIdx.numOfOrderedCols > 0) { + if (pDesc->orderIdx.numOfCols > 0) { tColDataQSort(pDesc, pPage->numOfElems, 0, pPage->numOfElems - 1, pPage->data, orderType); } #ifdef _DEBUG_VIEW - printf("%ld rows data flushed to disk after been sorted:\n", pPage->numOfElems); - tColModelDisplay(pDesc->pSchema, pPage->data, pPage->numOfElems, pPage->numOfElems); + printf("%" PRIu64 " rows data flushed to disk after been sorted:\n", pPage->numOfElems); + tColModelDisplay(pDesc->pColumnModel, pPage->data, pPage->numOfElems, pPage->numOfElems); #endif // write to cache after being sorted @@ -371,18 +387,19 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data, int32_t numOfRows, int32_t orderType) { - if (pPage->numOfElems + numOfRows <= pDesc->pSchema->maxCapacity) { - tColModelAppend(pDesc->pSchema, pPage, data, 0, numOfRows, numOfRows); + SColumnModel *pModel = pDesc->pColumnModel; + + if (pPage->numOfElems + numOfRows <= pModel->capacity) { + tColModelAppend(pModel, pPage, data, 0, numOfRows, numOfRows); return 0; } - tColModel *pModel = pDesc->pSchema; - - int32_t numOfRemainEntries = pDesc->pSchema->maxCapacity - pPage->numOfElems; + // current buffer is overflow, flush data to extensive buffer + int32_t numOfRemainEntries = pModel->capacity - pPage->numOfElems; tColModelAppend(pModel, pPage, data, 0, numOfRemainEntries, numOfRows); - /* current buffer is full, need to flushed to disk */ - assert(pPage->numOfElems == pDesc->pSchema->maxCapacity); + // current buffer is full, need to flushed to disk + assert(pPage->numOfElems == pModel->capacity); int32_t ret = tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType); if (ret != 0) { return -1; @@ -392,15 +409,15 @@ int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePa while (remain > 0) { int32_t numOfWriteElems = 0; - if (remain > pModel->maxCapacity) { - 
numOfWriteElems = pModel->maxCapacity; + if (remain > pModel->capacity) { + numOfWriteElems = pModel->capacity; } else { numOfWriteElems = remain; } tColModelAppend(pModel, pPage, data, numOfRows - remain, numOfWriteElems, numOfRows); - if (pPage->numOfElems == pModel->maxCapacity) { + if (pPage->numOfElems == pModel->capacity) { int32_t ret = tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType); if (ret != 0) { return -1; @@ -429,7 +446,8 @@ void tscDestroyLocalReducer(SSqlObj *pSql) { } SSqlCmd *pCmd = &pSql->cmd; - + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + // there is no more result, so we release all allocated resource SLocalReducer *pLocalReducer = (SLocalReducer*)atomic_exchange_ptr(&pRes->pLocalReducer, NULL); if (pLocalReducer != NULL) { @@ -440,15 +458,18 @@ void tscDestroyLocalReducer(SSqlObj *pSql) { tscTrace("%p waiting for delete procedure, status: %d", pSql, status); } - tfree(pLocalReducer->interpolationInfo.prevValues); - tfree(pLocalReducer->interpolationInfo.pTags); + taosDestoryInterpoInfo(&pLocalReducer->interpolationInfo); if (pLocalReducer->pCtx != NULL) { - for(int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + for(int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i]; + tVariantDestroy(&pCtx->tag); + if (pCtx->tagInfo.pTagCtxList != NULL) { + tfree(pCtx->tagInfo.pTagCtxList); + } } - + tfree(pLocalReducer->pCtx); } @@ -459,7 +480,7 @@ void tscDestroyLocalReducer(SSqlObj *pSql) { tfree(pLocalReducer->pResultBuf); if (pLocalReducer->pResInfo != NULL) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { tfree(pLocalReducer->pResInfo[i].interResultBuf); } @@ -492,14 +513,16 @@ void tscDestroyLocalReducer(SSqlObj *pSql) { tscTrace("%p free local reducer finished", pSql); } -static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd 
*pCmd, tColModel *pModel) { +static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCmd, SColumnModel *pModel) { int32_t numOfGroupByCols = 0; - if (pCmd->groupbyExpr.numOfGroupCols > 0) { - numOfGroupByCols = pCmd->groupbyExpr.numOfGroupCols; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { + numOfGroupByCols = pQueryInfo->groupbyExpr.numOfGroupCols; } // primary timestamp column is involved in final result - if (pCmd->nAggTimeInterval != 0) { + if (pQueryInfo->nAggTimeInterval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { numOfGroupByCols++; } @@ -509,20 +532,20 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm } if (numOfGroupByCols > 0) { - int32_t startCols = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupCols; + int32_t startCols = pQueryInfo->fieldsInfo.numOfOutputCols - pQueryInfo->groupbyExpr.numOfGroupCols; // tags value locate at the last columns - for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) { + for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { orderIdx[i] = startCols++; } - if (pCmd->nAggTimeInterval != 0) { + if (pQueryInfo->nAggTimeInterval != 0) { // the first column is the timestamp, handles queries like "interval(10m) group by tags" orderIdx[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } - *pOrderDesc = tOrderDesCreate(orderIdx, numOfGroupByCols, pModel, pCmd->order.order); + *pOrderDesc = tOrderDesCreate(orderIdx, numOfGroupByCols, pModel, pQueryInfo->order.order); tfree(orderIdx); if (*pOrderDesc == NULL) { @@ -533,15 +556,23 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm } bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpBuffer) { - int16_t functionId = tscSqlExprGet(pCmd, 0)->functionId; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 
pCmd->clauseIndex); + + int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId; // disable merge procedure for column projection query + assert(functionId != TSDB_FUNC_ARITHM); + + if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + return true; + } + if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) { return false; } tOrderDescriptor *pOrderDesc = pReducer->pDesc; - int32_t numOfCols = pOrderDesc->orderIdx.numOfOrderedCols; + int32_t numOfCols = pOrderDesc->orderIdx.numOfCols; // no group by columns, all data belongs to one group if (numOfCols <= 0) { @@ -550,29 +581,30 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage if (pOrderDesc->orderIdx.pData[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) { //<= 0 // super table interval query - assert(pCmd->nAggTimeInterval > 0); - pOrderDesc->orderIdx.numOfOrderedCols -= 1; + assert(pQueryInfo->nAggTimeInterval > 0); + pOrderDesc->orderIdx.numOfCols -= 1; } else { // simple group by query - assert(pCmd->nAggTimeInterval == 0); + assert(pQueryInfo->nAggTimeInterval == 0); } // only one row exists int32_t ret = compare_a(pOrderDesc, 1, 0, pPrev, 1, 0, tmpBuffer->data); - pOrderDesc->orderIdx.numOfOrderedCols = numOfCols; + pOrderDesc->orderIdx.numOfCols = numOfCols; return (ret == 0); } int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc, - tColModel **pFinalModel, uint32_t nBufferSizes) { + SColumnModel **pFinalModel, uint32_t nBufferSizes) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; SSchema * pSchema = NULL; - tColModel *pModel = NULL; + SColumnModel *pModel = NULL; *pFinalModel = NULL; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); (*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * 
pMeterMetaInfo->pMetricMeta->numOfVnodes); if (*pMemBuffer == NULL) { @@ -581,7 +613,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr return pRes->code; } - pSchema = (SSchema *)calloc(1, sizeof(SSchema) * pCmd->fieldsInfo.numOfOutputCols); + pSchema = (SSchema *)calloc(1, sizeof(SSchema) * pQueryInfo->fieldsInfo.numOfOutputCols); if (pSchema == NULL) { tscError("%p failed to allocate memory", pSql); pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; @@ -589,8 +621,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr } int32_t rlen = 0; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); pSchema[i].bytes = pExpr->resBytes; pSchema[i].type = pExpr->resType; @@ -598,15 +630,15 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr rlen += pExpr->resBytes; } - int32_t capacity = nBufferSizes / rlen; - pModel = tColModelCreate(pSchema, pCmd->fieldsInfo.numOfOutputCols, capacity); + int32_t capacity = 0; + if (rlen != 0) { + capacity = nBufferSizes / rlen; + } + + pModel = createColumnModel(pSchema, pQueryInfo->fieldsInfo.numOfOutputCols, capacity); for (int32_t i = 0; i < pMeterMetaInfo->pMetricMeta->numOfVnodes; ++i) { - char tmpPath[512] = {0}; - getTmpfilePath("tv_bf_db", tmpPath); - tscTrace("%p create [%d](%d) tmp file for subquery:%s", pSql, pMeterMetaInfo->pMetricMeta->numOfVnodes, i, tmpPath); - - tExtMemBufferCreate(&(*pMemBuffer)[i], nBufferSizes, rlen, tmpPath, pModel); + (*pMemBuffer)[i] = createExtMemBuffer(nBufferSizes, rlen, pModel); (*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL; } @@ -615,16 +647,16 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr return pRes->code; } - memset(pSchema, 0, sizeof(SSchema) * pCmd->fieldsInfo.numOfOutputCols); - 
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + memset(pSchema, 0, sizeof(SSchema) * pQueryInfo->fieldsInfo.numOfOutputCols); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); pSchema[i].type = pField->type; pSchema[i].bytes = pField->bytes; strcpy(pSchema[i].name, pField->name); } - *pFinalModel = tColModelCreate(pSchema, pCmd->fieldsInfo.numOfOutputCols, capacity); + *pFinalModel = createColumnModel(pSchema, pQueryInfo->fieldsInfo.numOfOutputCols, capacity); tfree(pSchema); return TSDB_CODE_SUCCESS; @@ -636,12 +668,12 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr * @param pFinalModel * @param numOfVnodes */ -void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, tColModel *pFinalModel, +void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, int32_t numOfVnodes) { - tColModelDestroy(pFinalModel); + destroyColumnModel(pFinalModel); tOrderDescDestroy(pDesc); for (int32_t i = 0; i < numOfVnodes; ++i) { - tExtMemBufferDestroy(&pMemBuffer[i]); + pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]); } tfree(pMemBuffer); @@ -666,8 +698,8 @@ int32_t loadNewDataFromDiskFor(SLocalReducer *pLocalReducer, SLocalDataSource *p #if defined(_DEBUG_VIEW) printf("new page load to buffer\n"); - tColModelDisplay(pOneInterDataSrc->pMemBuffer->pColModel, pOneInterDataSrc->filePage.data, - pOneInterDataSrc->filePage.numOfElems, pOneInterDataSrc->pMemBuffer->pColModel->maxCapacity); + tColModelDisplay(pOneInterDataSrc->pMemBuffer->pColumnModel, pOneInterDataSrc->filePage.data, + pOneInterDataSrc->filePage.numOfElems, pOneInterDataSrc->pMemBuffer->pColumnModel->capacity); #endif *needAdjustLoserTree = true; } else { @@ -714,30 +746,31 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource * 
} } -void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SSqlCmd *pCmd, SInterpolationInfo *pInterpoInfo) { +void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo, SInterpolationInfo *pInterpoInfo) { // discard following dataset in the same group and reset the interpolation information - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); int16_t prec = pMeterMetaInfo->pMeterMeta->precision; - int64_t stime = (pCmd->stime < pCmd->etime) ? pCmd->stime : pCmd->etime; - int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, prec); + int64_t stime = (pQueryInfo->stime < pQueryInfo->etime) ? pQueryInfo->stime : pQueryInfo->etime; + int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, prec); - taosInitInterpoInfo(pInterpoInfo, pCmd->order.order, revisedSTime, pCmd->groupbyExpr.numOfGroupCols, + taosInitInterpoInfo(pInterpoInfo, pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, pLocalReducer->rowSize); pLocalReducer->discard = true; pLocalReducer->discardData->numOfElems = 0; - tColModel *pModel = pLocalReducer->pDesc->pSchema; + SColumnModel *pModel = pLocalReducer->pDesc->pColumnModel; tColModelAppend(pModel, pLocalReducer->discardData, pLocalReducer->prevRowOfInput, 0, 1, 1); } // todo merge with following function -static void reversedCopyResultToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage *pFinalDataPage) { - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); +static void reversedCopyResultToDstBuf(SQueryInfo* pQueryInfo, SSqlRes *pRes, tFilePage *pFinalDataPage) { + + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); - int32_t offset = 
tscFieldInfoGetOffset(pCmd, i); + int32_t offset = tscFieldInfoGetOffset(pQueryInfo, i); char * src = pFinalDataPage->data + (pRes->numOfRows - 1) * pField->bytes + pRes->numOfRows * offset; char * dst = pRes->data + pRes->numOfRows * offset; @@ -749,13 +782,13 @@ static void reversedCopyResultToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage * } } -static void reversedCopyFromInterpolationToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage **pResPages, - SLocalReducer *pLocalReducer) { - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); +static void reversedCopyFromInterpolationToDstBuf(SQueryInfo* pQueryInfo, SSqlRes *pRes, tFilePage **pResPages, SLocalReducer *pLocalReducer) { + assert(0); + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); - int32_t offset = tscFieldInfoGetOffset(pCmd, i); - assert(offset == pLocalReducer->resColModel->colOffset[i]); + int32_t offset = tscFieldInfoGetOffset(pQueryInfo, i); + assert(offset == getColumnModelOffset(pLocalReducer->resColModel, i)); char *src = pResPages[i]->data + (pRes->numOfRows - 1) * pField->bytes; char *dst = pRes->data + pRes->numOfRows * offset; @@ -776,7 +809,8 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo SSqlCmd * pCmd = &pSql->cmd; SSqlRes * pRes = &pSql->res; tFilePage *pFinalDataPage = pLocalReducer->pResultBuf; - + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (pRes->pLocalReducer != pLocalReducer) { /* * Release the SSqlObj is called, and it is int destroying function invoked by other thread. 
@@ -786,111 +820,112 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo assert(pRes->pLocalReducer == NULL); } - if (pCmd->nAggTimeInterval == 0 || pCmd->interpoType == TSDB_INTERPO_NONE) { + if (pQueryInfo->nAggTimeInterval == 0 || pQueryInfo->interpoType == TSDB_INTERPO_NONE) { // no interval query, no interpolation pRes->data = pLocalReducer->pFinalRes; pRes->numOfRows = pFinalDataPage->numOfElems; - pRes->numOfTotal += pRes->numOfRows; + pRes->numOfTotalInCurrentClause += pRes->numOfRows; - if (pCmd->limit.offset > 0) { - if (pCmd->limit.offset < pRes->numOfRows) { + if (pQueryInfo->limit.offset > 0) { + if (pQueryInfo->limit.offset < pRes->numOfRows) { int32_t prevSize = pFinalDataPage->numOfElems; - tColModelErase(pLocalReducer->resColModel, pFinalDataPage, prevSize, 0, pCmd->limit.offset - 1); + tColModelErase(pLocalReducer->resColModel, pFinalDataPage, prevSize, 0, pQueryInfo->limit.offset - 1); /* remove the hole in column model */ tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize); - pRes->numOfRows -= pCmd->limit.offset; - pRes->numOfTotal -= pCmd->limit.offset; - pCmd->limit.offset = 0; + pRes->numOfRows -= pQueryInfo->limit.offset; + pRes->numOfTotalInCurrentClause -= pQueryInfo->limit.offset; + pQueryInfo->limit.offset = 0; } else { - pCmd->limit.offset -= pRes->numOfRows; + pQueryInfo->limit.offset -= pRes->numOfRows; pRes->numOfRows = 0; - pRes->numOfTotal = 0; + pRes->numOfTotalInCurrentClause = 0; } } - if (pCmd->limit.limit >= 0 && pRes->numOfTotal > pCmd->limit.limit) { + if (pQueryInfo->limit.limit >= 0 && pRes->numOfTotalInCurrentClause > pQueryInfo->limit.limit) { /* impose the limitation of output rows on the final result */ int32_t prevSize = pFinalDataPage->numOfElems; - int32_t overFlow = pRes->numOfTotal - pCmd->limit.limit; + int32_t overFlow = pRes->numOfTotalInCurrentClause - pQueryInfo->limit.limit; assert(overFlow < pRes->numOfRows); - pRes->numOfTotal = pCmd->limit.limit; + 
pRes->numOfTotalInCurrentClause = pQueryInfo->limit.limit; pRes->numOfRows -= overFlow; pFinalDataPage->numOfElems -= overFlow; tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize); /* set remain data to be discarded, and reset the interpolation information */ - savePrevRecordAndSetupInterpoInfo(pLocalReducer, pCmd, &pLocalReducer->interpolationInfo); + savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, &pLocalReducer->interpolationInfo); } - int32_t rowSize = tscGetResRowLength(pCmd); + int32_t rowSize = tscGetResRowLength(pQueryInfo); // handle the descend order output - if (pCmd->order.order == TSQL_SO_ASC) { +// if (pQueryInfo->order.order == TSQL_SO_ASC) { memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * rowSize); - } else { - reversedCopyResultToDstBuf(pCmd, pRes, pFinalDataPage); - } +// } else { +// reversedCopyResultToDstBuf(pQueryInfo, pRes, pFinalDataPage); +// } pFinalDataPage->numOfElems = 0; return; } - int64_t * pPrimaryKeys = (int64_t *)pLocalReducer->pBufForInterpo; + int64_t *pPrimaryKeys = (int64_t *)pLocalReducer->pBufForInterpo; + SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo; - int64_t actualETime = (pCmd->stime < pCmd->etime) ? pCmd->etime : pCmd->stime; + int64_t actualETime = (pQueryInfo->stime < pQueryInfo->etime) ? 
pQueryInfo->etime : pQueryInfo->stime; - tFilePage **pResPages = malloc(POINTER_BYTES * pCmd->fieldsInfo.numOfOutputCols); - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); - pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->maxCapacity); + tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutputCols); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); + pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->capacity); } - char ** srcData = (char **)malloc((POINTER_BYTES + sizeof(int32_t)) * pCmd->fieldsInfo.numOfOutputCols); - int32_t *functions = (int32_t *)((char *)srcData + pCmd->fieldsInfo.numOfOutputCols * sizeof(void *)); + char ** srcData = (char **)malloc((POINTER_BYTES + sizeof(int32_t)) * pQueryInfo->fieldsInfo.numOfOutputCols); + int32_t *functions = (int32_t *)((char *)srcData + pQueryInfo->fieldsInfo.numOfOutputCols * sizeof(void *)); - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - srcData[i] = pLocalReducer->pBufForInterpo + tscFieldInfoGetOffset(pCmd, i) * pInterpoInfo->numOfRawDataInRows; - functions[i] = tscSqlExprGet(pCmd, i)->functionId; + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + srcData[i] = pLocalReducer->pBufForInterpo + tscFieldInfoGetOffset(pQueryInfo, i) * pInterpoInfo->numOfRawDataInRows; + functions[i] = tscSqlExprGet(pQueryInfo, i)->functionId; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); int8_t precision = pMeterMetaInfo->pMeterMeta->precision; while (1) { int32_t remains = taosNumOfRemainPoints(pInterpoInfo); - TSKEY etime = taosGetRevisedEndKey(actualETime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, + TSKEY 
etime = taosGetRevisedEndKey(actualETime, pQueryInfo->order.order, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, precision); - int32_t nrows = taosGetNumOfResultWithInterpo(pInterpoInfo, pPrimaryKeys, remains, pCmd->nAggTimeInterval, etime, - pLocalReducer->resColModel->maxCapacity); + int32_t nrows = taosGetNumOfResultWithInterpo(pInterpoInfo, pPrimaryKeys, remains, pQueryInfo->nAggTimeInterval, etime, + pLocalReducer->resColModel->capacity); - int32_t newRows = taosDoInterpoResult(pInterpoInfo, pCmd->interpoType, pResPages, remains, nrows, - pCmd->nAggTimeInterval, pPrimaryKeys, pLocalReducer->resColModel, srcData, - pCmd->defaultVal, functions, pLocalReducer->resColModel->maxCapacity); + int32_t newRows = taosDoInterpoResult(pInterpoInfo, pQueryInfo->interpoType, pResPages, remains, nrows, + pQueryInfo->nAggTimeInterval, pPrimaryKeys, pLocalReducer->resColModel, srcData, + pQueryInfo->defaultVal, functions, pLocalReducer->resColModel->capacity); assert(newRows <= nrows); - if (pCmd->limit.offset < newRows) { - newRows -= pCmd->limit.offset; + if (pQueryInfo->limit.offset < newRows) { + newRows -= pQueryInfo->limit.offset; - if (pCmd->limit.offset > 0) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); - memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pCmd->limit.offset, newRows * pField->bytes); + if (pQueryInfo->limit.offset > 0) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); + memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pQueryInfo->limit.offset, newRows * pField->bytes); } } pRes->data = pLocalReducer->pFinalRes; pRes->numOfRows = newRows; - pRes->numOfTotal += newRows; + pRes->numOfTotalInCurrentClause += newRows; - pCmd->limit.offset = 0; + pQueryInfo->limit.offset = 0; break; } else { - pCmd->limit.offset -= newRows; + pQueryInfo->limit.offset -= 
newRows; pRes->numOfRows = 0; int32_t rpoints = taosNumOfRemainPoints(pInterpoInfo); @@ -902,7 +937,7 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo /* all output for current group are completed */ int32_t totalRemainRows = - taosGetNumOfResWithoutLimit(pInterpoInfo, pPrimaryKeys, rpoints, pCmd->nAggTimeInterval, actualETime); + taosGetNumOfResWithoutLimit(pInterpoInfo, pPrimaryKeys, rpoints, pQueryInfo->nAggTimeInterval, actualETime); if (totalRemainRows <= 0) { break; } @@ -911,33 +946,32 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo } if (pRes->numOfRows > 0) { - if (pCmd->limit.limit >= 0 && pRes->numOfTotal > pCmd->limit.limit) { - int32_t overFlow = pRes->numOfTotal - pCmd->limit.limit; + if (pQueryInfo->limit.limit >= 0 && pRes->numOfTotalInCurrentClause > pQueryInfo->limit.limit) { + int32_t overFlow = pRes->numOfTotalInCurrentClause - pQueryInfo->limit.limit; pRes->numOfRows -= overFlow; assert(pRes->numOfRows >= 0); - pRes->numOfTotal = pCmd->limit.limit; + pRes->numOfTotalInCurrentClause = pQueryInfo->limit.limit; pFinalDataPage->numOfElems -= overFlow; /* set remain data to be discarded, and reset the interpolation information */ - savePrevRecordAndSetupInterpoInfo(pLocalReducer, pCmd, pInterpoInfo); + savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pInterpoInfo); } - if (pCmd->order.order == TSQL_SO_ASC) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); - - memcpy(pRes->data + pLocalReducer->resColModel->colOffset[i] * pRes->numOfRows, pResPages[i]->data, - pField->bytes * pRes->numOfRows); + if (pQueryInfo->order.order == TSQL_SO_ASC) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); + int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i); + memcpy(pRes->data + offset * pRes->numOfRows, 
pResPages[i]->data, pField->bytes * pRes->numOfRows); } - } else { - reversedCopyFromInterpolationToDstBuf(pCmd, pRes, pResPages, pLocalReducer); + } else {//todo bug?? + reversedCopyFromInterpolationToDstBuf(pQueryInfo, pRes, pResPages, pLocalReducer); } } pFinalDataPage->numOfElems = 0; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { tfree(pResPages[i]); } tfree(pResPages); @@ -946,13 +980,15 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo } static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) { - tColModel *pColModel = pLocalReducer->pDesc->pSchema; - assert(pColModel->maxCapacity == 1 && tmpBuffer->numOfElems == 1); + SColumnModel *pColumnModel = pLocalReducer->pDesc->pColumnModel; + assert(pColumnModel->capacity == 1 && tmpBuffer->numOfElems == 1); // copy to previous temp buffer - for (int32_t i = 0; i < pLocalReducer->pDesc->pSchema->numOfCols; ++i) { - memcpy(pLocalReducer->prevRowOfInput + pColModel->colOffset[i], tmpBuffer->data + pColModel->colOffset[i], - pColModel->pFields[i].bytes); + for (int32_t i = 0; i < pColumnModel->numOfCols; ++i) { + SSchema* pSchema = getColumnModelSchema(pColumnModel, i); + int16_t offset = getColumnModelOffset(pColumnModel, i); + + memcpy(pLocalReducer->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes); } tmpBuffer->numOfElems = 0; @@ -961,8 +997,10 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) static void doExecuteSecondaryMerge(SSqlCmd* pCmd, SLocalReducer *pLocalReducer, bool needInit) { // the tag columns need to be set before all functions execution - for(int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { - SSqlExpr * pExpr = tscSqlExprGet(pCmd, j); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + for(int32_t j = 0; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) { + 
SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, j); SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j]; tVariantAssign(&pCtx->param[0], &pExpr->param[0]); @@ -981,8 +1019,8 @@ static void doExecuteSecondaryMerge(SSqlCmd* pCmd, SLocalReducer *pLocalReducer, } } - for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { - int32_t functionId = tscSqlExprGet(pCmd, j)->functionId; + for (int32_t j = 0; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) { + int32_t functionId = tscSqlExprGet(pQueryInfo, j)->functionId; if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { continue; } @@ -999,11 +1037,11 @@ static void handleUnprocessedRow(SSqlCmd* pCmd, SLocalReducer *pLocalReducer, tF } } -static int64_t getNumOfResultLocal(SSqlCmd *pCmd, SQLFunctionCtx *pCtx) { +static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx) { int64_t maxOutput = 0; - - for (int32_t j = 0; j < pCmd->exprsInfo.numOfExprs; ++j) { - int32_t functionId = tscSqlExprGet(pCmd, j)->functionId; + + for (int32_t j = 0; j < pQueryInfo->exprsInfo.numOfExprs; ++j) { + int32_t functionId = tscSqlExprGet(pQueryInfo, j)->functionId; /* * ts, tag, tagprj function can not decide the output number of current query @@ -1026,10 +1064,10 @@ static int64_t getNumOfResultLocal(SSqlCmd *pCmd, SQLFunctionCtx *pCtx) { * filled with the same result, which is the tags, specified in group by clause * */ -static void fillMultiRowsOfTagsVal(SSqlCmd *pCmd, int32_t numOfRes, SLocalReducer *pLocalReducer) { +static void fillMultiRowsOfTagsVal(SQueryInfo* pQueryInfo, int32_t numOfRes, SLocalReducer *pLocalReducer) { int32_t maxBufSize = 0; // find the max tags column length to prepare the buffer - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k); if (maxBufSize < pExpr->resBytes && pExpr->functionId == 
TSDB_FUNC_TAG) { maxBufSize = pExpr->resBytes; } @@ -1038,8 +1076,8 @@ static void fillMultiRowsOfTagsVal(SSqlCmd *pCmd, int32_t numOfRes, SLocalReduce assert(maxBufSize >= 0); char *buf = malloc((size_t) maxBufSize); - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k); if (pExpr->functionId != TSDB_FUNC_TAG) { continue; } @@ -1059,9 +1097,9 @@ static void fillMultiRowsOfTagsVal(SSqlCmd *pCmd, int32_t numOfRes, SLocalReduce free(buf); } -int32_t finalizeRes(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); +int32_t finalizeRes(SQueryInfo* pQueryInfo, SLocalReducer *pLocalReducer) { + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k); aAggs[pExpr->functionId].xFinalize(&pLocalReducer->pCtx[k]); // allow to re-initialize for the next round @@ -1070,10 +1108,10 @@ int32_t finalizeRes(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { pLocalReducer->hasPrevRow = false; - int32_t numOfRes = (int32_t)getNumOfResultLocal(pCmd, pLocalReducer->pCtx); + int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalReducer->pCtx); pLocalReducer->pResultBuf->numOfElems += numOfRes; - fillMultiRowsOfTagsVal(pCmd, numOfRes, pLocalReducer); + fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalReducer); return numOfRes; } @@ -1084,15 +1122,15 @@ int32_t finalizeRes(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { * results generated by simple aggregation function, we merge them all into one points * *Exception*: column projection query, required no merge procedure */ -bool needToMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) { +bool needToMerge(SQueryInfo* pQueryInfo, SLocalReducer *pLocalReducer, 
tFilePage *tmpBuffer) { int32_t ret = 0; // merge all result by default - int16_t functionId = tscSqlExprGet(pCmd, 0)->functionId; + int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId; if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) { // column projection query ret = 1; // disable merge procedure } else { tOrderDescriptor *pDesc = pLocalReducer->pDesc; - if (pDesc->orderIdx.numOfOrderedCols > 0) { + if (pDesc->orderIdx.numOfCols > 0) { if (pDesc->tsOrder == TSQL_SO_ASC) { // asc // todo refactor comparator ret = compare_a(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data); @@ -1106,24 +1144,25 @@ bool needToMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpBuff return (ret == 0); } -static bool reachGroupResultLimit(SSqlCmd *pCmd, SSqlRes *pRes) { - return (pRes->numOfGroups >= pCmd->slimit.limit && pCmd->slimit.limit >= 0); +static bool reachGroupResultLimit(SQueryInfo* pQueryInfo, SSqlRes *pRes) { + return (pRes->numOfGroups >= pQueryInfo->slimit.limit && pQueryInfo->slimit.limit >= 0); } static bool saveGroupResultInfo(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); pRes->numOfGroups += 1; // the output group is limited by the slimit clause - if (reachGroupResultLimit(pCmd, pRes)) { + if (reachGroupResultLimit(pQueryInfo, pRes)) { return true; } // pRes->pGroupRec = realloc(pRes->pGroupRec, pRes->numOfGroups*sizeof(SResRec)); // pRes->pGroupRec[pRes->numOfGroups-1].numOfRows = pRes->numOfRows; - // pRes->pGroupRec[pRes->numOfGroups-1].numOfTotal = pRes->numOfTotal; + // pRes->pGroupRec[pRes->numOfGroups-1].numOfTotalInCurrentClause = pRes->numOfTotalInCurrentClause; return false; } @@ -1138,8 +1177,10 @@ static bool saveGroupResultInfo(SSqlObj *pSql) { bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) { SSqlCmd * pCmd = &pSql->cmd; SSqlRes 
* pRes = &pSql->res; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); tFilePage *pResBuf = pLocalReducer->pResultBuf; - tColModel *pModel = pLocalReducer->resColModel; + SColumnModel *pModel = pLocalReducer->resColModel; pRes->code = TSDB_CODE_SUCCESS; @@ -1147,14 +1188,14 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no * ignore the output of the current group since this group is skipped by user * We set the numOfRows to be 0 and discard the possible remain results. */ - if (pCmd->slimit.offset > 0) { + if (pQueryInfo->slimit.offset > 0) { pRes->numOfRows = 0; - pCmd->slimit.offset -= 1; + pQueryInfo->slimit.offset -= 1; pLocalReducer->discard = !noMoreCurrentGroupRes; return false; } - tColModelCompact(pModel, pResBuf, pModel->maxCapacity); + tColModelCompact(pModel, pResBuf, pModel->capacity); memcpy(pLocalReducer->pBufForInterpo, pResBuf->data, pLocalReducer->nResultBufSize); #ifdef _DEBUG_VIEW @@ -1163,24 +1204,26 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no #endif SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo; - int32_t startIndex = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupCols; + int32_t startIndex = pQueryInfo->fieldsInfo.numOfOutputCols - pQueryInfo->groupbyExpr.numOfGroupCols; - for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) { + for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { + int16_t offset = getColumnModelOffset(pModel, startIndex + i); + SSchema* pSchema = getColumnModelSchema(pModel, startIndex + i); + memcpy(pInterpoInfo->pTags[i], - pLocalReducer->pBufForInterpo + pModel->colOffset[startIndex + i] * pResBuf->numOfElems, - pModel->pFields[startIndex + i].bytes); + pLocalReducer->pBufForInterpo + offset * pResBuf->numOfElems, pSchema->bytes); } - taosInterpoSetStartInfo(&pLocalReducer->interpolationInfo, pResBuf->numOfElems, pCmd->interpoType); + 
taosInterpoSetStartInfo(&pLocalReducer->interpolationInfo, pResBuf->numOfElems, pQueryInfo->interpoType); doInterpolateResult(pSql, pLocalReducer, noMoreCurrentGroupRes); return true; } -void resetOutputBuf(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { +void resetOutputBuf(SQueryInfo* pQueryInfo, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { pLocalReducer->pCtx[i].aOutputBuf = - pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pCmd, i) * pLocalReducer->resColModel->maxCapacity; + pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pLocalReducer->resColModel->capacity; } memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage)); @@ -1189,18 +1232,21 @@ void resetOutputBuf(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // reset out static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // In handling data in other groups, we need to reset the interpolation information for a new group data pRes->numOfRows = 0; - pRes->numOfTotal = 0; - pCmd->limit.offset = pLocalReducer->offset; + pRes->numOfTotalInCurrentClause = 0; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + pQueryInfo->limit.offset = pLocalReducer->offset; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); int16_t precision = pMeterMetaInfo->pMeterMeta->precision; // for group result interpolation, do not return if not data is generated - if (pCmd->interpoType != TSDB_INTERPO_NONE) { - int64_t stime = (pCmd->stime < pCmd->etime) ? 
pCmd->stime : pCmd->etime; - int64_t newTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, precision); + if (pQueryInfo->interpoType != TSDB_INTERPO_NONE) { + int64_t stime = (pQueryInfo->stime < pQueryInfo->etime) ? pQueryInfo->stime : pQueryInfo->etime; + int64_t newTime = taosGetIntervalStartTimestamp(stime, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, precision); - taosInitInterpoInfo(&pLocalReducer->interpolationInfo, pCmd->order.order, newTime, pCmd->groupbyExpr.numOfGroupCols, + taosInitInterpoInfo(&pLocalReducer->interpolationInfo, pQueryInfo->order.order, newTime, pQueryInfo->groupbyExpr.numOfGroupCols, pLocalReducer->rowSize); } } @@ -1213,22 +1259,23 @@ static bool doInterpolationForCurrentGroup(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); SLocalReducer * pLocalReducer = pRes->pLocalReducer; SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); int8_t p = pMeterMetaInfo->pMeterMeta->precision; if (taosHasRemainsDataForInterpolation(pInterpoInfo)) { - assert(pCmd->interpoType != TSDB_INTERPO_NONE); + assert(pQueryInfo->interpoType != TSDB_INTERPO_NONE); tFilePage *pFinalDataBuf = pLocalReducer->pResultBuf; int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pInterpoInfo->numOfRawDataInRows - 1)); int32_t remain = taosNumOfRemainPoints(pInterpoInfo); - TSKEY ekey = taosGetRevisedEndKey(etime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, p); + TSKEY ekey = taosGetRevisedEndKey(etime, pQueryInfo->order.order, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, p); int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, (TSKEY *)pLocalReducer->pBufForInterpo, remain, - pCmd->nAggTimeInterval, 
ekey, pLocalReducer->resColModel->maxCapacity); + pQueryInfo->nAggTimeInterval, ekey, pLocalReducer->resColModel->capacity); if (rows > 0) { // do interpo doInterpolateResult(pSql, pLocalReducer, false); } @@ -1248,18 +1295,19 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { bool prevGroupCompleted = (!pLocalReducer->discard) && pLocalReducer->hasUnprocessedRow; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); int8_t precision = pMeterMetaInfo->pMeterMeta->precision; if ((isAllSourcesCompleted(pLocalReducer) && !pLocalReducer->hasPrevRow) || pLocalReducer->pLocalDataSrc[0] == NULL || prevGroupCompleted) { // if interpoType == TSDB_INTERPO_NONE, return directly - if (pCmd->interpoType != TSDB_INTERPO_NONE) { - int64_t etime = (pCmd->stime < pCmd->etime) ? pCmd->etime : pCmd->stime; + if (pQueryInfo->interpoType != TSDB_INTERPO_NONE) { + int64_t etime = (pQueryInfo->stime < pQueryInfo->etime) ? 
pQueryInfo->etime : pQueryInfo->stime; - etime = taosGetRevisedEndKey(etime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, precision); - int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, NULL, 0, pCmd->nAggTimeInterval, etime, - pLocalReducer->resColModel->maxCapacity); + etime = taosGetRevisedEndKey(etime, pQueryInfo->order.order, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, precision); + int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, NULL, 0, pQueryInfo->nAggTimeInterval, etime, + pLocalReducer->resColModel->capacity); if (rows > 0) { // do interpo doInterpolateResult(pSql, pLocalReducer, true); } @@ -1289,10 +1337,12 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { static void doMergeWithPrevRows(SSqlObj *pSql, int32_t numOfRes) { SSqlCmd * pCmd = &pSql->cmd; SSqlRes * pRes = &pSql->res; + SLocalReducer *pLocalReducer = pRes->pLocalReducer; - - for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k); SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k]; pCtx->aOutputBuf += pCtx->outputBytes * numOfRes; @@ -1306,24 +1356,22 @@ static void doMergeWithPrevRows(SSqlObj *pSql, int32_t numOfRes) { doExecuteSecondaryMerge(pCmd, pLocalReducer, true); } -int32_t tscLocalDoReduce(SSqlObj *pSql) { +int32_t tscDoLocalreduce(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - + + tscResetForNextRetrieve(pRes); + if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed tscTrace("%s call the drop local reducer", __FUNCTION__); tscDestroyLocalReducer(pSql); - pRes->numOfRows = 0; - pRes->row = 0; return 0; } - - pRes->row = 0; - pRes->numOfRows = 0; - + SLocalReducer *pLocalReducer = 
pRes->pLocalReducer; - + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + // set the data merge in progress int32_t prevStatus = atomic_val_compare_exchange_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, TSC_LOCALREDUCE_IN_PROGRESS); @@ -1348,7 +1396,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { // clear buffer handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer); - tColModel *pModel = pLocalReducer->pDesc->pSchema; + SColumnModel *pModel = pLocalReducer->pDesc->pColumnModel; while (1) { if (isAllSourcesCompleted(pLocalReducer)) { @@ -1365,14 +1413,14 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { SLocalDataSource *pOneDataSrc = pLocalReducer->pLocalDataSrc[pTree->pNode[0].index]; tColModelAppend(pModel, tmpBuffer, pOneDataSrc->filePage.data, pOneDataSrc->rowIdx, 1, - pOneDataSrc->pMemBuffer->pColModel->maxCapacity); + pOneDataSrc->pMemBuffer->pColumnModel->capacity); #if defined(_DEBUG_VIEW) printf("chosen row:\t"); SSrcColumnInfo colInfo[256] = {0}; - tscGetSrcColumnInfo(colInfo, pCmd); + tscGetSrcColumnInfo(colInfo, pQueryInfo); - tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->numOfElems, pModel->maxCapacity, colInfo); + tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->numOfElems, pModel->capacity, colInfo); #endif if (pLocalReducer->discard) { @@ -1406,7 +1454,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { } if (pLocalReducer->hasPrevRow) { - if (needToMerge(pCmd, pLocalReducer, tmpBuffer)) { + if (needToMerge(pQueryInfo, pLocalReducer, tmpBuffer)) { // belong to the group of the previous row, continue process it doExecuteSecondaryMerge(pCmd, pLocalReducer, false); @@ -1417,7 +1465,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { * current row does not belong to the group of previous row. * so the processing of previous group is completed. 
*/ - int32_t numOfRes = finalizeRes(pCmd, pLocalReducer); + int32_t numOfRes = finalizeRes(pQueryInfo, pLocalReducer); bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer); tFilePage *pResBuf = pLocalReducer->pResultBuf; @@ -1427,7 +1475,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { * continue to process results instead of return results. */ if ((!sameGroup && pResBuf->numOfElems > 0) || - (pResBuf->numOfElems == pLocalReducer->resColModel->maxCapacity)) { + (pResBuf->numOfElems == pLocalReducer->resColModel->capacity)) { // does not belong to the same group bool notSkipped = doGenerateFinalResults(pSql, pLocalReducer, !sameGroup); @@ -1440,7 +1488,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { pLocalReducer->hasUnprocessedRow = true; } - resetOutputBuf(pCmd, pLocalReducer); + resetOutputBuf(pQueryInfo, pLocalReducer); pOneDataSrc->rowIdx += 1; // here we do not check the return value @@ -1494,7 +1542,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) { } if (pLocalReducer->hasPrevRow) { - finalizeRes(pCmd, pLocalReducer); + finalizeRes(pQueryInfo, pLocalReducer); } if (pLocalReducer->pResultBuf->numOfElems) { diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 6981c16e9e1a5faf62453c56e454f4839fa45c43..ea064093cd15c23211f81e8ca1a4663e25fd862a 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -18,46 +18,81 @@ #include "trpc.h" #include "tscJoinProcess.h" #include "tscProfile.h" +#include "tscSQLParser.h" #include "tscSecondaryMerge.h" #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" #include "tscompression.h" #include "tsocket.h" -#include "tscSQLParser.h" #include "ttime.h" #include "ttimer.h" #include "tutil.h" #define TSC_MGMT_VNODE 999 -#ifdef CLUSTER - SIpStrList tscMgmtIpList; - int tsMasterIndex = 0; - int tsSlaveIndex = 1; -#else - int tsMasterIndex = 0; - int tsSlaveIndex = 0; // slave == master for single node edition - uint32_t tsServerIp; -#endif 
+SIpStrList tscMgmtIpList; +int tsMasterIndex = 0; +int tsSlaveIndex = 1; + +int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0}; -int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql); int (*tscProcessMsgRsp[TSDB_SQL_MAX])(SSqlObj *pSql); +char *doBuildMsgHeader(SSqlObj *pSql, char **pStart); void (*tscUpdateVnodeMsg[TSDB_SQL_MAX])(SSqlObj *pSql, char *buf); void tscProcessActivityTimer(void *handle, void *tmrId); int tscKeepConn[TSDB_SQL_MAX] = {0}; +TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid); +void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts); +void tscSaveSubscriptionProgress(void* sub); static int32_t minMsgSize() { return tsRpcHeadSize + sizeof(STaosDigest); } -#ifdef CLUSTER void tscPrintMgmtIp() { if (tscMgmtIpList.numOfIps <= 0) { - tscError("invalid IP list:%d", tscMgmtIpList.numOfIps); + tscError("invalid mgmt IP list:%d", tscMgmtIpList.numOfIps); } else { - for (int i = 0; i < tscMgmtIpList.numOfIps; ++i) tscTrace("mgmt index:%d ip:%s", i, tscMgmtIpList.ipstr[i]); + for (int i = 0; i < tscMgmtIpList.numOfIps; ++i) { + tscTrace("mgmt index:%d ip:%s", i, tscMgmtIpList.ipstr[i]); + } + } +} + +void tscSetMgmtIpListFromCluster(SIpList *pIpList) { + tscMgmtIpList.numOfIps = pIpList->numOfIps; + if (memcmp(tscMgmtIpList.ip, pIpList->ip, pIpList->numOfIps * 4) != 0) { + for (int i = 0; i < pIpList->numOfIps; ++i) { + tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); + tscMgmtIpList.ip[i] = pIpList->ip[i]; + } + tscTrace("cluster mgmt IP list:"); + tscPrintMgmtIp(); + } +} + +void tscSetMgmtIpListFromEdge() { + if (tscMgmtIpList.numOfIps != 2) { + tscMgmtIpList.numOfIps = 2; + strcpy(tscMgmtIpList.ipstr[0], tsMasterIp); + tscMgmtIpList.ip[0] = inet_addr(tsMasterIp); + strcpy(tscMgmtIpList.ipstr[1], tsMasterIp); + tscMgmtIpList.ip[1] = inet_addr(tsMasterIp); + tscTrace("edge mgmt IP list:"); + tscPrintMgmtIp(); + } +} + +void tscSetMgmtIpList(SIpList *pIpList) { + /* + * The iplist returned by the cluster edition 
is the current management nodes + * and the iplist returned by the edge edition is empty + */ + if (pIpList->numOfIps != 0) { + tscSetMgmtIpListFromCluster(pIpList); + } else { + tscSetMgmtIpListFromEdge(); } } -#endif /* * For each management node, try twice at least in case of poor network situation. @@ -68,11 +103,7 @@ void tscPrintMgmtIp() { */ static int32_t tscGetMgmtConnMaxRetryTimes() { int32_t factor = 2; -#ifdef CLUSTER return tscMgmtIpList.numOfIps * factor; -#else - return 1*factor; -#endif } void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { @@ -88,18 +119,9 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { if (code == 0) { SHeartBeatRsp *pRsp = (SHeartBeatRsp *)pRes->pRsp; -#ifdef CLUSTER SIpList * pIpList = &pRsp->ipList; - tscMgmtIpList.numOfIps = pIpList->numOfIps; - if (memcmp(tscMgmtIpList.ip, pIpList->ip, pIpList->numOfIps * 4) != 0) { - for (int i = 0; i < pIpList->numOfIps; ++i) { - tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); - tscMgmtIpList.ip[i] = pIpList->ip[i]; - } - tscTrace("new mgmt IP list:"); - tscPrintMgmtIp(); - } -#endif + tscSetMgmtIpList(pIpList); + if (pRsp->killConnection) { tscKillConnection(pObj); } else { @@ -125,7 +147,11 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { if (NULL == pSql) return; pSql->fp = tscProcessHeartBeatRsp; - pSql->cmd.command = TSDB_SQL_HB; + + SQueryInfo *pQueryInfo = NULL; + tscGetQueryInfoDetailSafely(&pSql->cmd, 0, &pQueryInfo); + pQueryInfo->command = TSDB_SQL_HB; + if (TSDB_CODE_SUCCESS != tscAllocPayload(&(pSql->cmd), TSDB_DEFAULT_PAYLOAD_SIZE)) { tfree(pSql); return; @@ -135,6 +161,8 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { pSql->pTscObj = pObj; pSql->signature = pSql; pObj->pHb = pSql; + tscAddSubqueryInfo(&pObj->pHb->cmd); + tscTrace("%p pHb is allocated, pObj:%p", pObj->pHb, pObj); } @@ -152,19 +180,12 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { void tscGetConnToMgmt(SSqlObj *pSql, uint8_t 
*pCode) { STscObj *pTscObj = pSql->pTscObj; -#ifdef CLUSTER if (pSql->retry < tscGetMgmtConnMaxRetryTimes()) { *pCode = 0; pSql->retry++; pSql->index = pSql->index % tscMgmtIpList.numOfIps; if (pSql->cmd.command > TSDB_SQL_READ && pSql->index == 0) pSql->index = 1; void *thandle = taosGetConnFromCache(tscConnCache, tscMgmtIpList.ip[pSql->index], TSC_MGMT_VNODE, pTscObj->user); -#else - if (pSql->retry < tscGetMgmtConnMaxRetryTimes()) { - *pCode = 0; - pSql->retry++; - void *thandle = taosGetConnFromCache(tscConnCache, tsServerIp, TSC_MGMT_VNODE, pTscObj->user); -#endif if (thandle == NULL) { SRpcConnInit connInit; @@ -180,38 +201,28 @@ void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) { connInit.encrypt = 0; connInit.secret = pSql->pTscObj->pass; -#ifdef CLUSTER connInit.peerIp = tscMgmtIpList.ipstr[pSql->index]; -#else - connInit.peerIp = tsServerIpStr; -#endif thandle = taosOpenRpcConn(&connInit, pCode); } pSql->thandle = thandle; -#ifdef CLUSTER pSql->ip = tscMgmtIpList.ip[pSql->index]; pSql->vnode = TSC_MGMT_VNODE; tscTrace("%p mgmt index:%d ip:0x%x is picked up, pConn:%p", pSql, pSql->index, tscMgmtIpList.ip[pSql->index], pSql->thandle); -#else - pSql->ip = tsServerIp; - pSql->vnode = TSC_MGMT_VNODE; -#endif } - + // the pSql->res.code is the previous error(status) code. 
if (pSql->thandle == NULL && pSql->retry >= pSql->maxRetry) { if (pSql->res.code != TSDB_CODE_SUCCESS && pSql->res.code != TSDB_CODE_ACTION_IN_PROGRESS) { *pCode = pSql->res.code; } - + tscError("%p reach the max retry:%d, code:%d", pSql, pSql->retry, *pCode); } } void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { - char ipstr[40] = {0}; SVPeerDesc *pVPeersDesc = NULL; static int vidIndex = 0; STscObj * pTscObj = pSql->pTscObj; @@ -219,10 +230,10 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { pSql->thandle = NULL; SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // multiple vnode query - SVnodeSidList *vnodeList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pCmd->vnodeIdx); + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { // multiple vnode query + SVnodeSidList *vnodeList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pMeterMetaInfo->vnodeIndex); if (vnodeList != NULL) { pVPeersDesc = vnodeList->vpeerDesc; } @@ -243,10 +254,13 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { while (pSql->retry < pSql->maxRetry) { (pSql->retry)++; -#ifdef CLUSTER + char ipstr[40] = {0}; if (pVPeersDesc[pSql->index].ip == 0) { - (pSql->index) = (pSql->index + 1) % TSDB_VNODES_SUPPORT; - continue; + /* + * in the edge edition, ip is 0, and at this time we use masterIp instead + * in the cluster edition, ip is vnode ip + */ + pVPeersDesc[pSql->index].ip = tscMgmtIpList.ip[0]; } *pCode = TSDB_CODE_SUCCESS; @@ -276,41 +290,16 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { pSql->vnode = pVPeersDesc[pSql->index].vnode; tscTrace("%p vnode:%d ip:%p index:%d is picked up, pConn:%p", pSql, pVPeersDesc[pSql->index].vnode, pVPeersDesc[pSql->index].ip, pSql->index, pSql->thandle); -#else - *pCode = 0; - void *thandle = taosGetConnFromCache(tscConnCache, tsServerIp, 
pVPeersDesc[0].vnode, pTscObj->user); - - if (thandle == NULL) { - SRpcConnInit connInit; - memset(&connInit, 0, sizeof(connInit)); - connInit.cid = vidIndex; - connInit.sid = 0; - connInit.spi = 0; - connInit.encrypt = 0; - connInit.meterId = pSql->pTscObj->user; - connInit.peerId = htonl((pVPeersDesc[0].vnode << TSDB_SHELL_VNODE_BITS)); - connInit.shandle = pVnodeConn; - connInit.ahandle = pSql; - connInit.peerIp = tsServerIpStr; - connInit.peerPort = tsVnodeShellPort; - thandle = taosOpenRpcConn(&connInit, pCode); - vidIndex = (vidIndex + 1) % tscNumOfThreads; - } - - pSql->thandle = thandle; - pSql->ip = tsServerIp; - pSql->vnode = pVPeersDesc[0].vnode; -#endif break; } - + // the pSql->res.code is the previous error(status) code. if (pSql->thandle == NULL && pSql->retry >= pSql->maxRetry) { if (pSql->res.code != TSDB_CODE_SUCCESS && pSql->res.code != TSDB_CODE_ACTION_IN_PROGRESS) { *pCode = pSql->res.code; } - + tscError("%p reach the max retry:%d, code:%d", pSql, pSql->retry, *pCode); } } @@ -352,14 +341,14 @@ int tscSendMsgToServer(SSqlObj *pSql) { * this SQL object may be released by other thread due to the completion of this query even before the log * is dumped to log file. So the signature needs to be kept in a local variable. 
*/ - uint64_t signature = (uint64_t) pSql->signature; + uint64_t signature = (uint64_t)pSql->signature; if (tscUpdateVnodeMsg[pSql->cmd.command]) (*tscUpdateVnodeMsg[pSql->cmd.command])(pSql, buf); - + int ret = taosSendMsgToPeerH(pSql->thandle, pStart, pSql->cmd.payloadLen, pSql); if (ret >= 0) { code = 0; } - + tscTrace("%p send msg ret:%d code:%d sig:%p", pSql, ret, code, signature); } } @@ -367,15 +356,9 @@ int tscSendMsgToServer(SSqlObj *pSql) { return code; } -#ifdef CLUSTER void tscProcessMgmtRedirect(SSqlObj *pSql, uint8_t *cont) { SIpList *pIpList = (SIpList *)(cont); - tscMgmtIpList.numOfIps = pIpList->numOfIps; - for (int i = 0; i < pIpList->numOfIps; ++i) { - tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); - tscMgmtIpList.ip[i] = pIpList->ip[i]; - tscTrace("Update mgmt Ip, index:%d ip:%s", i, tscMgmtIpList.ipstr[i]); - } + tscSetMgmtIpList(pIpList); if (pSql->cmd.command < TSDB_SQL_READ) { tsMasterIndex = 0; @@ -386,7 +369,6 @@ void tscProcessMgmtRedirect(SSqlObj *pSql, uint8_t *cont) { tscPrintMgmtIp(); } -#endif void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { if (ahandle == NULL) return NULL; @@ -418,15 +400,11 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { return ahandle; } - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); if (msg == NULL) { - tscTrace("%p no response from ip:0x%x", pSql, pSql->ip); - -#ifdef CLUSTER + tscTrace("%p no response from ip:%s", pSql, taosIpStr(pSql->ip)); + pSql->index++; -#else - // for single node situation, do NOT try next index -#endif pSql->thandle = NULL; // todo taos_stop_query() in async model /* @@ -442,12 +420,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { // renew meter meta in case it is changed if (pCmd->command < TSDB_SQL_FETCH && pRes->code != TSDB_CODE_QUERY_CANCELLED) { -#ifdef CLUSTER pSql->maxRetry = 
TSDB_VNODES_SUPPORT * 2; -#else - // for fetch, it shall not renew meter meta - pSql->maxRetry = 2; -#endif code = tscRenewMeterMeta(pSql, pMeterMetaInfo->name); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return pSql; @@ -460,8 +433,6 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { } else { uint16_t rspCode = pMsg->content[0]; -#ifdef CLUSTER - if (rspCode == TSDB_CODE_REDIRECT) { tscTrace("%p it shall be redirected!", pSql); taosAddConnIntoCache(tscConnCache, thandle, pSql->ip, pSql->vnode, pObj->user); @@ -469,7 +440,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { if (pCmd->command > TSDB_SQL_MGMT) { tscProcessMgmtRedirect(pSql, pMsg->content + 1); - } else if (pCmd->command == TSDB_SQL_INSERT){ + } else if (pCmd->command == TSDB_SQL_INSERT) { pSql->index++; pSql->maxRetry = TSDB_VNODES_SUPPORT * 2; } else { @@ -481,7 +452,8 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { msg = NULL; } else if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID || rspCode == TSDB_CODE_INVALID_VNODE_ID || rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || - rspCode == TSDB_CODE_NETWORK_UNAVAIL || rspCode == TSDB_CODE_NOT_ACTIVE_SESSION) { + rspCode == TSDB_CODE_NETWORK_UNAVAIL || rspCode == TSDB_CODE_NOT_ACTIVE_SESSION || + rspCode == TSDB_CODE_TABLE_ID_MISMATCH) { /* * not_active_table: 1. the virtual node may fail to create table, since the procedure of create table is asynchronized, * the virtual node may have not create table till now, so try again by using the new metermeta. @@ -492,33 +464,19 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { * removed. So, renew metermeta and try again. * not_active_session: db has been move to other node, the vnode does not exist on this dnode anymore. 
*/ - -#else - if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID || - rspCode == TSDB_CODE_INVALID_VNODE_ID || rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || - rspCode == TSDB_CODE_NETWORK_UNAVAIL) { -#endif - pSql->thandle = NULL; + pSql->thandle = NULL; taosAddConnIntoCache(tscConnCache, thandle, pSql->ip, pSql->vnode, pObj->user); - - if ((pCmd->command == TSDB_SQL_INSERT || pCmd->command == TSDB_SQL_SELECT) && - (rspCode == TSDB_CODE_INVALID_TABLE_ID || rspCode == TSDB_CODE_INVALID_VNODE_ID)) { - /* - * In case of the insert/select operations, the invalid table(vnode) id means - * the submit/query msg is invalid, renew meter meta will not help to fix this problem, - * so return the invalid_query_msg to client directly. - */ - code = TSDB_CODE_INVALID_QUERY_MSG; - } else if (pCmd->command == TSDB_SQL_CONNECT) { + + if (pCmd->command == TSDB_SQL_CONNECT) { code = TSDB_CODE_NETWORK_UNAVAIL; } else if (pCmd->command == TSDB_SQL_HB) { code = TSDB_CODE_NOT_READY; } else { tscTrace("%p it shall renew meter meta, code:%d", pSql, rspCode); - + pSql->maxRetry = TSDB_VNODES_SUPPORT * 2; - pSql->res.code = (uint8_t) rspCode; // keep the previous error code - + pSql->res.code = (uint8_t)rspCode; // keep the previous error code + code = tscRenewMeterMeta(pSql, pMeterMetaInfo->name); if (code == TSDB_CODE_ACTION_IN_PROGRESS) return pSql; @@ -542,7 +500,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { if (pMeterMetaInfo->pMeterMeta) // it may be deleted pMeterMetaInfo->pMeterMeta->index = pSql->index; } else { - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pSql->cmd.vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pMeterMetaInfo->vnodeIndex); pVnodeSidList->index = pSql->index; } } else { @@ -650,78 +608,102 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { } static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, 
SRetrieveSupport *trsupport, SSqlObj *prevSqlObj); -static int tscLaunchMetricSubQueries(SSqlObj *pSql); +static int tscLaunchSTableSubqueries(SSqlObj *pSql); // todo merge with callback -int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeIdx, SJoinSubquerySupporter *pSupporter) { - SSqlCmd *pCmd = &pSql->cmd; +int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSubquerySupporter *pSupporter) { + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); pSql->res.qhandle = 0x1; pSql->res.numOfRows = 0; if (pSql->pSubs == NULL) { - pSql->pSubs = malloc(POINTER_BYTES * pSupporter->pState->numOfTotal); + pSql->pSubs = calloc(pSupporter->pState->numOfTotal, POINTER_BYTES); if (pSql->pSubs == NULL) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } } - SSqlObj *pNew = createSubqueryObj(pSql, vnodeIdx, tableIndex, tscJoinQueryCallback, pSupporter, NULL); + SSqlObj *pNew = createSubqueryObj(pSql, tableIndex, tscJoinQueryCallback, pSupporter, NULL); if (pNew == NULL) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } pSql->pSubs[pSql->numOfSubs++] = pNew; + assert(pSql->numOfSubs <= pSupporter->pState->numOfTotal); - if (QUERY_IS_JOIN_QUERY(pCmd->type)) { - addGroupInfoForSubquery(pSql, pNew, tableIndex); + if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) { + addGroupInfoForSubquery(pSql, pNew, 0, tableIndex); // refactor as one method - tscColumnBaseInfoUpdateTableIndex(&pNew->cmd.colList, 0); - tscColumnBaseInfoCopy(&pSupporter->colList, &pNew->cmd.colList, 0); + SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + assert(pNewQueryInfo != NULL); - tscSqlExprCopy(&pSupporter->exprsInfo, &pNew->cmd.exprsInfo, pSupporter->uid); + tscColumnBaseInfoUpdateTableIndex(&pNewQueryInfo->colList, 0); + tscColumnBaseInfoCopy(&pSupporter->colList, &pNewQueryInfo->colList, 0); - tscFieldInfoCopyAll(&pNew->cmd.fieldsInfo, &pSupporter->fieldsInfo); - tscTagCondCopy(&pSupporter->tagCond, &pNew->cmd.tagCond); - 
pSupporter->groupbyExpr = pNew->cmd.groupbyExpr; + tscSqlExprCopy(&pSupporter->exprsInfo, &pNewQueryInfo->exprsInfo, pSupporter->uid); + + tscFieldInfoCopyAll(&pSupporter->fieldsInfo, &pNewQueryInfo->fieldsInfo); + tscTagCondCopy(&pSupporter->tagCond, &pNewQueryInfo->tagCond); pNew->cmd.numOfCols = 0; - pNew->cmd.nAggTimeInterval = 0; - memset(&pNew->cmd.limit, 0, sizeof(SLimitVal)); - memset(&pNew->cmd.groupbyExpr, 0, sizeof(SSqlGroupbyExpr)); + pNewQueryInfo->nAggTimeInterval = 0; + memset(&pNewQueryInfo->limit, 0, sizeof(SLimitVal)); + + // backup the data and clear it in the sqlcmd object + pSupporter->groupbyExpr = pNewQueryInfo->groupbyExpr; + memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr)); // set the ts,tags that involved in join, as the output column of intermediate result - tscFreeSqlCmdData(&pNew->cmd); + tscClearSubqueryInfo(&pNew->cmd); SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1}; SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscAddSpecialColumnForSelect(&pNew->cmd, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL); + tscAddSpecialColumnForSelect(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL); // set the tags value for ts_comp function - SSqlExpr *pExpr = tscSqlExprGet(&pNew->cmd, 0); + SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0); - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0); - int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd, pMeterMetaInfo->pMeterMeta->uid); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pNewQueryInfo, 0); + int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pSupporter->tagCond, pMeterMetaInfo->pMeterMeta->uid); pExpr->param->i64Key = tagColIndex; pExpr->numOfParams = 1; - addRequiredTagColumn(pCmd, tagColIndex, 0); - // add the filter tag column for (int32_t i = 0; i < pSupporter->colList.numOfCols; ++i) { SColumnBase *pColBase = &pSupporter->colList.pColList[i]; if 
(pColBase->numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered. - tscColumnBaseCopy(&pNew->cmd.colList.pColList[pNew->cmd.colList.numOfCols], pColBase); - pNew->cmd.colList.numOfCols++; + tscColumnBaseCopy(&pNewQueryInfo->colList.pColList[pNewQueryInfo->colList.numOfCols], pColBase); + pNewQueryInfo->colList.numOfCols++; } } + + tscTrace("%p subquery:%p tableIndex:%d, vnodeIdx:%d, type:%d, transfer to ts_comp query to retrieve timestamps, " + "exprInfo:%d, colList:%d, fieldsInfo:%d, name:%s", + pSql, pNew, tableIndex, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type, + pNewQueryInfo->exprsInfo.numOfExprs, pNewQueryInfo->colList.numOfCols, + pNewQueryInfo->fieldsInfo.numOfOutputCols, pNewQueryInfo->pMeterInfo[0]->name); + tscPrintSelectClause(pNew, 0); + + tscTrace("%p subquery:%p tableIndex:%d, vnodeIdx:%d, type:%d, transfer to ts_comp query to retrieve timestamps, " + "exprInfo:%d, colList:%d, fieldsInfo:%d, name:%s", + pSql, pNew, tableIndex, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type, + pNewQueryInfo->exprsInfo.numOfExprs, pNewQueryInfo->colList.numOfCols, + pNewQueryInfo->fieldsInfo.numOfOutputCols, pNewQueryInfo->pMeterInfo[0]->name); + tscPrintSelectClause(pNew, 0); } else { - pNew->cmd.type |= TSDB_QUERY_TYPE_SUBQUERY; + SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + pNewQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY; } +#ifdef _DEBUG_VIEW + tscPrintSelectClause(pNew, 0); +#endif + return tscProcessSql(pNew); } @@ -729,30 +711,31 @@ int doProcessSql(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - int32_t code = TSDB_CODE_SUCCESS; - void *asyncFp = pSql->fp; - if (tscBuildMsg[pCmd->command](pSql) < 0) { // build msg failed - code = TSDB_CODE_APP_ERROR; - } else { - code = tscSendMsgToServer(pSql); + if (pCmd->command == TSDB_SQL_SELECT || pCmd->command == TSDB_SQL_FETCH || pCmd->command == TSDB_SQL_RETRIEVE || + pCmd->command == TSDB_SQL_INSERT || pCmd->command == TSDB_SQL_CONNECT || 
pCmd->command == TSDB_SQL_HB || + pCmd->command == TSDB_SQL_META || pCmd->command == TSDB_SQL_METRIC) { + tscBuildMsg[pCmd->command](pSql, NULL); } + + int32_t code = tscSendMsgToServer(pSql); + if (asyncFp) { - if (code != 0) { + if (code != TSDB_CODE_SUCCESS) { pRes->code = code; tscQueueAsyncRes(pSql); } return 0; } - if (code != 0) { + if (code != TSDB_CODE_SUCCESS) { pRes->code = code; return code; } tsem_wait(&pSql->rspSem); - if (pRes->code == 0 && tscProcessMsgRsp[pCmd->command]) (*tscProcessMsgRsp[pCmd->command])(pSql); + if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command]) (*tscProcessMsgRsp[pCmd->command])(pSql); tsem_post(&pSql->emptyRspSem); @@ -760,35 +743,43 @@ int doProcessSql(SSqlObj *pSql) { } int tscProcessSql(SSqlObj *pSql) { - char * name = NULL; - SSqlRes * pRes = &pSql->res; - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + char * name = NULL; + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo *pMeterMetaInfo = NULL; + int16_t type = 0; + + if (pQueryInfo != NULL) { + pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + if (pMeterMetaInfo != NULL) { + name = pMeterMetaInfo->name; + } - if (pMeterMetaInfo != NULL) { - name = pMeterMetaInfo->name; + type = pQueryInfo->type; + + // for hearbeat, numOfTables == 0; + assert((pQueryInfo->numOfTables == 0 && pQueryInfo->command == TSDB_SQL_HB) || pQueryInfo->numOfTables > 0); } - tscTrace("%p SQL cmd:%d will be processed, name:%s, type:%d", pSql, pSql->cmd.command, name, pSql->cmd.type); + tscTrace("%p SQL cmd:%d will be processed, name:%s, type:%d", pSql, pCmd->command, name, type); pSql->retry = 0; if (pSql->cmd.command < TSDB_SQL_MGMT) { -#ifdef CLUSTER pSql->maxRetry = TSDB_VNODES_SUPPORT; -#else - pSql->maxRetry = 2; -#endif - + // the pMeterMetaInfo cannot be NULL if (pMeterMetaInfo == NULL) { pSql->res.code = 
TSDB_CODE_OTHERS; return pSql->res.code; } - + if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { pSql->index = pMeterMetaInfo->pMeterMeta->index; } else { // it must be the parent SSqlObj for super table query - if ((pSql->cmd.type & TSDB_QUERY_TYPE_SUBQUERY) != 0) { - int32_t idx = pSql->cmd.vnodeIdx; + if ((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) != 0) { + int32_t idx = pMeterMetaInfo->vnodeIndex; + SVnodeSidList *pSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, idx); pSql->index = pSidList->index; } @@ -800,23 +791,24 @@ int tscProcessSql(SSqlObj *pSql) { } // todo handle async situation - if (QUERY_IS_JOIN_QUERY(pSql->cmd.type)) { - if ((pSql->cmd.type & TSDB_QUERY_TYPE_SUBQUERY) == 0) { + if (QUERY_IS_JOIN_QUERY(type)) { + if ((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) == 0) { SSubqueryState *pState = calloc(1, sizeof(SSubqueryState)); - pState->numOfTotal = pSql->cmd.numOfTables; - for (int32_t i = 0; i < pSql->cmd.numOfTables; ++i) { + pState->numOfTotal = pQueryInfo->numOfTables; + + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { SJoinSubquerySupporter *pSupporter = tscCreateJoinSupporter(pSql, pState, i); if (pSupporter == NULL) { // failed to create support struct, abort current query tscError("%p tableIndex:%d, failed to allocate join support object, abort further query", pSql, i); - pState->numOfCompleted = pSql->cmd.numOfTables - i - 1; + pState->numOfCompleted = pQueryInfo->numOfTables - i - 1; pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; return pSql->res.code; } - int32_t code = tscLaunchJoinSubquery(pSql, i, 0, pSupporter); + int32_t code = tscLaunchJoinSubquery(pSql, i, pSupporter); if (code != TSDB_CODE_SUCCESS) { // failed to create subquery object, quit query tscDestroyJoinSupporter(pSupporter); pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; @@ -825,10 +817,10 @@ int tscProcessSql(SSqlObj *pSql) { } } - sem_post(&pSql->emptyRspSem); - sem_wait(&pSql->rspSem); + tsem_post(&pSql->emptyRspSem); + tsem_wait(&pSql->rspSem); 
- sem_post(&pSql->emptyRspSem); + tsem_post(&pSql->emptyRspSem); if (pSql->numOfSubs <= 0) { pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; @@ -839,16 +831,16 @@ int tscProcessSql(SSqlObj *pSql) { return TSDB_CODE_SUCCESS; } else { // for first stage sub query, iterate all vnodes to get all timestamp - if ((pSql->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { + if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { return doProcessSql(pSql); } } } - if (tscIsTwoStageMergeMetricQuery(pCmd)) { + if (tscIsTwoStageMergeMetricQuery(pQueryInfo, 0)) { /* * (ref. line: 964) - * Before this function returns from tscLaunchMetricSubQueries and continues, pSql may have been released at user + * Before this function returns from tscLaunchSTableSubqueries and continues, pSql may have been released at user * program context after retrieving all data from vnodes. User function is called at tscRetrieveFromVnodeCallBack. * * when pSql being released, pSql->fp == NULL, it may pass the check of pSql->fp == NULL, @@ -856,14 +848,14 @@ int tscProcessSql(SSqlObj *pSql) { */ void *fp = pSql->fp; - if (tscLaunchMetricSubQueries(pSql) != TSDB_CODE_SUCCESS) { + if (tscLaunchSTableSubqueries(pSql) != TSDB_CODE_SUCCESS) { return pRes->code; } if (fp == NULL) { - sem_post(&pSql->emptyRspSem); - sem_wait(&pSql->rspSem); - sem_post(&pSql->emptyRspSem); + tsem_post(&pSql->emptyRspSem); + tsem_wait(&pSql->rspSem); + tsem_post(&pSql->emptyRspSem); // set the command flag must be after the semaphore been correctly set. 
pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC; @@ -875,49 +867,50 @@ int tscProcessSql(SSqlObj *pSql) { return doProcessSql(pSql); } -static void doCleanupSubqueries(SSqlObj *pSql, int32_t vnodeIndex, int32_t numOfVnodes, SRetrieveSupport *pTrs, - tOrderDescriptor *pDesc, tColModel *pModel, tExtMemBuffer **pMemoryBuf, - SSubqueryState *pState) { - pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC; - pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; - - /* - * if i > 0, at least one sub query is issued, the allocated resource is - * freed by it when subquery completed. - */ - if (vnodeIndex == 0) { - tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfVnodes); - tfree(pState); - - if (pTrs != NULL) { - tfree(pTrs->localBuffer); - - pthread_mutex_unlock(&pTrs->queryMutex); - pthread_mutex_destroy(&pTrs->queryMutex); - tfree(pTrs); - } +static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs, SSubqueryState* pState) { + assert(numOfSubs <= pSql->numOfSubs && numOfSubs >= 0 && pState != NULL); + + for(int32_t i = 0; i < numOfSubs; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + assert(pSub != NULL); + + SRetrieveSupport* pSupport = pSub->param; + + tfree(pSupport->localBuffer); + + pthread_mutex_unlock(&pSupport->queryMutex); + pthread_mutex_destroy(&pSupport->queryMutex); + + tfree(pSupport); + + tscFreeSqlObj(pSub); } + + free(pState); } -int tscLaunchMetricSubQueries(SSqlObj *pSql) { +int tscLaunchSTableSubqueries(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; // pRes->code check only serves in launching metric sub-queries if (pRes->code == TSDB_CODE_QUERY_CANCELLED) { - pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC; // enable the abort of kill metric function. - return pSql->res.code; + pCmd->command = TSDB_SQL_RETRIEVE_METRIC; // enable the abort of kill metric function. 
+ return pRes->code; } tExtMemBuffer ** pMemoryBuf = NULL; tOrderDescriptor *pDesc = NULL; - tColModel * pModel = NULL; + SColumnModel * pModel = NULL; pRes->qhandle = 1; // hack the qhandle check - const uint32_t nBufferSize = (1 << 16); // 64KB - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - int32_t numOfVnodes = pMeterMetaInfo->pMetricMeta->numOfVnodes; - assert(numOfVnodes > 0); + const uint32_t nBufferSize = (1 << 16); // 64KB + + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + int32_t numOfSubQueries = pMeterMetaInfo->pMetricMeta->numOfVnodes; + assert(numOfSubQueries > 0); int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, nBufferSize); if (ret != 0) { @@ -928,37 +921,34 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) { return pRes->code; } - pSql->pSubs = malloc(POINTER_BYTES * numOfVnodes); - pSql->numOfSubs = numOfVnodes; + pSql->pSubs = calloc(numOfSubQueries, POINTER_BYTES); + pSql->numOfSubs = numOfSubQueries; - tscTrace("%p retrieved query data from %d vnode(s)", pSql, numOfVnodes); + tscTrace("%p retrieved query data from %d vnode(s)", pSql, numOfSubQueries); SSubqueryState *pState = calloc(1, sizeof(SSubqueryState)); - pState->numOfTotal = numOfVnodes; + pState->numOfTotal = numOfSubQueries; pRes->code = TSDB_CODE_SUCCESS; - for (int32_t i = 0; i < numOfVnodes; ++i) { - if (pRes->code == TSDB_CODE_QUERY_CANCELLED || pRes->code == TSDB_CODE_CLI_OUT_OF_MEMORY) { - /* - * during launch sub queries, if the master query is cancelled. the remain is ignored and set the retrieveDoneRec - * to the value of remaining not built sub-queries. So, the already issued sub queries can successfully free - * allocated resources. 
- */ - pState->numOfCompleted = (numOfVnodes - i); - doCleanupSubqueries(pSql, i, numOfVnodes, NULL, pDesc, pModel, pMemoryBuf, pState); - - if (i == 0) { - return pSql->res.code; - } - + int32_t i = 0; + for (; i < numOfSubQueries; ++i) { + SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport)); + if (trs == NULL) { + tscError("%p failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql, i, strerror(errno)); break; } - - SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport)); + trs->pExtMemBuffer = pMemoryBuf; trs->pOrderDescriptor = pDesc; trs->pState = pState; + trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage)); - trs->vnodeIdx = i; + if (trs->localBuffer == NULL) { + tscError("%p failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql, i, strerror(errno)); + tfree(trs); + break; + } + + trs->subqueryIndex = i; trs->pParentSqlObj = pSql; trs->pFinalColModel = pModel; @@ -968,25 +958,43 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) { pthread_mutexattr_destroy(&mutexattr); SSqlObj *pNew = tscCreateSqlObjForSubquery(pSql, trs, NULL); - if (pNew == NULL) { - pState->numOfCompleted = (numOfVnodes - i); - doCleanupSubqueries(pSql, i, numOfVnodes, trs, pDesc, pModel, pMemoryBuf, pState); - - if (i == 0) { - return pSql->res.code; - } - + tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno)); + tfree(trs->localBuffer); + tfree(trs); break; } // todo handle multi-vnode situation - if (pSql->cmd.tsBuf) { - pNew->cmd.tsBuf = tsBufClone(pSql->cmd.tsBuf); + if (pQueryInfo->tsBuf) { + SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + pNewQueryInfo->tsBuf = tsBufClone(pQueryInfo->tsBuf); } - - tscTrace("%p sub:%p launch subquery.orderOfSub:%d", pSql, pNew, pNew->cmd.vnodeIdx); - tscProcessSql(pNew); + + tscTrace("%p sub:%p create subquery success. 
orderOfSub:%d", pSql, pNew, trs->subqueryIndex); + } + + if (i < numOfSubQueries) { + tscError("%p failed to prepare subquery structure and launch subqueries", pSql); + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + + tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfSubQueries); + doCleanupSubqueries(pSql, i, pState); + return pRes->code; // free all allocated resource + } + + if (pRes->code == TSDB_CODE_QUERY_CANCELLED) { + tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfSubQueries); + doCleanupSubqueries(pSql, i, pState); + return pRes->code; + } + + for(int32_t j = 0; j < numOfSubQueries; ++j) { + SSqlObj* pSub = pSql->pSubs[j]; + SRetrieveSupport* pSupport = pSub->param; + + tscTrace("%p sub:%p launch subquery, orderOfSub:%d.", pSql, pSub, pSupport->subqueryIndex); + tscProcessSql(pSub); } return TSDB_CODE_SUCCESS; @@ -1034,13 +1042,16 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows) { SSqlObj *pPObj = trsupport->pParentSqlObj; - int32_t idx = trsupport->vnodeIdx; + int32_t subqueryIndex = trsupport->subqueryIndex; assert(pSql != NULL); + SSubqueryState* pState = trsupport->pState; + assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 && + pPObj->numOfSubs == pState->numOfTotal); /* retrieved in subquery failed. OR query cancelled in retrieve phase. 
*/ - if (trsupport->pState->code == TSDB_CODE_SUCCESS && pPObj->res.code != TSDB_CODE_SUCCESS) { - trsupport->pState->code = -(int)pPObj->res.code; + if (pState->code == TSDB_CODE_SUCCESS && pPObj->res.code != TSDB_CODE_SUCCESS) { + pState->code = -(int)pPObj->res.code; /* * kill current sub-query connection, which may retrieve data from vnodes; @@ -1049,34 +1060,34 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq pSql->res.numOfRows = 0; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; // disable retry efforts tscTrace("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%d", trsupport->pParentSqlObj, pSql, - trsupport->vnodeIdx, trsupport->pState->code); + subqueryIndex, pState->code); } if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query. - tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, idx); - tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pPObj, pSql, idx, - trsupport->pState->code); + tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, subqueryIndex); + tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pPObj, pSql, + subqueryIndex, pState->code); } else { - if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && trsupport->pState->code == TSDB_CODE_SUCCESS) { + if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pState->code == TSDB_CODE_SUCCESS) { /* * current query failed, and the retry count is less than the available * count, retry query clear previous retrieved data, then launch a new sub query */ - tExtMemBufferClear(trsupport->pExtMemBuffer[idx]); + tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]); // clear local saved number of results trsupport->localBuffer->numOfElems = 0; pthread_mutex_unlock(&trsupport->queryMutex); tscTrace("%p sub:%p retrieve failed, code:%d, 
orderOfSub:%d, retry:%d", trsupport->pParentSqlObj, pSql, numOfRows, - idx, trsupport->numOfRetry); + subqueryIndex, trsupport->numOfRetry); SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql); if (pNew == NULL) { tscError("%p sub:%p failed to create new subquery sqlobj due to out of memory, abort retry", trsupport->pParentSqlObj, pSql); - trsupport->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; return; } @@ -1084,24 +1095,27 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq tscProcessSql(pNew); return; } else { // reach the maximum retry count, abort - atomic_val_compare_exchange_32(&trsupport->pState->code, TSDB_CODE_SUCCESS, numOfRows); + atomic_val_compare_exchange_32(&pState->code, TSDB_CODE_SUCCESS, numOfRows); tscError("%p sub:%p retrieve failed,code:%d,orderOfSub:%d failed.no more retry,set global code:%d", pPObj, pSql, - numOfRows, idx, trsupport->pState->code); + numOfRows, subqueryIndex, pState->code); } } - if (atomic_add_fetch_32(&trsupport->pState->numOfCompleted, 1) < trsupport->pState->numOfTotal) { + int32_t numOfTotal = pState->numOfTotal; + + int32_t finished = atomic_add_fetch_32(&pState->numOfCompleted, 1); + if (finished < numOfTotal) { + tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex, finished); return tscFreeSubSqlObj(trsupport, pSql); } // all subqueries are failed - tscError("%p retrieve from %d vnode(s) completed,code:%d.FAILED.", pPObj, trsupport->pState->numOfTotal, - trsupport->pState->code); - pPObj->res.code = -(trsupport->pState->code); + tscError("%p retrieve from %d vnode(s) completed,code:%d.FAILED.", pPObj, pState->numOfTotal, pState->code); + pPObj->res.code = -(pState->code); // release allocated resource tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, trsupport->pFinalColModel, 
- trsupport->pState->numOfTotal); + pState->numOfTotal); tfree(trsupport->pState); tscFreeSubSqlObj(trsupport, pSql); @@ -1117,7 +1131,9 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq pPObj->cmd.command = TSDB_SQL_RETRIEVE_METRIC; } else { // in case of second stage join subquery, invoke its callback function instead of regular QueueAsyncRes - if ((pPObj->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pPObj->cmd, 0); + + if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == TSDB_QUERY_TYPE_JOIN_SEC_STAGE) { (*pPObj->fp)(pPObj->param, pPObj, pPObj->res.code); } else { // regular super table query if (pPObj->res.code != TSDB_CODE_SUCCESS) { @@ -1129,43 +1145,56 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { SRetrieveSupport *trsupport = (SRetrieveSupport *)param; - int32_t idx = trsupport->vnodeIdx; + int32_t idx = trsupport->subqueryIndex; SSqlObj * pPObj = trsupport->pParentSqlObj; tOrderDescriptor *pDesc = trsupport->pOrderDescriptor; SSqlObj *pSql = (SSqlObj *)tres; - if (pSql == NULL) { - /* sql object has been released in error process, return immediately */ + if (pSql == NULL) { // sql object has been released in error process, return immediately tscTrace("%p subquery has been released, idx:%d, abort", pPObj, idx); return; } + SSubqueryState* pState = trsupport->pState; + assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 && + pPObj->numOfSubs == pState->numOfTotal); + // query process and cancel query process may execute at the same time pthread_mutex_lock(&trsupport->queryMutex); - if (numOfRows < 0 || trsupport->pState->code < 0 || pPObj->res.code != TSDB_CODE_SUCCESS) { + if (numOfRows < 0 || pState->code < 0 || pPObj->res.code != TSDB_CODE_SUCCESS) { return 
tscHandleSubRetrievalError(trsupport, pSql, numOfRows); } - SSqlRes * pRes = &pSql->res; - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlRes * pRes = &pSql->res; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SVnodeSidList *vnodeInfo = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, idx); SVPeerDesc * pSvd = &vnodeInfo->vpeerDesc[vnodeInfo->index]; if (numOfRows > 0) { assert(pRes->numOfRows == numOfRows); - atomic_add_fetch_64(&trsupport->pState->numOfRetrievedRows, numOfRows); + int64_t num = atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows); tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%u,vid:%d,orderOfSub:%d", pPObj, pSql, - pRes->numOfRows, trsupport->pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx); + pRes->numOfRows, pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx); + + if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) { + tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId64 " , current:%" PRId64, + pPObj, pSql, tsMaxNumOfOrderedResults, num); + tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_SORTED_RES_TOO_MANY); + return; + } + #ifdef _DEBUG_VIEW printf("received data from vnode: %d rows\n", pRes->numOfRows); SSrcColumnInfo colInfo[256] = {0}; - tscGetSrcColumnInfo(colInfo, &pPObj->cmd); - tColModelDisplayEx(pDesc->pSchema, pRes->data, pRes->numOfRows, pRes->numOfRows, colInfo); + + tscGetSrcColumnInfo(colInfo, pQueryInfo); + tColModelDisplayEx(pDesc->pColumnModel, pRes->data, pRes->numOfRows, pRes->numOfRows, colInfo); #endif if (tsTotalTmpDirGB != 0 && tsAvailTmpDirGB < tsMinimalTmpDirGB) { tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pPObj, pSql, @@ -1173,8 +1202,9 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { 
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_CLI_NO_DISKSPACE); return; } + int32_t ret = saveToBuffer(trsupport->pExtMemBuffer[idx], pDesc, trsupport->localBuffer, pRes->data, - pRes->numOfRows, pCmd->groupbyExpr.orderType); + pRes->numOfRows, pQueryInfo->groupbyExpr.orderType); if (ret < 0) { // set no disk space error info, and abort retry tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_CLI_NO_DISKSPACE); @@ -1185,20 +1215,20 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { } else { // all data has been retrieved to client /* data in from current vnode is stored in cache and disk */ - uint32_t numOfRowsFromVnode = - trsupport->pExtMemBuffer[pCmd->vnodeIdx]->numOfAllElems + trsupport->localBuffer->numOfElems; + uint32_t numOfRowsFromVnode = trsupport->pExtMemBuffer[idx]->numOfTotalElems + trsupport->localBuffer->numOfElems; tscTrace("%p sub:%p all data retrieved from ip:%u,vid:%d, numOfRows:%d, orderOfSub:%d", pPObj, pSql, pSvd->ip, pSvd->vnode, numOfRowsFromVnode, idx); - tColModelCompact(pDesc->pSchema, trsupport->localBuffer, pDesc->pSchema->maxCapacity); + tColModelCompact(pDesc->pColumnModel, trsupport->localBuffer, pDesc->pColumnModel->capacity); #ifdef _DEBUG_VIEW - printf("%ld rows data flushed to disk:\n", trsupport->localBuffer->numOfElems); + printf("%" PRIu64 " rows data flushed to disk:\n", trsupport->localBuffer->numOfElems); SSrcColumnInfo colInfo[256] = {0}; - tscGetSrcColumnInfo(colInfo, &pPObj->cmd); - tColModelDisplayEx(pDesc->pSchema, trsupport->localBuffer->data, trsupport->localBuffer->numOfElems, + tscGetSrcColumnInfo(colInfo, pQueryInfo); + tColModelDisplayEx(pDesc->pColumnModel, trsupport->localBuffer->data, trsupport->localBuffer->numOfElems, trsupport->localBuffer->numOfElems, colInfo); #endif + if (tsTotalTmpDirGB != 0 && tsAvailTmpDirGB < tsMinimalTmpDirGB) { tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pPObj, pSql, tsAvailTmpDirGB, 
tsMinimalTmpDirGB); @@ -1208,25 +1238,34 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { // each result for a vnode is ordered as an independant list, // then used as an input of loser tree for disk-based merge routine - int32_t ret = - tscFlushTmpBuffer(trsupport->pExtMemBuffer[idx], pDesc, trsupport->localBuffer, pCmd->groupbyExpr.orderType); + int32_t ret = tscFlushTmpBuffer(trsupport->pExtMemBuffer[idx], pDesc, trsupport->localBuffer, + pQueryInfo->groupbyExpr.orderType); if (ret != 0) { /* set no disk space error info, and abort retry */ return tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_CLI_NO_DISKSPACE); } - - if (atomic_add_fetch_32(&trsupport->pState->numOfCompleted, 1) < trsupport->pState->numOfTotal) { + + // keep this value local variable, since the pState variable may be released by other threads, if atomic_add opertion + // increases the finished value up to pState->numOfTotal value, which means all subqueries are completed. + // In this case, the comparsion between finished value and released pState->numOfTotal is not safe. 
+ int32_t numOfTotal = pState->numOfTotal; + + int32_t finished = atomic_add_fetch_32(&pState->numOfCompleted, 1); + if (finished < numOfTotal) { + tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex, finished); return tscFreeSubSqlObj(trsupport, pSql); } // all sub-queries are returned, start to local merge process - pDesc->pSchema->maxCapacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage; + pDesc->pColumnModel->capacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage; tscTrace("%p retrieve from %d vnodes completed.final NumOfRows:%d,start to build loser tree", pPObj, - trsupport->pState->numOfTotal, trsupport->pState->numOfCompleted); + pState->numOfTotal, pState->numOfRetrievedRows); + + SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pPObj->cmd, 0); + tscClearInterpInfo(pPQueryInfo); - tscClearInterpInfo(&pPObj->cmd); - tscCreateLocalReducer(trsupport->pExtMemBuffer, trsupport->pState->numOfTotal, pDesc, trsupport->pFinalColModel, + tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfTotal, pDesc, trsupport->pFinalColModel, &pPObj->cmd, &pPObj->res); tscTrace("%p build loser tree completed", pPObj); @@ -1235,7 +1274,8 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { pPObj->res.row = 0; // only free once - free(trsupport->pState); + tfree(trsupport->pState); + tscFreeSubSqlObj(trsupport, pSql); if (pPObj->fp == NULL) { @@ -1256,7 +1296,10 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { } void tscKillMetricQuery(SSqlObj *pSql) { - if (!tscIsTwoStageMergeMetricQuery(&pSql->cmd)) { + SSqlCmd* pCmd = &pSql->cmd; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (!tscIsTwoStageMergeMetricQuery(pQueryInfo, 0)) { return; } @@ -1299,10 +1342,20 @@ void tscKillMetricQuery(SSqlObj *pSql) { static void tscRetrieveDataRes(void *param, TAOS_RES *tres, int retCode); static SSqlObj 
*tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj) { - SSqlObj *pNew = createSubqueryObj(pSql, trsupport->vnodeIdx, 0, tscRetrieveDataRes, trsupport, prevSqlObj); + const int32_t table_index = 0; + + SSqlObj *pNew = createSubqueryObj(pSql, table_index, tscRetrieveDataRes, trsupport, prevSqlObj); if (pNew != NULL) { // the sub query of two-stage super table query - pNew->cmd.type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY; - pSql->pSubs[trsupport->vnodeIdx] = pNew; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY; + + assert(pQueryInfo->numOfTables == 1 && pNew->cmd.numOfClause == 1); + + // launch subquery for each vnode, so the subquery index equals to the vnodeIndex. + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, table_index); + pMeterMetaInfo->vnodeIndex = trsupport->subqueryIndex; + + pSql->pSubs[trsupport->subqueryIndex] = pNew; } return pNew; @@ -1310,10 +1363,14 @@ static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsu void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { SRetrieveSupport *trsupport = (SRetrieveSupport *)param; - - SSqlObj * pSql = (SSqlObj *)tres; - int32_t idx = pSql->cmd.vnodeIdx; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + + SSqlObj* pParentSql = trsupport->pParentSqlObj; + SSqlObj* pSql = (SSqlObj *)tres; + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); + assert(pSql->cmd.numOfClause == 1 && pSql->cmd.pQueryInfo[0]->numOfTables == 1); + + int32_t idx = pMeterMetaInfo->vnodeIndex; SVnodeSidList *vnodeInfo = NULL; SVPeerDesc * pSvd = NULL; @@ -1322,16 +1379,20 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { pSvd = &vnodeInfo->vpeerDesc[vnodeInfo->index]; } - if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS || trsupport->pState->code != TSDB_CODE_SUCCESS) { + SSubqueryState* pState = 
trsupport->pState; + assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 && + pParentSql->numOfSubs == pState->numOfTotal); + + if (pParentSql->res.code != TSDB_CODE_SUCCESS || pState->code != TSDB_CODE_SUCCESS) { // metric query is killed, Note: code must be less than 0 trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; - if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS) { - code = -(int)(trsupport->pParentSqlObj->res.code); + if (pParentSql->res.code != TSDB_CODE_SUCCESS) { + code = -(int)(pParentSql->res.code); } else { - code = trsupport->pState->code; + code = pState->code; } - tscTrace("%p query cancelled or failed, sub:%p, orderOfSub:%d abort, code:%d", trsupport->pParentSqlObj, pSql, - trsupport->vnodeIdx, code); + tscTrace("%p query cancelled or failed, sub:%p, orderOfSub:%d abort, code:%d", pParentSql, pSql, + trsupport->subqueryIndex, code); } /* @@ -1343,49 +1404,54 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { */ if (code != TSDB_CODE_SUCCESS) { if (trsupport->numOfRetry++ >= MAX_NUM_OF_SUBQUERY_RETRY) { - tscTrace("%p sub:%p reach the max retry count,set global code:%d", trsupport->pParentSqlObj, pSql, code); - atomic_val_compare_exchange_32(&trsupport->pState->code, 0, code); + tscTrace("%p sub:%p reach the max retry count,set global code:%d", pParentSql, pSql, code); + atomic_val_compare_exchange_32(&pState->code, 0, code); } else { // does not reach the maximum retry count, go on - tscTrace("%p sub:%p failed code:%d, retry:%d", trsupport->pParentSqlObj, pSql, code, trsupport->numOfRetry); + tscTrace("%p sub:%p failed code:%d, retry:%d", pParentSql, pSql, code, trsupport->numOfRetry); - SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql); + SSqlObj *pNew = tscCreateSqlObjForSubquery(pParentSql, trsupport, pSql); if (pNew == NULL) { tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vid:%d, orderOfSub:%d", - 
trsupport->pParentSqlObj, pSql, pSvd->vnode, trsupport->vnodeIdx); + trsupport->pParentSqlObj, pSql, pSvd != NULL ? pSvd->vnode : -1, trsupport->subqueryIndex); - trsupport->pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY; + pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; } else { - assert(pNew->cmd.pMeterInfo[0]->pMeterMeta != NULL && pNew->cmd.pMeterInfo[0]->pMetricMeta != NULL); + SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + assert(pNewQueryInfo->pMeterInfo[0]->pMeterMeta != NULL && pNewQueryInfo->pMeterInfo[0]->pMetricMeta != NULL); tscProcessSql(pNew); return; } } } - if (trsupport->pState->code != TSDB_CODE_SUCCESS) { // failed, abort + if (pState->code != TSDB_CODE_SUCCESS) { // failed, abort if (vnodeInfo != NULL) { - tscTrace("%p sub:%p query failed,ip:%u,vid:%d,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql, + tscTrace("%p sub:%p query failed,ip:%u,vid:%d,orderOfSub:%d,global code:%d", pParentSql, pSql, vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, - trsupport->vnodeIdx, trsupport->pState->code); + trsupport->subqueryIndex, pState->code); } else { - tscTrace("%p sub:%p query failed,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql, - trsupport->vnodeIdx, trsupport->pState->code); + tscTrace("%p sub:%p query failed,orderOfSub:%d,global code:%d", pParentSql, pSql, + trsupport->subqueryIndex, pState->code); } - tscRetrieveFromVnodeCallBack(param, tres, trsupport->pState->code); + tscRetrieveFromVnodeCallBack(param, tres, pState->code); } else { // success, proceed to retrieve data from dnode - tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, + if (vnodeInfo != NULL) { + tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, vnodeInfo->vpeerDesc[vnodeInfo->index].ip, 
vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, - trsupport->vnodeIdx); + trsupport->subqueryIndex); + } else { + tscTrace("%p sub:%p query complete, orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, + trsupport->subqueryIndex); + } taos_fetch_rows_a(tres, tscRetrieveFromVnodeCallBack, param); } } -int tscBuildRetrieveMsg(SSqlObj *pSql) { +int tscBuildRetrieveMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char *pMsg, *pStart; - int msgLen = 0; pStart = pSql->cmd.payload + tsRpcHeadSize; pMsg = pStart; @@ -1393,20 +1459,20 @@ int tscBuildRetrieveMsg(SSqlObj *pSql) { *((uint64_t *)pMsg) = pSql->res.qhandle; pMsg += sizeof(pSql->res.qhandle); - *((uint16_t*)pMsg) = htons(pSql->cmd.type); - pMsg += sizeof(pSql->cmd.type); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + *((uint16_t *)pMsg) = htons(pQueryInfo->type); + pMsg += sizeof(pQueryInfo->type); - msgLen = pMsg - pStart; - pSql->cmd.payloadLen = msgLen; + pSql->cmd.payloadLen = pMsg - pStart; pSql->cmd.msgType = TSDB_MSG_TYPE_RETRIEVE; - return msgLen; + return TSDB_CODE_SUCCESS; } void tscUpdateVnodeInSubmitMsg(SSqlObj *pSql, char *buf) { SShellSubmitMsg *pShellMsg; char * pMsg; - SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, pSql->cmd.clauseIndex, 0); SMeterMeta *pMeterMeta = pMeterMetaInfo->pMeterMeta; @@ -1414,45 +1480,49 @@ void tscUpdateVnodeInSubmitMsg(SSqlObj *pSql, char *buf) { pShellMsg = (SShellSubmitMsg *)pMsg; pShellMsg->vnode = htons(pMeterMeta->vpeerDesc[pSql->index].vnode); - tscTrace("%p update submit msg vnode:%s:%d", pSql, taosIpStr(pMeterMeta->vpeerDesc[pSql->index].ip), htons(pShellMsg->vnode)); + tscTrace("%p update submit msg vnode:%s:%d", pSql, taosIpStr(pMeterMeta->vpeerDesc[pSql->index].ip), + htons(pShellMsg->vnode)); } -int tscBuildSubmitMsg(SSqlObj *pSql) { +int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SShellSubmitMsg *pShellMsg; char * pMsg, *pStart; - int msgLen 
= 0; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + + SMeterMeta *pMeterMeta = pMeterMetaInfo->pMeterMeta; pStart = pSql->cmd.payload + tsRpcHeadSize; pMsg = pStart; pShellMsg = (SShellSubmitMsg *)pMsg; - pShellMsg->import = pSql->cmd.import; + + pShellMsg->import = htons(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT) ? 0 : 1); pShellMsg->vnode = htons(pMeterMeta->vpeerDesc[pMeterMeta->index].vnode); - pShellMsg->numOfSid = htonl(pSql->cmd.count); // number of meters to be inserted + pShellMsg->numOfSid = htonl(pSql->cmd.numOfTablesInSubmit); // number of meters to be inserted // pSql->cmd.payloadLen is set during parse sql routine, so we do not use it here pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT; - tscTrace("%p update submit msg vnode:%s:%d", pSql, taosIpStr(pMeterMeta->vpeerDesc[pMeterMeta->index].ip), htons(pShellMsg->vnode)); - - return msgLen; + tscTrace("%p update submit msg vnode:%s:%d", pSql, taosIpStr(pMeterMeta->vpeerDesc[pMeterMeta->index].ip), + htons(pShellMsg->vnode)); + + return TSDB_CODE_SUCCESS; } void tscUpdateVnodeInQueryMsg(SSqlObj *pSql, char *buf) { SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); char * pStart = buf + tsRpcHeadSize; SQueryMeterMsg *pQueryMsg = (SQueryMeterMsg *)pStart; - if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { // pSchema == NULL, query on meter + if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { // pColumnModel == NULL, query on meter SMeterMeta *pMeterMeta = pMeterMetaInfo->pMeterMeta; pQueryMsg->vnode = htons(pMeterMeta->vpeerDesc[pSql->index].vnode); } else { // query on metric SMetricMeta * pMetricMeta = pMeterMetaInfo->pMetricMeta; - 
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); pQueryMsg->vnode = htons(pVnodeSidList->vpeerDesc[pSql->index].vnode); } } @@ -1461,44 +1531,89 @@ void tscUpdateVnodeInQueryMsg(SSqlObj *pSql, char *buf) { * for meter query, simply return the size <= 1k * for metric query, estimate size according to meter tags */ -static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd) { +static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) { const static int32_t MIN_QUERY_MSG_PKT_SIZE = TSDB_MAX_BYTES_PER_ROW * 5; - int32_t srcColListSize = pCmd->numOfCols * sizeof(SColumnInfo); + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + + int32_t srcColListSize = pQueryInfo->colList.numOfCols * sizeof(SColumnInfo); - int32_t exprSize = sizeof(SSqlFuncExprMsg) * pCmd->fieldsInfo.numOfOutputCols; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + int32_t exprSize = sizeof(SSqlFuncExprMsg) * pQueryInfo->fieldsInfo.numOfOutputCols; + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); // meter query without tags values - if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (!UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryMeterMsg) + srcColListSize + exprSize; } SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); int32_t meterInfoSize = (pMetricMeta->tagLen + sizeof(SMeterSidExtInfo)) * pVnodeSidList->numOfSids; - int32_t outputColumnSize = pCmd->fieldsInfo.numOfOutputCols * sizeof(SSqlFuncExprMsg); + int32_t outputColumnSize = pQueryInfo->fieldsInfo.numOfOutputCols * sizeof(SSqlFuncExprMsg); int32_t size = meterInfoSize + outputColumnSize + srcColListSize 
+ exprSize + MIN_QUERY_MSG_PKT_SIZE; - if (pCmd->tsBuf != NULL) { - size += pCmd->tsBuf->fileSize; + if (pQueryInfo->tsBuf != NULL) { + size += pQueryInfo->tsBuf->fileSize; } return size; } -int tscBuildQueryMsg(SSqlObj *pSql) { +static char *doSerializeTableInfo(SSqlObj *pSql, int32_t numOfMeters, int32_t vnodeId, char *pMsg) { + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, pSql->cmd.clauseIndex, 0); + + SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; + SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; + + tscTrace("%p vid:%d, query on %d meters", pSql, htons(vnodeId), numOfMeters); + if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { +#ifdef _DEBUG_VIEW + tscTrace("%p sid:%d, uid:%" PRIu64, pSql, pMeterMetaInfo->pMeterMeta->sid, pMeterMetaInfo->pMeterMeta->uid); +#endif + SMeterSidExtInfo *pMeterInfo = (SMeterSidExtInfo *)pMsg; + pMeterInfo->sid = htonl(pMeterMeta->sid); + pMeterInfo->uid = htobe64(pMeterMeta->uid); + pMeterInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pMeterMeta->uid)); + pMsg += sizeof(SMeterSidExtInfo); + } else { + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); + + for (int32_t i = 0; i < numOfMeters; ++i) { + SMeterSidExtInfo *pMeterInfo = (SMeterSidExtInfo *)pMsg; + SMeterSidExtInfo *pQueryMeterInfo = tscGetMeterSidInfo(pVnodeSidList, i); + + pMeterInfo->sid = htonl(pQueryMeterInfo->sid); + pMeterInfo->uid = htobe64(pQueryMeterInfo->uid); + pMeterInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pQueryMeterInfo->uid)); + + pMsg += sizeof(SMeterSidExtInfo); + + memcpy(pMsg, pQueryMeterInfo->tags, pMetricMeta->tagLen); + pMsg += pMetricMeta->tagLen; + +#ifdef _DEBUG_VIEW + tscTrace("%p sid:%d, uid:%" PRId64, pSql, pQueryMeterInfo->sid, pQueryMeterInfo->uid); +#endif + } + } + + return pMsg; +} + +int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - int32_t size = tscEstimateQueryMsgSize(pCmd); + 
int32_t size = tscEstimateQueryMsgSize(pCmd, pCmd->clauseIndex); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { tscError("%p failed to malloc for query msg", pSql); return -1; } - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + char * pStart = pCmd->payload + tsRpcHeadSize; SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta; @@ -1518,14 +1633,13 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->vnode = htons(pMeterMeta->vpeerDesc[pMeterMeta->index].vnode); pQueryMsg->uid = pMeterMeta->uid; pQueryMsg->numOfTagsCols = 0; - } else { // query on metric - SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; - if (pCmd->vnodeIdx < 0) { - tscError("%p error vnodeIdx:%d", pSql, pCmd->vnodeIdx); + } else { // query on super table + if (pMeterMetaInfo->vnodeIndex < 0) { + tscError("%p error vnodeIdx:%d", pSql, pMeterMetaInfo->vnodeIndex); return -1; } - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); uint32_t vnodeId = pVnodeSidList->vpeerDesc[pVnodeSidList->index].vnode; numOfMeters = pVnodeSidList->numOfSids; @@ -1541,26 +1655,25 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->numOfSids = htonl(numOfMeters); pQueryMsg->numOfTagsCols = htons(pMeterMetaInfo->numOfTags); - if (pCmd->order.order == TSQL_SO_ASC) { - pQueryMsg->skey = htobe64(pCmd->stime); - pQueryMsg->ekey = htobe64(pCmd->etime); + if (pQueryInfo->order.order == TSQL_SO_ASC) { + pQueryMsg->skey = htobe64(pQueryInfo->stime); + pQueryMsg->ekey = htobe64(pQueryInfo->etime); } else { - pQueryMsg->skey = htobe64(pCmd->etime); - pQueryMsg->ekey = htobe64(pCmd->stime); + pQueryMsg->skey = htobe64(pQueryInfo->etime); + pQueryMsg->ekey = htobe64(pQueryInfo->stime); } - pQueryMsg->num = htonl(0); - 
pQueryMsg->order = htons(pCmd->order.order); - pQueryMsg->orderColId = htons(pCmd->order.orderColId); + pQueryMsg->order = htons(pQueryInfo->order.order); + pQueryMsg->orderColId = htons(pQueryInfo->order.orderColId); - pQueryMsg->interpoType = htons(pCmd->interpoType); + pQueryMsg->interpoType = htons(pQueryInfo->interpoType); - pQueryMsg->limit = htobe64(pCmd->limit.limit); - pQueryMsg->offset = htobe64(pCmd->limit.offset); + pQueryMsg->limit = htobe64(pQueryInfo->limit.limit); + pQueryMsg->offset = htobe64(pQueryInfo->limit.offset); - pQueryMsg->numOfCols = htons(pCmd->colList.numOfCols); + pQueryMsg->numOfCols = htons(pQueryInfo->colList.numOfCols); - if (pCmd->colList.numOfCols <= 0) { + if (pQueryInfo->colList.numOfCols <= 0) { tscError("%p illegal value of numOfCols in query msg: %d", pSql, pMeterMeta->numOfColumns); return -1; } @@ -1570,19 +1683,21 @@ int tscBuildQueryMsg(SSqlObj *pSql) { return -1; } - pQueryMsg->nAggTimeInterval = htobe64(pCmd->nAggTimeInterval); - pQueryMsg->intervalTimeUnit = pCmd->intervalTimeUnit; - if (pCmd->nAggTimeInterval < 0) { - tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pCmd->nAggTimeInterval); + pQueryMsg->nAggTimeInterval = htobe64(pQueryInfo->nAggTimeInterval); + pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit; + pQueryMsg->slidingTime = htobe64(pQueryInfo->nSlidingTime); + + if (pQueryInfo->nAggTimeInterval < 0) { + tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->nAggTimeInterval); return -1; } - if (pCmd->groupbyExpr.numOfGroupCols < 0) { - tscError("%p illegal value of numOfGroupCols in query msg: %d", pSql, pCmd->groupbyExpr.numOfGroupCols); + if (pQueryInfo->groupbyExpr.numOfGroupCols < 0) { + tscError("%p illegal value of numOfGroupCols in query msg: %d", pSql, pQueryInfo->groupbyExpr.numOfGroupCols); return -1; } - pQueryMsg->numOfGroupCols = htons(pCmd->groupbyExpr.numOfGroupCols); + pQueryMsg->numOfGroupCols = 
htons(pQueryInfo->groupbyExpr.numOfGroupCols); if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { // query on meter pQueryMsg->tagLength = 0; @@ -1590,20 +1705,21 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->tagLength = htons(pMetricMeta->tagLen); } - pQueryMsg->queryType = htons(pCmd->type); - pQueryMsg->numOfOutputCols = htons(pCmd->exprsInfo.numOfExprs); + pQueryMsg->queryType = htons(pQueryInfo->type); + pQueryMsg->numOfOutputCols = htons(pQueryInfo->exprsInfo.numOfExprs); - if (pCmd->fieldsInfo.numOfOutputCols < 0) { - tscError("%p illegal value of number of output columns in query msg: %d", pSql, pCmd->fieldsInfo.numOfOutputCols); + if (pQueryInfo->fieldsInfo.numOfOutputCols < 0) { + tscError("%p illegal value of number of output columns in query msg: %d", pSql, + pQueryInfo->fieldsInfo.numOfOutputCols); return -1; } // set column list ids - char * pMsg = (char *)(pQueryMsg->colList) + pCmd->colList.numOfCols * sizeof(SColumnInfo); + char * pMsg = (char *)(pQueryMsg->colList) + pQueryInfo->colList.numOfCols * sizeof(SColumnInfo); SSchema *pSchema = tsGetSchema(pMeterMeta); - for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) { - SColumnBase *pCol = tscColumnBaseInfoGet(&pCmd->colList, i); + for (int32_t i = 0; i < pQueryInfo->colList.numOfCols; ++i) { + SColumnBase *pCol = tscColumnBaseInfoGet(&pQueryInfo->colList, i); SSchema * pColSchema = &pSchema[pCol->colIndex.columnIndex]; if (pCol->colIndex.columnIndex >= pMeterMeta->numOfColumns || pColSchema->type < TSDB_DATA_TYPE_BOOL || @@ -1652,14 +1768,14 @@ int tscBuildQueryMsg(SSqlObj *pSql) { SSqlFuncExprMsg *pSqlFuncExpr = (SSqlFuncExprMsg *)pMsg; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == TSDB_FUNC_ARITHM) { hasArithmeticFunction = true; } - if (!tscValidateColumnId(pCmd, 
pExpr->colInfo.colId)) { + if (!tscValidateColumnId(pMeterMetaInfo, pExpr->colInfo.colId)) { /* column id is not valid according to the cached metermeta, the meter meta is expired */ tscError("%p table schema is not matched with parsed sql", pSql); return -1; @@ -1692,8 +1808,8 @@ int tscBuildQueryMsg(SSqlObj *pSql) { int32_t len = 0; if (hasArithmeticFunction) { - SColumnBase *pColBase = pCmd->colList.pColList; - for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) { + SColumnBase *pColBase = pQueryInfo->colList.pColList; + for (int32_t i = 0; i < pQueryInfo->colList.numOfCols; ++i) { char * name = pSchema[pColBase[i].colIndex.columnIndex].name; int32_t lenx = strlen(name); memcpy(pMsg, name, lenx); @@ -1706,34 +1822,8 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->colNameLen = htonl(len); - // set sids list - tscTrace("%p vid:%d, query on %d meters", pSql, pSql->cmd.vnodeIdx, numOfMeters); - if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { -#ifdef _DEBUG_VIEW - - tscTrace("%p %d", pSql, pMeterMetaInfo->pMeterMeta->sid); -#endif - SMeterSidExtInfo *pSMeterTagInfo = (SMeterSidExtInfo *)pMsg; - pSMeterTagInfo->sid = htonl(pMeterMeta->sid); - pMsg += sizeof(SMeterSidExtInfo); - } else { - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); - - for (int32_t i = 0; i < numOfMeters; ++i) { - SMeterSidExtInfo *pMeterTagInfo = (SMeterSidExtInfo *)pMsg; - SMeterSidExtInfo *pQueryMeterInfo = tscGetMeterSidInfo(pVnodeSidList, i); - - pMeterTagInfo->sid = htonl(pQueryMeterInfo->sid); - pMsg += sizeof(SMeterSidExtInfo); - -#ifdef _DEBUG_VIEW - tscTrace("%p %d", pSql, pQueryMeterInfo->sid); -#endif - - memcpy(pMsg, pQueryMeterInfo->tags, pMetricMeta->tagLen); - pMsg += pMetricMeta->tagLen; - } - } + // serialize the table info (sid, uid, tags) + pMsg = doSerializeTableInfo(pSql, numOfMeters, htons(pQueryMsg->vnode), pMsg); // only include the required tag column schema. 
If a tag is not required, it won't be sent to vnode if (pMeterMetaInfo->numOfTags > 0) { @@ -1753,7 +1843,7 @@ int tscBuildQueryMsg(SSqlObj *pSql) { } } - SSqlGroupbyExpr *pGroupbyExpr = &pCmd->groupbyExpr; + SSqlGroupbyExpr *pGroupbyExpr = &pQueryInfo->groupbyExpr; if (pGroupbyExpr->numOfGroupCols != 0) { pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIndex); pQueryMsg->orderType = htons(pGroupbyExpr->orderType); @@ -1775,10 +1865,10 @@ int tscBuildQueryMsg(SSqlObj *pSql) { } } - if (pCmd->interpoType != TSDB_INTERPO_NONE) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - *((int64_t *)pMsg) = htobe64(pCmd->defaultVal[i]); - pMsg += sizeof(pCmd->defaultVal[0]); + if (pQueryInfo->interpoType != TSDB_INTERPO_NONE) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + *((int64_t *)pMsg) = htobe64(pQueryInfo->defaultVal[i]); + pMsg += sizeof(pQueryInfo->defaultVal[0]); } } @@ -1787,13 +1877,13 @@ int tscBuildQueryMsg(SSqlObj *pSql) { int32_t tsLen = 0; int32_t numOfBlocks = 0; - if (pCmd->tsBuf != NULL) { - STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pCmd->tsBuf, pCmd->vnodeIdx); - assert(QUERY_IS_JOIN_QUERY(pCmd->type) && pBlockInfo != NULL); // this query should not be sent + if (pQueryInfo->tsBuf != NULL) { + STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pQueryInfo->tsBuf, pMeterMetaInfo->vnodeIndex); + assert(QUERY_IS_JOIN_QUERY(pQueryInfo->type) && pBlockInfo != NULL); // this query should not be sent // todo refactor - fseek(pCmd->tsBuf->f, pBlockInfo->offset, SEEK_SET); - fread(pMsg, pBlockInfo->compLen, 1, pCmd->tsBuf->f); + fseek(pQueryInfo->tsBuf->f, pBlockInfo->offset, SEEK_SET); + fread(pMsg, pBlockInfo->compLen, 1, pQueryInfo->tsBuf->f); pMsg += pBlockInfo->compLen; tsLen = pBlockInfo->compLen; @@ -1802,8 +1892,8 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->tsLen = htonl(tsLen); pQueryMsg->tsNumOfBlocks = htonl(numOfBlocks); - if (pCmd->tsBuf != NULL) { - pQueryMsg->tsOrder = 
htonl(pCmd->tsBuf->tsOrder); + if (pQueryInfo->tsBuf != NULL) { + pQueryMsg->tsOrder = htonl(pQueryInfo->tsBuf->tsOrder); } msgLen = pMsg - pStart; @@ -1813,346 +1903,277 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pSql->cmd.msgType = TSDB_MSG_TYPE_QUERY; assert(msgLen + minMsgSize() <= size); - return msgLen; + + return TSDB_CODE_SUCCESS; } -int tscBuildCreateDbMsg(SSqlObj *pSql) { +int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SCreateDbMsg *pCreateDbMsg; char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - pStart = pCmd->payload + tsRpcHeadSize; - pMsg = pStart; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + SSqlCmd *pCmd = &pSql->cmd; + pMsg = doBuildMsgHeader(pSql, &pStart); pCreateDbMsg = (SCreateDbMsg *)pMsg; + + assert(pCmd->numOfClause == 1); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + strncpy(pCreateDbMsg->db, pMeterMetaInfo->name, tListLen(pCreateDbMsg->db)); pMsg += sizeof(SCreateDbMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; + pCmd->payloadLen = pMsg - pStart; pCmd->msgType = TSDB_MSG_TYPE_CREATE_DB; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildCreateDnodeMsg(SSqlObj *pSql) { +int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SCreateDnodeMsg *pCreate; - char * pMsg, *pStart; - int msgLen = 0; - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + char *pMsg, *pStart; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + SSqlCmd *pCmd = &pSql->cmd; - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); pCreate = (SCreateDnodeMsg *)pMsg; - strcpy(pCreate->ip, pMeterMetaInfo->name); + strncpy(pCreate->ip, 
pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); pMsg += sizeof(SCreateDnodeMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_CREATE_PNODE; + pCmd->payloadLen = pMsg - pStart; + pCmd->msgType = TSDB_MSG_TYPE_CREATE_DNODE; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildDropDnodeMsg(SSqlObj *pSql) { - SDropDnodeMsg *pDrop; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); - - pDrop = (SDropDnodeMsg *)pMsg; - strcpy(pDrop->ip, pMeterMetaInfo->name); +int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SCreateAcctMsg *pAlterMsg; + char * pMsg, *pStart; + int msgLen = 0; - pMsg += sizeof(SDropDnodeMsg); + SSqlCmd *pCmd = &pSql->cmd; - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_DROP_PNODE; + pMsg = doBuildMsgHeader(pSql, &pStart); - return msgLen; -} + pAlterMsg = (SCreateAcctMsg *)pMsg; -int tscBuildCreateUserMsg(SSqlObj *pSql) { - SCreateUserMsg *pCreateMsg; - char * pMsg, *pStart; - int msgLen = 0; + SSQLToken *pName = &pInfo->pDCLInfo->user.user; + SSQLToken *pPwd = &pInfo->pDCLInfo->user.passwd; - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + strncpy(pAlterMsg->user, pName->z, pName->n); + strncpy(pAlterMsg->pass, pPwd->z, pPwd->n); - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + pMsg += sizeof(SCreateAcctMsg); - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + SCreateAcctSQL *pAcctOpt = &pInfo->pDCLInfo->acctOpt; - pCreateMsg = (SCreateUserMsg *)pMsg; - strcpy(pCreateMsg->user, pMeterMetaInfo->name); - strcpy(pCreateMsg->pass, 
pCmd->payload); + pAlterMsg->cfg.maxUsers = htonl(pAcctOpt->maxUsers); + pAlterMsg->cfg.maxDbs = htonl(pAcctOpt->maxDbs); + pAlterMsg->cfg.maxTimeSeries = htonl(pAcctOpt->maxTimeSeries); + pAlterMsg->cfg.maxStreams = htonl(pAcctOpt->maxStreams); + pAlterMsg->cfg.maxPointsPerSecond = htonl(pAcctOpt->maxPointsPerSecond); + pAlterMsg->cfg.maxStorage = htobe64(pAcctOpt->maxStorage); + pAlterMsg->cfg.maxQueryTime = htobe64(pAcctOpt->maxQueryTime); + pAlterMsg->cfg.maxConnections = htonl(pAcctOpt->maxConnections); - pMsg += sizeof(SCreateUserMsg); + if (pAcctOpt->stat.n == 0) { + pAlterMsg->cfg.accessState = -1; + } else { + if (pAcctOpt->stat.z[0] == 'r' && pAcctOpt->stat.n == 1) { + pAlterMsg->cfg.accessState = TSDB_VN_READ_ACCCESS; + } else if (pAcctOpt->stat.z[0] == 'w' && pAcctOpt->stat.n == 1) { + pAlterMsg->cfg.accessState = TSDB_VN_WRITE_ACCCESS; + } else if (strncmp(pAcctOpt->stat.z, "all", 3) == 0 && pAcctOpt->stat.n == 3) { + pAlterMsg->cfg.accessState = TSDB_VN_ALL_ACCCESS; + } else if (strncmp(pAcctOpt->stat.z, "no", 2) == 0 && pAcctOpt->stat.n == 2) { + pAlterMsg->cfg.accessState = 0; + } + } msgLen = pMsg - pStart; pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_CREATE_USER; - return msgLen; + pCmd->msgType = TSDB_MSG_TYPE_CREATE_ACCT; + return TSDB_CODE_SUCCESS; } -static int tscBuildAcctMsgImpl(SSqlObj *pSql) { - SCreateAcctMsg *pAlterMsg; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SCreateUserMsg *pAlterMsg; + char * pMsg, *pStart; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + SSqlCmd *pCmd = &pSql->cmd; - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); + pAlterMsg = (SCreateUserMsg *)pMsg; - pAlterMsg = (SCreateAcctMsg *)pMsg; - 
strcpy(pAlterMsg->user, pMeterMetaInfo->name); - strcpy(pAlterMsg->pass, pCmd->payload); + SUserInfo *pUser = &pInfo->pDCLInfo->user; + strncpy(pAlterMsg->user, pUser->user.z, pUser->user.n); + + pAlterMsg->flag = pUser->type; - pMsg += sizeof(SCreateAcctMsg); + if (pUser->type == TSDB_ALTER_USER_PRIVILEGES) { + pAlterMsg->privilege = (char)pCmd->count; + } else if (pUser->type == TSDB_ALTER_USER_PASSWD) { + strncpy(pAlterMsg->pass, pUser->passwd.z, pUser->passwd.n); + } else { // create user password info + strncpy(pAlterMsg->pass, pUser->passwd.z, pUser->passwd.n); + } - pAlterMsg->cfg.maxUsers = htonl((int32_t)pCmd->defaultVal[0]); - pAlterMsg->cfg.maxDbs = htonl((int32_t)pCmd->defaultVal[1]); - pAlterMsg->cfg.maxTimeSeries = htonl((int32_t)pCmd->defaultVal[2]); - pAlterMsg->cfg.maxStreams = htonl((int32_t)pCmd->defaultVal[3]); - pAlterMsg->cfg.maxPointsPerSecond = htonl((int32_t)pCmd->defaultVal[4]); - pAlterMsg->cfg.maxStorage = htobe64(pCmd->defaultVal[5]); - pAlterMsg->cfg.maxQueryTime = htobe64(pCmd->defaultVal[6]); - pAlterMsg->cfg.maxConnections = htonl((int32_t)pCmd->defaultVal[7]); - pAlterMsg->cfg.accessState = (int8_t)pCmd->defaultVal[8]; + pMsg += sizeof(SCreateUserMsg); + pCmd->payloadLen = pMsg - pStart; - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; + if (pUser->type == TSDB_ALTER_USER_PASSWD || pUser->type == TSDB_ALTER_USER_PRIVILEGES) { + pCmd->msgType = TSDB_MSG_TYPE_ALTER_USER; + } else { + pCmd->msgType = TSDB_MSG_TYPE_CREATE_USER; + } - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildCreateAcctMsg(SSqlObj *pSql) { - int msgLen = tscBuildAcctMsgImpl(pSql); - pSql->cmd.msgType = TSDB_MSG_TYPE_CREATE_ACCT; - return msgLen; -} +int32_t tscBuildCfgDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + char * pStart = NULL; + SSqlCmd *pCmd = &pSql->cmd; -int tscBuildAlterAcctMsg(SSqlObj *pSql) { - int msgLen = tscBuildAcctMsgImpl(pSql); - pSql->cmd.msgType = TSDB_MSG_TYPE_ALTER_ACCT; - return msgLen; -} + char *pMsg = 
doBuildMsgHeader(pSql, &pStart); + pMsg += sizeof(SCfgMsg); -int tscBuildAlterUserMsg(SSqlObj *pSql) { - SAlterUserMsg *pAlterMsg; - char * pMsg, *pStart; - int msgLen = 0; + pCmd->payloadLen = pMsg - pStart; + pCmd->msgType = TSDB_MSG_TYPE_CFG_PNODE; - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + return TSDB_CODE_SUCCESS; +} - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; +char *doBuildMsgHeader(SSqlObj *pSql, char **pStart) { + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + + char *pMsg = pCmd->payload + tsRpcHeadSize; + *pStart = pMsg; SMgmtHead *pMgmt = (SMgmtHead *)pMsg; strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); - pAlterMsg = (SCreateUserMsg *)pMsg; - strcpy(pAlterMsg->user, pMeterMetaInfo->name); - strcpy(pAlterMsg->pass, pCmd->payload); - pAlterMsg->flag = pCmd->order.order; - pAlterMsg->privilege = (char)pCmd->count; - - pMsg += sizeof(SAlterUserMsg); - - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_ALTER_USER; + pMsg += sizeof(SMgmtHead); - return msgLen; + return pMsg; } -int tscBuildCfgDnodeMsg(SSqlObj *pSql) { - SCfgMsg *pCfg; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SDropDbMsg *pDropDbMsg; + char * pMsg, *pStart; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + SSqlCmd *pCmd = &pSql->cmd; - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); + pDropDbMsg = (SDropDbMsg *)pMsg; - pCfg = (SCfgMsg *)pMsg; - strcpy(pCfg->ip, pMeterMetaInfo->name); - strcpy(pCfg->config, pCmd->payload); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + strncpy(pDropDbMsg->db, 
pMeterMetaInfo->name, tListLen(pDropDbMsg->db)); + pDropDbMsg->ignoreNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0; - pMsg += sizeof(SCfgMsg); + pMsg += sizeof(SDropDbMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_CFG_PNODE; + pCmd->payloadLen = pMsg - pStart; + pCmd->msgType = TSDB_MSG_TYPE_DROP_DB; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildDropDbMsg(SSqlObj *pSql) { - SDropDbMsg *pDropDbMsg; - char * pMsg, *pStart; - int msgLen = 0; +int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SDropTableMsg *pDropTableMsg; + char * pMsg, *pStart; + int msgLen = 0; - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlCmd *pCmd = &pSql->cmd; - pMsg = pCmd->payload + tsRpcHeadSize; + //pMsg = doBuildMsgHeader(pSql, &pStart); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + + pMsg = pCmd->payload + tsRpcHeadSize; pStart = pMsg; SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); + tscGetDBInfoFromMeterId(pMeterMetaInfo->name, pMgmt->db); pMsg += sizeof(SMgmtHead); - pDropDbMsg = (SDropDbMsg *)pMsg; - strncpy(pDropDbMsg->db, pMeterMetaInfo->name, tListLen(pDropDbMsg->db)); + pDropTableMsg = (SDropTableMsg *)pMsg; - pDropDbMsg->ignoreNotExists = htons(pCmd->existsCheck ? 1 : 0); + strcpy(pDropTableMsg->meterId, pMeterMetaInfo->name); - pMsg += sizeof(SDropDbMsg); + pDropTableMsg->igNotExists = pInfo->pDCLInfo->existsCheck ? 
1 : 0; + pMsg += sizeof(SDropTableMsg); msgLen = pMsg - pStart; pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_DROP_DB; + pCmd->msgType = TSDB_MSG_TYPE_DROP_TABLE; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildDropUserMsg(SSqlObj *pSql) { - SDropUserMsg *pDropMsg; - char * pMsg, *pStart; - int msgLen = 0; +int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SDropDnodeMsg *pDrop; + char * pMsg, *pStart; SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); + pDrop = (SDropDnodeMsg *)pMsg; - pDropMsg = (SDropUserMsg *)pMsg; - strcpy(pDropMsg->user, pMeterMetaInfo->name); + strcpy(pDrop->ip, pMeterMetaInfo->name); - pMsg += sizeof(SDropUserMsg); + pMsg += sizeof(SDropDnodeMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_DROP_USER; + pCmd->payloadLen = pMsg - pStart; + pCmd->msgType = TSDB_MSG_TYPE_DROP_DNODE; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildDropAcctMsg(SSqlObj *pSql) { - SDropAcctMsg *pDropMsg; +int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { + SDropUserMsg *pDropMsg; char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + SSqlCmd *pCmd = &pSql->cmd; + + pMsg = doBuildMsgHeader(pSql, &pStart); + pDropMsg = (SDropUserMsg *)pMsg; - pDropMsg = (SDropAcctMsg *)pMsg; + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 
pCmd->clauseIndex, 0); strcpy(pDropMsg->user, pMeterMetaInfo->name); - pMsg += sizeof(SDropAcctMsg); + pMsg += sizeof(SDropUserMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_DROP_ACCT; + pCmd->payloadLen = pMsg - pStart; + pCmd->msgType = TSDB_MSG_TYPE_DROP_USER; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildUseDbMsg(SSqlObj *pSql) { +int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SUseDbMsg *pUseDbMsg; char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + SSqlCmd *pCmd = &pSql->cmd; + pMsg = doBuildMsgHeader(pSql, &pStart); pUseDbMsg = (SUseDbMsg *)pMsg; + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); strcpy(pUseDbMsg->db, pMeterMetaInfo->name); pMsg += sizeof(SUseDbMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; + pCmd->payloadLen = pMsg - pStart; pCmd->msgType = TSDB_MSG_TYPE_USE_DB; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildShowMsg(SSqlObj *pSql) { +int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SShowMsg *pShowMsg; char * pMsg, *pStart; int msgLen = 0; @@ -2160,10 +2181,6 @@ int tscBuildShowMsg(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; STscObj *pObj = pSql->pTscObj; - assert(pCmd->payloadLen < TSDB_SQLCMD_SIZE); - char payload[TSDB_SQLCMD_SIZE] = {0}; - memcpy(payload, pCmd->payload, pCmd->payloadLen); - int32_t size = minMsgSize() + sizeof(SMgmtHead) + sizeof(SShowTableMsg) + pCmd->payloadLen + TSDB_EXTRA_PAYLOAD_SIZE; if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { tscError("%p failed to malloc for show msg", pSql); @@ -2175,11 +2192,11 @@ int tscBuildShowMsg(SSqlObj *pSql) { SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - 
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); size_t nameLen = strlen(pMeterMetaInfo->name); if (nameLen > 0) { - strcpy(pMgmt->db, pMeterMetaInfo->name); + strcpy(pMgmt->db, pMeterMetaInfo->name); // prefix is set here } else { strcpy(pMgmt->db, pObj->db); } @@ -2187,151 +2204,109 @@ int tscBuildShowMsg(SSqlObj *pSql) { pMsg += sizeof(SMgmtHead); pShowMsg = (SShowMsg *)pMsg; - pShowMsg->type = pCmd->showType; + SShowInfo *pShowInfo = &pInfo->pDCLInfo->showOpt; - if ((pShowMsg->type == TSDB_MGMT_TABLE_TABLE || pShowMsg->type == TSDB_MGMT_TABLE_METRIC || pShowMsg->type == TSDB_MGMT_TABLE_VNODES ) && pCmd->payloadLen != 0) { - // only show tables support wildcard query - pShowMsg->payloadLen = htons(pCmd->payloadLen); - memcpy(pShowMsg->payload, payload, pCmd->payloadLen); - } + pShowMsg->type = pShowInfo->showType; + + if (pShowInfo->showType != TSDB_MGMT_TABLE_VNODES) { + SSQLToken *pPattern = &pShowInfo->pattern; + if (pPattern->type > 0) { // only show tables support wildcard query + strncpy(pShowMsg->payload, pPattern->z, pPattern->n); + pShowMsg->payloadLen = htons(pPattern->n); + } + pMsg += (sizeof(SShowTableMsg) + pPattern->n); + } else { + SSQLToken *pIpAddr = &pShowInfo->prefix; + assert(pIpAddr->n > 0 && pIpAddr->type > 0); - pMsg += (sizeof(SShowTableMsg) + pCmd->payloadLen); + strncpy(pShowMsg->payload, pIpAddr->z, pIpAddr->n); + pShowMsg->payloadLen = htons(pIpAddr->n); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; + pMsg += (sizeof(SShowTableMsg) + pIpAddr->n); + } + + pCmd->payloadLen = pMsg - pStart; pCmd->msgType = TSDB_MSG_TYPE_SHOW; assert(msgLen + minMsgSize() <= size); - return msgLen; + + return TSDB_CODE_SUCCESS; } -int tscBuildKillQueryMsg(SSqlObj *pSql) { +int32_t tscBuildKillMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SKillQuery *pKill; char * pMsg, *pStart; - int msgLen = 0; SSqlCmd *pCmd = &pSql->cmd; - STscObj *pObj = pSql->pTscObj; - 
pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); + pMsg = doBuildMsgHeader(pSql, &pStart); pKill = (SKillQuery *)pMsg; - pKill->handle = 0; - strcpy(pKill->queryId, pCmd->payload); - - pMsg += sizeof(SKillQuery); - - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_KILL_QUERY; - - return msgLen; -} - -int tscBuildKillStreamMsg(SSqlObj *pSql) { - SKillStream *pKill; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd *pCmd = &pSql->cmd; - STscObj *pObj = pSql->pTscObj; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); - - pKill = (SKillStream *)pMsg; - pKill->handle = 0; - strcpy(pKill->queryId, pCmd->payload); - - pMsg += sizeof(SKillStream); - - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_KILL_STREAM; - - return msgLen; -} - -int tscBuildKillConnectionMsg(SSqlObj *pSql) { - SKillConnection *pKill; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd *pCmd = &pSql->cmd; - STscObj *pObj = pSql->pTscObj; - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - strcpy(pMgmt->db, pObj->db); - pMsg += sizeof(SMgmtHead); - pKill = (SKillStream *)pMsg; pKill->handle = 0; - strcpy(pKill->queryId, pCmd->payload); + strncpy(pKill->queryId, pInfo->pDCLInfo->ip.z, pInfo->pDCLInfo->ip.n); - pMsg += sizeof(SKillStream); + pMsg += sizeof(SKillQuery); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_KILL_CONNECTION; + pCmd->payloadLen = pMsg - pStart; - return msgLen; + switch (pCmd->command) { + case TSDB_SQL_KILL_QUERY: + pCmd->msgType = TSDB_MSG_TYPE_KILL_QUERY; + break; + case TSDB_SQL_KILL_CONNECTION: + pCmd->msgType = TSDB_MSG_TYPE_KILL_CONNECTION; + break; + case TSDB_SQL_KILL_STREAM: + pCmd->msgType = 
TSDB_MSG_TYPE_KILL_STREAM; + break; + } + return TSDB_CODE_SUCCESS; } -int tscEstimateCreateTableMsgLength(SSqlObj *pSql) { +int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &(pSql->cmd); int32_t size = minMsgSize() + sizeof(SMgmtHead) + sizeof(SCreateTableMsg); - if (pCmd->numOfCols == 0 && pCmd->count == 0) { + SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo; + if (pCreateTableInfo->type == TSQL_CREATE_TABLE_FROM_STABLE) { size += sizeof(STagData); } else { size += sizeof(SSchema) * (pCmd->numOfCols + pCmd->count); } - if (strlen(pCmd->payload) > 0) size += strlen(pCmd->payload) + 1; + if (pCreateTableInfo->pSelect != NULL) { + size += (pCreateTableInfo->pSelect->selectToken.n + 1); + } return size + TSDB_EXTRA_PAYLOAD_SIZE; } -int tscBuildCreateTableMsg(SSqlObj *pSql) { +int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SCreateTableMsg *pCreateTableMsg; char * pMsg, *pStart; int msgLen = 0; SSchema * pSchema; int size = 0; - // tmp variable to - // 1. save tags data in order to avoid too long tag values overlapped by header - // 2. save the selection clause, in create table as .. 
sql string - char *tmpData = calloc(1, pSql->cmd.allocSize); - - // STagData is in binary format, strncpy is not available - memcpy(tmpData, pSql->cmd.payload, pSql->cmd.allocSize); + SSqlCmd *pCmd = &pSql->cmd; - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); // Reallocate the payload size - size = tscEstimateCreateTableMsgLength(pSql); + size = tscEstimateCreateTableMsgLength(pSql, pInfo); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { tscError("%p failed to malloc for create table msg", pSql); - return -1; + return TSDB_CODE_CLI_OUT_OF_MEMORY; } pMsg = pCmd->payload + tsRpcHeadSize; pStart = pMsg; SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - // use dbinfo from meterid without modifying current db info + + // use dbinfo from table id without modifying current db info tscGetDBInfoFromMeterId(pMeterMetaInfo->name, pMgmt->db); pMsg += sizeof(SMgmtHead); @@ -2339,70 +2314,69 @@ int tscBuildCreateTableMsg(SSqlObj *pSql) { pCreateTableMsg = (SCreateTableMsg *)pMsg; strcpy(pCreateTableMsg->meterId, pMeterMetaInfo->name); - pCreateTableMsg->igExists = pCmd->existsCheck ? 1 : 0; + SCreateTableSQL *pCreateTable = pInfo->pCreateTableInfo; + + pCreateTableMsg->igExists = pCreateTable->existCheck ? 
1 : 0; + pCreateTableMsg->numOfColumns = htons(pCmd->numOfCols); pCreateTableMsg->numOfTags = htons(pCmd->count); - pMsg = (char *)pCreateTableMsg->schema; pCreateTableMsg->sqlLen = 0; - short sqlLen = (short)(strlen(tmpData) + 1); + pMsg = (char *)pCreateTableMsg->schema; - if (pCmd->numOfCols == 0 && pCmd->count == 0) { - // create by using metric, tags value - memcpy(pMsg, tmpData, sizeof(STagData)); + int8_t type = pInfo->pCreateTableInfo->type; + if (type == TSQL_CREATE_TABLE_FROM_STABLE) { // create by using super table, tags value + memcpy(pMsg, &pInfo->pCreateTableInfo->usingInfo.tagdata, sizeof(STagData)); pMsg += sizeof(STagData); - } else { - // create metric/create normal meter + } else { // create (super) table pSchema = pCreateTableMsg->schema; + for (int i = 0; i < pCmd->numOfCols + pCmd->count; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); pSchema->type = pField->type; strcpy(pSchema->name, pField->name); pSchema->bytes = htons(pField->bytes); + pSchema++; } pMsg = (char *)pSchema; + if (type == TSQL_CREATE_STREAM) { // check if it is a stream sql + SQuerySQL *pQuerySql = pInfo->pCreateTableInfo->pSelect; - // check if it is a stream sql - if (sqlLen > 1) { - memcpy(pMsg, tmpData, sqlLen); - pMsg[sqlLen - 1] = 0; - - pCreateTableMsg->sqlLen = htons(sqlLen); - pMsg += sqlLen; + strncpy(pMsg, pQuerySql->selectToken.z, pQuerySql->selectToken.n + 1); + pCreateTableMsg->sqlLen = htons(pQuerySql->selectToken.n + 1); + pMsg += pQuerySql->selectToken.n + 1; } } - tfree(tmpData); - tscClearFieldInfo(&pCmd->fieldsInfo); + tscClearFieldInfo(&pQueryInfo->fieldsInfo); msgLen = pMsg - pStart; pCmd->payloadLen = msgLen; pCmd->msgType = TSDB_MSG_TYPE_CREATE_TABLE; assert(msgLen + minMsgSize() <= size); - return msgLen; + return TSDB_CODE_SUCCESS; } int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) { - return minMsgSize() + sizeof(SMgmtHead) + sizeof(SAlterTableMsg) + sizeof(SSchema) * 
pCmd->numOfCols + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + return minMsgSize() + sizeof(SMgmtHead) + sizeof(SAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo) + TSDB_EXTRA_PAYLOAD_SIZE; } -int tscBuildAlterTableMsg(SSqlObj *pSql) { +int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SAlterTableMsg *pAlterTableMsg; char * pMsg, *pStart; int msgLen = 0; int size = 0; - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); - char buf[TSDB_MAX_TAGS_LEN] = {0}; - int32_t len = (TSDB_MAX_TAGS_LEN < pCmd->allocSize) ? TSDB_MAX_TAGS_LEN : pCmd->allocSize; - memcpy(buf, pCmd->payload, len); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); size = tscEstimateAlterTableMsgLength(pCmd); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { @@ -2417,15 +2391,18 @@ int tscBuildAlterTableMsg(SSqlObj *pSql) { tscGetDBInfoFromMeterId(pMeterMetaInfo->name, pMgmt->db); pMsg += sizeof(SMgmtHead); + SAlterTableSQL *pAlterInfo = pInfo->pAlterInfo; + pAlterTableMsg = (SAlterTableMsg *)pMsg; strcpy(pAlterTableMsg->meterId, pMeterMetaInfo->name); - pAlterTableMsg->type = htons(pCmd->count); - pAlterTableMsg->numOfCols = htons(pCmd->numOfCols); - memcpy(pAlterTableMsg->tagVal, buf, TSDB_MAX_TAGS_LEN); + pAlterTableMsg->type = htons(pAlterInfo->type); + + pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo)); + memcpy(pAlterTableMsg->tagVal, pAlterInfo->tagData.data, TSDB_MAX_TAGS_LEN); SSchema *pSchema = pAlterTableMsg->schema; - for (int i = 0; i < pCmd->numOfCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + for (int i = 0; i < tscNumOfFields(pQueryInfo); ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); pSchema->type = pField->type; strcpy(pSchema->name, pField->name); @@ -2440,17 +2417,18 @@ int tscBuildAlterTableMsg(SSqlObj *pSql) { 
pCmd->msgType = TSDB_MSG_TYPE_ALTER_TABLE; assert(msgLen + minMsgSize() <= size); - return msgLen; + + return TSDB_CODE_SUCCESS; } -int tscAlterDbMsg(SSqlObj *pSql) { +int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SAlterDbMsg *pAlterDbMsg; char * pMsg, *pStart; int msgLen = 0; SSqlCmd * pCmd = &pSql->cmd; STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); pStart = pCmd->payload + tsRpcHeadSize; pMsg = pStart; @@ -2468,38 +2446,10 @@ int tscAlterDbMsg(SSqlObj *pSql) { pCmd->payloadLen = msgLen; pCmd->msgType = TSDB_MSG_TYPE_ALTER_DB; - return msgLen; -} - -int tscBuildDropTableMsg(SSqlObj *pSql) { - SDropTableMsg *pDropTableMsg; - char * pMsg, *pStart; - int msgLen = 0; - - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - - pMsg = pCmd->payload + tsRpcHeadSize; - pStart = pMsg; - - SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - tscGetDBInfoFromMeterId(pMeterMetaInfo->name, pMgmt->db); - pMsg += sizeof(SMgmtHead); - - pDropTableMsg = (SDropTableMsg *)pMsg; - strcpy(pDropTableMsg->meterId, pMeterMetaInfo->name); - - pDropTableMsg->igNotExists = pCmd->existsCheck ? 
1 : 0; - pMsg += sizeof(SDropTableMsg); - - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; - pCmd->msgType = TSDB_MSG_TYPE_DROP_TABLE; - - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql) { +int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char *pMsg, *pStart; int msgLen = 0; @@ -2510,7 +2460,8 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql) { SMgmtHead *pMgmt = (SMgmtHead *)pMsg; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); size_t nameLen = strlen(pMeterMetaInfo->name); if (nameLen > 0) { @@ -2521,35 +2472,35 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql) { pMsg += sizeof(SMgmtHead); - *((uint64_t *) pMsg) = pSql->res.qhandle; + *((uint64_t *)pMsg) = pSql->res.qhandle; pMsg += sizeof(pSql->res.qhandle); - *((uint16_t*) pMsg) = htons(pCmd->type); - pMsg += sizeof(pCmd->type); + *((uint16_t *)pMsg) = htons(pQueryInfo->type); + pMsg += sizeof(pQueryInfo->type); msgLen = pMsg - pStart; pCmd->payloadLen = msgLen; pCmd->msgType = TSDB_MSG_TYPE_RETRIEVE; - return msgLen; + return TSDB_CODE_SUCCESS; } -static int tscSetResultPointer(SSqlCmd *pCmd, SSqlRes *pRes) { - if (tscCreateResPointerInfo(pCmd, pRes) != TSDB_CODE_SUCCESS) { +static int tscSetResultPointer(SQueryInfo *pQueryInfo, SSqlRes *pRes) { + if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) { return pRes->code; } - for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); - int16_t offset = tscFieldInfoGetOffset(pCmd, i); + for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); + int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i); pRes->bytes[i] = pField->bytes; - if (pCmd->order.order == TSQL_SO_DESC) { - pRes->bytes[i] = 
-pRes->bytes[i]; - pRes->tsrow[i] = ((pRes->data + offset * pRes->numOfRows) + (pRes->numOfRows - 1) * pField->bytes); - } else { +// if (pQueryInfo->order.order == TSQL_SO_DESC) { +// pRes->bytes[i] = -pRes->bytes[i]; +// pRes->tsrow[i] = ((pRes->data + offset * pRes->numOfRows) + (pRes->numOfRows - 1) * pField->bytes); +// } else { pRes->tsrow[i] = (pRes->data + offset * pRes->numOfRows); - } +// } } return 0; @@ -2565,6 +2516,8 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + pRes->code = TSDB_CODE_SUCCESS; if (pRes->rspType == 0) { @@ -2572,9 +2525,7 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) { pRes->row = 0; pRes->rspType = 1; - tscSetResultPointer(pCmd, pRes); - pRes->row = 0; - + tscSetResultPointer(pQueryInfo, pRes); } else { tscResetForNextRetrieve(pRes); } @@ -2593,7 +2544,7 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) { int tscProcessDescribeTableRsp(SSqlObj *pSql) { SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); int32_t numOfRes = pMeterMetaInfo->pMeterMeta->numOfColumns + pMeterMetaInfo->pMeterMeta->numOfTags; @@ -2601,11 +2552,13 @@ int tscProcessDescribeTableRsp(SSqlObj *pSql) { } int tscProcessTagRetrieveRsp(SSqlObj *pSql) { - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlCmd *pCmd = &pSql->cmd; + + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); int32_t numOfRes = 0; - if (tscSqlExprGet(pCmd, 0)->functionId == TSDB_FUNC_TAGPRJ) { + if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_TAGPRJ) { numOfRes = 
pMeterMetaInfo->pMetricMeta->numOfMeters; } else { numOfRes = 1; // for count function, there is only one output. @@ -2617,18 +2570,19 @@ int tscProcessRetrieveMetricRsp(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; - pRes->code = tscLocalDoReduce(pSql); + pRes->code = tscDoLocalreduce(pSql); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) { - tscSetResultPointer(pCmd, pRes); + tscSetResultPointer(pQueryInfo, pRes); } pRes->row = 0; - uint8_t code = pSql->res.code; + uint8_t code = pRes->code; if (pSql->fp) { // async retrieve metric data - if (pSql->res.code == TSDB_CODE_SUCCESS) { - (*pSql->fp)(pSql->param, pSql, pSql->res.numOfRows); + if (pRes->code == TSDB_CODE_SUCCESS) { + (*pSql->fp)(pSql->param, pSql, pRes->numOfRows); } else { tscQueueAsyncRes(pSql); } @@ -2639,10 +2593,9 @@ int tscProcessRetrieveMetricRsp(SSqlObj *pSql) { int tscProcessEmptyResultRsp(SSqlObj *pSql) { return tscLocalResultCommonBuilder(pSql, 0); } -int tscBuildConnectMsg(SSqlObj *pSql) { +int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SConnectMsg *pConnect; char * pMsg, *pStart; - int msgLen = 0; SSqlCmd *pCmd = &pSql->cmd; STscObj *pObj = pSql->pTscObj; @@ -2656,16 +2609,17 @@ int tscBuildConnectMsg(SSqlObj *pSql) { db = (db == NULL) ? 
pObj->db : db + 1; strcpy(pConnect->db, db); + strcpy(pConnect->clientVersion, version); + pMsg += sizeof(SConnectMsg); - msgLen = pMsg - pStart; - pCmd->payloadLen = msgLen; + pCmd->payloadLen = pMsg - pStart; pCmd->msgType = TSDB_MSG_TYPE_CONNECT; - return msgLen; + return TSDB_CODE_SUCCESS; } -int tscBuildMeterMetaMsg(SSqlObj *pSql) { +int tscBuildMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SMeterInfoMsg *pInfoMsg; char * pMsg, *pStart; int msgLen = 0; @@ -2673,13 +2627,18 @@ int tscBuildMeterMetaMsg(SSqlObj *pSql) { char *tmpData = 0; if (pSql->cmd.allocSize > 0) { tmpData = calloc(1, pSql->cmd.allocSize); - if (NULL == tmpData) return -1; + if (NULL == tmpData) { + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } + // STagData is in binary format, strncpy is not available memcpy(tmpData, pSql->cmd.payload, pSql->cmd.allocSize); } - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); pMsg = pCmd->payload + tsRpcHeadSize; pStart = pMsg; @@ -2691,10 +2650,10 @@ int tscBuildMeterMetaMsg(SSqlObj *pSql) { pInfoMsg = (SMeterInfoMsg *)pMsg; strcpy(pInfoMsg->meterId, pMeterMetaInfo->name); - pInfoMsg->createFlag = htons((uint16_t)pCmd->defaultVal[0]); + pInfoMsg->createFlag = htons(pSql->cmd.createOnDemand ? 1 : 0); pMsg += sizeof(SMeterInfoMsg); - if (pCmd->defaultVal[0] != 0) { + if (pSql->cmd.createOnDemand) { memcpy(pInfoMsg->tags, tmpData, sizeof(STagData)); pMsg += sizeof(STagData); } @@ -2706,7 +2665,7 @@ int tscBuildMeterMetaMsg(SSqlObj *pSql) { tfree(tmpData); assert(msgLen + minMsgSize() <= pCmd->allocSize); - return msgLen; + return TSDB_CODE_SUCCESS; } /** @@ -2714,7 +2673,7 @@ int tscBuildMeterMetaMsg(SSqlObj *pSql) { * | SMgmtHead | SMultiMeterInfoMsg | meterId0 | meterId1 | meterId2 | ...... 
* no used 4B **/ -int tscBuildMultiMeterMetaMsg(SSqlObj *pSql) { +int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; // copy payload content to temp buff @@ -2752,31 +2711,38 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql) { static int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) { const int32_t defaultSize = minMsgSize() + sizeof(SMetricMetaMsg) + sizeof(SMgmtHead) + sizeof(int16_t) * TSDB_MAX_TAGS; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); int32_t n = 0; - for (int32_t i = 0; i < pCmd->tagCond.numOfTagCond; ++i) { - n += pCmd->tagCond.cond[i].cond.n; + for (int32_t i = 0; i < pQueryInfo->tagCond.numOfTagCond; ++i) { + n += strlen(pQueryInfo->tagCond.cond[i].cond); + } + + int32_t tagLen = n * TSDB_NCHAR_SIZE; + if (pQueryInfo->tagCond.tbnameCond.cond != NULL) { + tagLen += strlen(pQueryInfo->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE; } - int32_t tagLen = n * TSDB_NCHAR_SIZE + pCmd->tagCond.tbnameCond.cond.n * TSDB_NCHAR_SIZE; int32_t joinCondLen = (TSDB_METER_ID_LEN + sizeof(int16_t)) * 2; - int32_t elemSize = sizeof(SMetricMetaElemMsg) * pCmd->numOfTables; + int32_t elemSize = sizeof(SMetricMetaElemMsg) * pQueryInfo->numOfTables; int32_t len = tagLen + joinCondLen + elemSize + defaultSize; return MAX(len, TSDB_DEFAULT_PAYLOAD_SIZE); } -int tscBuildMetricMetaMsg(SSqlObj *pSql) { +int tscBuildMetricMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SMetricMetaMsg *pMetaMsg; char * pMsg, *pStart; int msgLen = 0; int tableIndex = 0; - SSqlCmd * pCmd = &pSql->cmd; - STagCond *pTagCond = &pCmd->tagCond; + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + STagCond *pTagCond = &pQueryInfo->tagCond; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); int32_t size = tscEstimateMetricMetaMsgSize(pCmd); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { @@ 
-2793,7 +2759,7 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { pMsg += sizeof(SMgmtHead); pMetaMsg = (SMetricMetaMsg *)pMsg; - pMetaMsg->numOfMeters = htonl(pCmd->numOfTables); + pMetaMsg->numOfMeters = htonl(pQueryInfo->numOfTables); pMsg += sizeof(SMetricMetaMsg); @@ -2815,8 +2781,8 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { *(int16_t *)pMsg = pTagCond->joinInfo.right.tagCol; pMsg += sizeof(int16_t); - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, i); uint64_t uid = pMeterMetaInfo->pMeterMeta->uid; offset = pMsg - (char *)pMetaMsg; @@ -2830,8 +2796,9 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { if (pTagCond->numOfTagCond > 0) { SCond *pCond = tsGetMetricQueryCondPos(pTagCond, uid); if (pCond != NULL) { - condLen = pCond->cond.n + 1; - bool ret = taosMbsToUcs4(pCond->cond.z, pCond->cond.n, pMsg, pCond->cond.n * TSDB_NCHAR_SIZE); + condLen = strlen(pCond->cond) + 1; + + bool ret = taosMbsToUcs4(pCond->cond, condLen, pMsg, condLen * TSDB_NCHAR_SIZE); if (!ret) { tscError("%p mbs to ucs4 failed:%s", pSql, tsGetMetricQueryCondPos(pTagCond, uid)); return 0; @@ -2850,15 +2817,17 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { offset = pMsg - (char *)pMetaMsg; pElem->tableCond = htonl(offset); - pElem->tableCondLen = htonl(pTagCond->tbnameCond.cond.n); - memcpy(pMsg, pTagCond->tbnameCond.cond.z, pTagCond->tbnameCond.cond.n); - pMsg += pTagCond->tbnameCond.cond.n; + uint32_t len = strlen(pTagCond->tbnameCond.cond); + pElem->tableCondLen = htonl(len); + + memcpy(pMsg, pTagCond->tbnameCond.cond, len); + pMsg += len; } - SSqlGroupbyExpr *pGroupby = &pCmd->groupbyExpr; + SSqlGroupbyExpr *pGroupby = &pQueryInfo->groupbyExpr; - if (pGroupby->tableIndex != i) { + if (pGroupby->tableIndex != i && pGroupby->numOfGroupCols > 0) { pElem->orderType = 0; pElem->orderIndex = 0; pElem->numOfGroupCols = 0; @@ 
-2874,17 +2843,16 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { offset = pMsg - (char *)pMetaMsg; pElem->groupbyTagColumnList = htonl(offset); - for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) { - SColIndexEx *pCol = &pCmd->groupbyExpr.columnInfo[j]; + for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) { + SColIndexEx *pCol = &pQueryInfo->groupbyExpr.columnInfo[j]; + SColIndexEx *pDestCol = (SColIndexEx *)pMsg; - *((int16_t *)pMsg) = pCol->colId; - pMsg += sizeof(pCol->colId); + pDestCol->colIdxInBuf = 0; + pDestCol->colIdx = htons(pCol->colIdx); + pDestCol->colId = htons(pDestCol->colId); + pDestCol->flag = htons(pDestCol->flag); - *((int16_t *)pMsg) += pCol->colIdx; - pMsg += sizeof(pCol->colIdx); - - *((int16_t *)pMsg) += pCol->flag; - pMsg += sizeof(pCol->flag); + pMsg += sizeof(SColIndexEx); } } } @@ -2900,7 +2868,8 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) { pCmd->payloadLen = msgLen; pCmd->msgType = TSDB_MSG_TYPE_METRIC_META; assert(msgLen + minMsgSize() <= size); - return msgLen; + + return TSDB_CODE_SUCCESS; } int tscEstimateHeartBeatMsgLength(SSqlObj *pSql) { @@ -2926,7 +2895,7 @@ int tscEstimateHeartBeatMsgLength(SSqlObj *pSql) { return size + TSDB_EXTRA_PAYLOAD_SIZE; } -int tscBuildHeartBeatMsg(SSqlObj *pSql) { +int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char *pMsg, *pStart; int msgLen = 0; int size = 0; @@ -2960,28 +2929,6 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql) { return msgLen; } -int tscProcessRetrieveRspFromMgmt(SSqlObj *pSql) { - SSqlRes *pRes = &pSql->res; - SSqlCmd *pCmd = &pSql->cmd; - STscObj *pObj = pSql->pTscObj; - - SRetrieveMeterRsp *pRetrieve = (SRetrieveMeterRsp *)(pRes->pRsp); - pRes->numOfRows = htonl(pRetrieve->numOfRows); - pRes->precision = htons(pRes->precision); - - pRes->data = pRetrieve->data; - - tscSetResultPointer(pCmd, pRes); - - if (pRes->numOfRows == 0) { - taosAddConnIntoCache(tscConnCache, pSql->thandle, pSql->ip, pSql->vnode, pObj->user); - pSql->thandle = NULL; - } - 
- pRes->row = 0; - return 0; -} - int tscProcessMeterMetaRsp(SSqlObj *pSql) { SMeterMeta *pMeta; SSchema * pSchema; @@ -3010,17 +2957,12 @@ int tscProcessMeterMetaRsp(SSqlObj *pSql) { pMeta->numOfColumns = htons(pMeta->numOfColumns); - if (pMeta->numOfTags > TSDB_MAX_TAGS || pMeta->numOfTags < 0) { - tscError("invalid tag value count:%d", pMeta->numOfTags); - return TSDB_CODE_INVALID_VALUE; - } - if (pMeta->numOfTags > TSDB_MAX_TAGS || pMeta->numOfTags < 0) { tscError("invalid numOfTags:%d", pMeta->numOfTags); return TSDB_CODE_INVALID_VALUE; } - if (pMeta->numOfColumns > TSDB_MAX_COLUMNS || pMeta->numOfColumns < 0) { + if (pMeta->numOfColumns > TSDB_MAX_COLUMNS || pMeta->numOfColumns <= 0) { tscError("invalid numOfColumns:%d", pMeta->numOfColumns); return TSDB_CODE_INVALID_VALUE; } @@ -3063,11 +3005,12 @@ int tscProcessMeterMetaRsp(SSqlObj *pSql) { pMeta->index = 0; // todo add one more function: taosAddDataIfNotExists(); - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMeterMeta), false); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); + assert(pMeterMetaInfo->pMeterMeta == NULL); pMeterMetaInfo->pMeterMeta = (SMeterMeta *)taosAddDataIntoCache(tscCacheHandle, pMeterMetaInfo->name, (char *)pMeta, size, tsMeterMetaKeepTimer); + // todo handle out of memory case if (pMeterMetaInfo->pMeterMeta == NULL) return 0; return TSDB_CODE_OTHERS; @@ -3226,51 +3169,55 @@ int tscProcessMetricMetaRsp(SSqlObj *pSql) { size += pMeta->numOfVnodes * sizeof(SVnodeSidList *) + pMeta->numOfMeters * sizeof(SMeterSidExtInfo *); - char *pStr = calloc(1, size); - if (pStr == NULL) { + char *pBuf = calloc(1, size); + if (pBuf == NULL) { pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; goto _error_clean; } - SMetricMeta *pNewMetricMeta = (SMetricMeta *)pStr; + SMetricMeta *pNewMetricMeta = (SMetricMeta *)pBuf; metricMetaList[k] = pNewMetricMeta; pNewMetricMeta->numOfMeters = 
pMeta->numOfMeters; pNewMetricMeta->numOfVnodes = pMeta->numOfVnodes; pNewMetricMeta->tagLen = pMeta->tagLen; - pStr = pStr + sizeof(SMetricMeta) + pNewMetricMeta->numOfVnodes * sizeof(SVnodeSidList *); + pBuf = pBuf + sizeof(SMetricMeta) + pNewMetricMeta->numOfVnodes * sizeof(SVnodeSidList *); for (int32_t i = 0; i < pMeta->numOfVnodes; ++i) { SVnodeSidList *pSidLists = (SVnodeSidList *)rsp; - memcpy(pStr, pSidLists, sizeof(SVnodeSidList)); + memcpy(pBuf, pSidLists, sizeof(SVnodeSidList)); - pNewMetricMeta->list[i] = pStr - (char *)pNewMetricMeta; // offset value - SVnodeSidList *pLists = (SVnodeSidList *)pStr; + pNewMetricMeta->list[i] = pBuf - (char *)pNewMetricMeta; // offset value + SVnodeSidList *pLists = (SVnodeSidList *)pBuf; tscTrace("%p metricmeta:vid:%d,numOfMeters:%d", pSql, i, pLists->numOfSids); - pStr += sizeof(SVnodeSidList) + sizeof(SMeterSidExtInfo *) * pSidLists->numOfSids; + pBuf += sizeof(SVnodeSidList) + sizeof(SMeterSidExtInfo *) * pSidLists->numOfSids; rsp += sizeof(SVnodeSidList); - size_t sidSize = sizeof(SMeterSidExtInfo) + pNewMetricMeta->tagLen; + size_t elemSize = sizeof(SMeterSidExtInfo) + pNewMetricMeta->tagLen; for (int32_t j = 0; j < pSidLists->numOfSids; ++j) { - pLists->pSidExtInfoList[j] = pStr - (char *)pLists; - memcpy(pStr, rsp, sidSize); + pLists->pSidExtInfoList[j] = pBuf - (char *)pLists; + memcpy(pBuf, rsp, elemSize); + + ((SMeterSidExtInfo *)pBuf)->uid = htobe64(((SMeterSidExtInfo *)pBuf)->uid); + ((SMeterSidExtInfo *)pBuf)->sid = htonl(((SMeterSidExtInfo *)pBuf)->sid); - rsp += sidSize; - pStr += sidSize; + rsp += elemSize; + pBuf += elemSize; } } - sizes[k] = pStr - (char *)pNewMetricMeta; + sizes[k] = pBuf - (char *)pNewMetricMeta; } + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); for (int32_t i = 0; i < num; ++i) { char name[TSDB_MAX_TAGS_LEN + 1] = {0}; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, i); - tscGetMetricMetaCacheKey(&pSql->cmd, name, 
pMeterMetaInfo->pMeterMeta->uid); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); + tscGetMetricMetaCacheKey(pQueryInfo, name, pMeterMetaInfo->pMeterMeta->uid); #ifdef _DEBUG_VIEW printf("generate the metric key:%s, index:%d\n", name, i); @@ -3311,9 +3258,12 @@ int tscProcessShowRsp(SSqlObj *pSql) { SSchema * pSchema; char key[20]; - SSqlRes * pRes = &pSql->res; - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); //? + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); pShow = (SShowRspMsg *)pRes->pRsp; pRes->qhandle = pShow->qhandle; @@ -3330,7 +3280,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { pSchema++; } - key[0] = pCmd->showType + 'a'; + key[0] = pCmd->msgType + 'a'; strcpy(key + 1, "showlist"); taosRemoveDataFromCache(tscCacheHandle, (void *)&(pMeterMetaInfo->pMeterMeta), false); @@ -3338,24 +3288,24 @@ int tscProcessShowRsp(SSqlObj *pSql) { int32_t size = pMeta->numOfColumns * sizeof(SSchema) + sizeof(SMeterMeta); pMeterMetaInfo->pMeterMeta = (SMeterMeta *)taosAddDataIntoCache(tscCacheHandle, key, (char *)pMeta, size, tsMeterMetaKeepTimer); - pCmd->numOfCols = pCmd->fieldsInfo.numOfOutputCols; + pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutputCols; SSchema *pMeterSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); - tscColumnBaseInfoReserve(&pCmd->colList, pMeta->numOfColumns); + tscColumnBaseInfoReserve(&pQueryInfo->colList, pMeta->numOfColumns); SColumnIndex index = {0}; for (int16_t i = 0; i < pMeta->numOfColumns; ++i) { index.columnIndex = i; - tscColumnBaseInfoInsert(pCmd, &index); - tscFieldInfoSetValFromSchema(&pCmd->fieldsInfo, i, &pMeterSchema[i]); + tscColumnBaseInfoInsert(pQueryInfo, &index); + tscFieldInfoSetValFromSchema(&pQueryInfo->fieldsInfo, i, &pMeterSchema[i]); } - tscFieldInfoCalOffset(pCmd); + 
tscFieldInfoCalOffset(pQueryInfo); return 0; } int tscProcessConnectRsp(SSqlObj *pSql) { - char temp[TSDB_METER_ID_LEN]; + char temp[TSDB_METER_ID_LEN * 2]; SConnectRsp *pConnect; STscObj *pObj = pSql->pTscObj; @@ -3363,22 +3313,16 @@ int tscProcessConnectRsp(SSqlObj *pSql) { pConnect = (SConnectRsp *)pRes->pRsp; strcpy(pObj->acctId, pConnect->acctId); // copy acctId from response - sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db); - strcpy(pObj->db, temp); -#ifdef CLUSTER + int32_t len = sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db); + + assert(len <= tListLen(pObj->db)); + strncpy(pObj->db, temp, tListLen(pObj->db)); + SIpList * pIpList; char *rsp = pRes->pRsp + sizeof(SConnectRsp); pIpList = (SIpList *)rsp; - tscMgmtIpList.numOfIps = pIpList->numOfIps; - for (int i = 0; i < pIpList->numOfIps; ++i) { - tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); - tscMgmtIpList.ip[i] = pIpList->ip[i]; - } - - rsp += sizeof(SIpList) + sizeof(int32_t) * pIpList->numOfIps; + tscSetMgmtIpList(pIpList); - tscPrintMgmtIp(); -#endif strcpy(pObj->sversion, pConnect->version); pObj->writeAuth = pConnect->writeAuth; pObj->superAuth = pConnect->superAuth; @@ -3389,7 +3333,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) { int tscProcessUseDbRsp(SSqlObj *pSql) { STscObj * pObj = pSql->pTscObj; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); strcpy(pObj->db, pMeterMetaInfo->name); return 0; @@ -3401,7 +3345,7 @@ int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) { } int tscProcessDropTableRsp(SSqlObj *pSql) { - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); SMeterMeta *pMeterMeta = taosGetDataFromCache(tscCacheHandle, pMeterMetaInfo->name); if (pMeterMeta == NULL) { @@ -3428,7 +3372,7 @@ int tscProcessDropTableRsp(SSqlObj *pSql) { } int 
tscProcessAlterTableMsgRsp(SSqlObj *pSql) { - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); SMeterMeta *pMeterMeta = taosGetDataFromCache(tscCacheHandle, pMeterMetaInfo->name); if (pMeterMeta == NULL) { /* not in cache, abort */ @@ -3439,12 +3383,12 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { taosRemoveDataFromCache(tscCacheHandle, (void **)&pMeterMeta, true); if (pMeterMetaInfo->pMeterMeta) { - bool isMetric = UTIL_METER_IS_METRIC(pMeterMetaInfo); + bool isSuperTable = UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo); taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMeterMeta), true); taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMetricMeta), true); - if (isMetric) { // if it is a metric, reset whole query cache + if (isSuperTable) { // if it is a super table, reset whole query cache tscTrace("%p reset query cache since table:%s is stable", pSql, pMeterMetaInfo->name); taosClearDataCache(tscCacheHandle); } @@ -3477,19 +3421,39 @@ int tscProcessRetrieveRspFromVnode(SSqlObj *pSql) { pRes->numOfRows = htonl(pRetrieve->numOfRows); pRes->precision = htons(pRetrieve->precision); pRes->offset = htobe64(pRetrieve->offset); - pRes->useconds = htobe64(pRetrieve->useconds); pRes->data = pRetrieve->data; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + tscSetResultPointer(pQueryInfo, pRes); + + if (pSql->pSubscription != NULL) { + int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutputCols; + + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, numOfCols - 1); + int16_t offset = tscFieldInfoGetOffset(pQueryInfo, numOfCols - 1); + + char* p = pRes->data + (pField->bytes + offset) * pRes->numOfRows; + + int32_t numOfMeters = htonl(*(int32_t*)p); + p += sizeof(int32_t); + for (int i = 0; i < numOfMeters; i++) { + int64_t uid = htobe64(*(int64_t*)p); + p += sizeof(int64_t); + TSKEY key = htobe64(*(TSKEY*)p); + p 
+= sizeof(TSKEY); + tscUpdateSubscriptionProgress(pSql->pSubscription, uid, key); + } + } - tscSetResultPointer(pCmd, pRes); pRes->row = 0; /** * If the query result is exhausted, or current query is to free resource at server side, * the connection will be recycled. */ - if ((pRes->numOfRows == 0 && !(tscProjectionQueryOnMetric(pCmd) && pRes->offset > 0)) || - ((pCmd->type & TSDB_QUERY_TYPE_FREE_RESOURCE) == TSDB_QUERY_TYPE_FREE_RESOURCE)) { + if ((pRes->numOfRows == 0 && !(tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && pRes->offset > 0)) || + ((pQueryInfo->type & TSDB_QUERY_TYPE_FREE_RESOURCE) == TSDB_QUERY_TYPE_FREE_RESOURCE)) { tscTrace("%p no result or free resource, recycle connection", pSql); taosAddConnIntoCache(tscConnCache, pSql->thandle, pSql->ip, pSql->vnode, pObj->user); pSql->thandle = NULL; @@ -3501,21 +3465,23 @@ int tscProcessRetrieveRspFromVnode(SSqlObj *pSql) { } int tscProcessRetrieveRspFromLocal(SSqlObj *pSql) { - SSqlRes * pRes = &pSql->res; - SSqlCmd * pCmd = &pSql->cmd; + SSqlRes * pRes = &pSql->res; + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SRetrieveMeterRsp *pRetrieve = (SRetrieveMeterRsp *)pRes->pRsp; pRes->numOfRows = htonl(pRetrieve->numOfRows); pRes->data = pRetrieve->data; - tscSetResultPointer(pCmd, pRes); + tscSetResultPointer(pQueryInfo, pRes); pRes->row = 0; return 0; } void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code); -static int32_t tscDoGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { +static int32_t doGetMeterMetaFromServer(SSqlObj *pSql, SMeterMetaInfo *pMeterMetaInfo) { int32_t code = TSDB_CODE_SUCCESS; SSqlObj *pNew = calloc(1, sizeof(SSqlObj)); @@ -3523,23 +3489,29 @@ static int32_t tscDoGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { tscError("%p malloc failed for new sqlobj to get meter meta", pSql); return TSDB_CODE_CLI_OUT_OF_MEMORY; } + pNew->pTscObj = pSql->pTscObj; pNew->signature = pNew; pNew->cmd.command = 
TSDB_SQL_META; - pNew->cmd.payload = NULL; - pNew->cmd.allocSize = 0; - pNew->cmd.defaultVal[0] = pSql->cmd.defaultVal[0]; // flag of create table if not exists + tscAddSubqueryInfo(&pNew->cmd); + + SQueryInfo *pNewQueryInfo = NULL; + tscGetQueryInfoDetailSafely(&pNew->cmd, 0, &pNewQueryInfo); + + pNew->cmd.createOnDemand = pSql->cmd.createOnDemand; // create table if not exists if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) { tscError("%p malloc failed for payload to get meter meta", pSql); free(pNew); + return TSDB_CODE_CLI_OUT_OF_MEMORY; } - SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(&pNew->cmd); + SMeterMetaInfo *pNewMeterMetaInfo = tscAddEmptyMeterMetaInfo(pNewQueryInfo); + assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1); - strcpy(pMeterMetaInfo->name, meterId); - memcpy(pNew->cmd.payload, pSql->cmd.payload, TSDB_DEFAULT_PAYLOAD_SIZE); + strcpy(pNewMeterMetaInfo->name, pMeterMetaInfo->name); + memcpy(pNew->cmd.payload, pSql->cmd.payload, TSDB_DEFAULT_PAYLOAD_SIZE); // tag information if table does not exists. tscTrace("%p new pSqlObj:%p to get meterMeta", pSql, pNew); if (pSql->fp == NULL) { @@ -3547,14 +3519,17 @@ static int32_t tscDoGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { tsem_init(&pNew->emptyRspSem, 0, 1); code = tscProcessSql(pNew); - SMeterMetaInfo *pInfo = tscGetMeterMetaInfo(&pSql->cmd, index); - // update cache only on success get metermeta + /* + * Update cache only on succeeding in getting metermeta. 
+ * Transfer the ownership of metermeta to the new object, instead of invoking the release/acquire routine + */ if (code == TSDB_CODE_SUCCESS) { - pInfo->pMeterMeta = (SMeterMeta *)taosGetDataFromCache(tscCacheHandle, meterId); + pMeterMetaInfo->pMeterMeta = taosTransferDataInCache(tscCacheHandle, (void**) &pNewMeterMetaInfo->pMeterMeta); + assert(pMeterMetaInfo->pMeterMeta != NULL); } - tscTrace("%p get meter meta complete, code:%d, pMeterMeta:%p", pSql, code, pInfo->pMeterMeta); + tscTrace("%p get meter meta complete, code:%d, pMeterMeta:%p", pSql, code, pMeterMetaInfo->pMeterMeta); tscFreeSqlObj(pNew); } else { @@ -3571,14 +3546,15 @@ static int32_t tscDoGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { return code; } -int tscGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { - SSqlCmd * pCmd = &pSql->cmd; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index); - - // if the SSqlCmd owns a metermeta, release it first - taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMeterMeta), false); - pMeterMetaInfo->pMeterMeta = (SMeterMeta *)taosGetDataFromCache(tscCacheHandle, meterId); +int tscGetMeterMeta(SSqlObj *pSql, SMeterMetaInfo *pMeterMetaInfo) { + assert(strlen(pMeterMetaInfo->name) != 0); + // If this SMeterMetaInfo owns a metermeta, release it first + if (pMeterMetaInfo->pMeterMeta != NULL) { + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMeterMeta), false); + } + + pMeterMetaInfo->pMeterMeta = (SMeterMeta *)taosGetDataFromCache(tscCacheHandle, pMeterMetaInfo->name); if (pMeterMetaInfo->pMeterMeta != NULL) { SMeterMeta *pMeterMeta = pMeterMetaInfo->pMeterMeta; @@ -3592,16 +3568,12 @@ int tscGetMeterMeta(SSqlObj *pSql, char *meterId, int32_t index) { * for async insert operation, release data block buffer before issue new object to get metermeta * because in metermeta callback function, the tscParse function will generate the submit data blocks */ - //if (pSql->fp != NULL && pSql->pStream 
== NULL) { - // tscFreeSqlCmdData(pCmd); - //} - - return tscDoGetMeterMeta(pSql, meterId, index); + return doGetMeterMetaFromServer(pSql, pMeterMetaInfo); } -int tscGetMeterMetaEx(SSqlObj *pSql, char *meterId, bool createIfNotExists) { - pSql->cmd.defaultVal[0] = createIfNotExists ? 1 : 0; - return tscGetMeterMeta(pSql, meterId, 0); +int tscGetMeterMetaEx(SSqlObj *pSql, SMeterMetaInfo *pMeterMetaInfo, bool createIfNotExists) { + pSql->cmd.createOnDemand = createIfNotExists; + return tscGetMeterMeta(pSql, pMeterMetaInfo); } /* @@ -3624,12 +3596,14 @@ static void tscWaitingForCreateTable(SSqlCmd *pCmd) { * @return status code */ int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) { - int code = 0; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + int code = 0; // handle metric meta renew process SSqlCmd *pCmd = &pSql->cmd; + SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + // enforce the renew metermeta operation in async model if (pSql->fp == NULL) pSql->fp = (void *)0x1; @@ -3639,15 +3613,16 @@ int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) { */ if (pMeterMetaInfo->pMeterMeta == NULL || !tscQueryOnMetric(pCmd)) { if (pMeterMetaInfo->pMeterMeta) { - tscTrace("%p update meter meta, old: numOfTags:%d, numOfCols:%d, uid:%lld, addr:%p", pSql, + tscTrace("%p update meter meta, old: numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql, pMeterMetaInfo->numOfTags, pCmd->numOfCols, pMeterMetaInfo->pMeterMeta->uid, pMeterMetaInfo->pMeterMeta); } - tscWaitingForCreateTable(&pSql->cmd); + + tscWaitingForCreateTable(pCmd); taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMeterMeta), true); - code = tscDoGetMeterMeta(pSql, meterId, 0); // todo ?? + code = doGetMeterMetaFromServer(pSql, pMeterMetaInfo); // todo ?? 
} else { - tscTrace("%p metric query not update metric meta, numOfTags:%d, numOfCols:%d, uid:%lld, addr:%p", pSql, + tscTrace("%p metric query not update metric meta, numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql, pMeterMetaInfo->pMeterMeta->numOfTags, pCmd->numOfCols, pMeterMetaInfo->pMeterMeta->uid, pMeterMetaInfo->pMeterMeta); } @@ -3661,33 +3636,35 @@ int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) { return code; } -int tscGetMetricMeta(SSqlObj *pSql) { +int tscGetMetricMeta(SSqlObj *pSql, int32_t clauseIndex) { int code = TSDB_CODE_NETWORK_UNAVAIL; SSqlCmd *pCmd = &pSql->cmd; /* - * the vnode query condition is serialized into pCmd->payload, we need to rebuild key for metricmeta info in cache. + * the query condition is serialized into pCmd->payload, we need to rebuild key for metricmeta info in cache. */ - bool reqMetricMeta = false; - for (int32_t i = 0; i < pSql->cmd.numOfTables; ++i) { + bool required = false; + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { char tagstr[TSDB_MAX_TAGS_LEN + 1] = {0}; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); - tscGetMetricMetaCacheKey(pCmd, tagstr, pMeterMetaInfo->pMeterMeta->uid); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); + tscGetMetricMetaCacheKey(pQueryInfo, tagstr, pMeterMetaInfo->pMeterMeta->uid); taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMetricMeta), false); SMetricMeta *ppMeta = (SMetricMeta *)taosGetDataFromCache(tscCacheHandle, tagstr); if (ppMeta == NULL) { - reqMetricMeta = true; + required = true; break; } else { pMeterMetaInfo->pMetricMeta = ppMeta; } } - // all metricmeta are retrieved from cache, no need to query mgmt node - if (!reqMetricMeta) { + // all metricmeta for one clause are retrieved from cache, no need to retrieve metricmeta from management node + if (!required) { return TSDB_CODE_SUCCESS; } @@ -3696,12 +3673,17 
@@ int tscGetMetricMeta(SSqlObj *pSql) { pNew->signature = pNew; pNew->cmd.command = TSDB_SQL_METRIC; - - for (int32_t i = 0; i < pSql->cmd.numOfTables; ++i) { - SMeterMetaInfo *pMMInfo = tscGetMeterMetaInfo(&pSql->cmd, i); + + SQueryInfo *pNewQueryInfo = NULL; + if ((code = tscGetQueryInfoDetailSafely(&pNew->cmd, 0, &pNewQueryInfo)) != TSDB_CODE_SUCCESS) { + return code; + } + + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + SMeterMetaInfo *pMMInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); SMeterMeta *pMeterMeta = taosGetDataFromCache(tscCacheHandle, pMMInfo->name); - tscAddMeterMetaInfo(&pNew->cmd, pMMInfo->name, pMeterMeta, NULL, pMMInfo->numOfTags, pMMInfo->tagColumnIndex); + tscAddMeterMetaInfo(pNewQueryInfo, pMMInfo->name, pMeterMeta, NULL, pMMInfo->numOfTags, pMMInfo->tagColumnIndex); } if ((code = tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) { @@ -3709,18 +3691,23 @@ int tscGetMetricMeta(SSqlObj *pSql) { return code; } - // the query condition on meter is serialized into payload - tscTagCondCopy(&pNew->cmd.tagCond, &pSql->cmd.tagCond); + tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond); - pNew->cmd.groupbyExpr = pSql->cmd.groupbyExpr; - pNew->cmd.numOfTables = pSql->cmd.numOfTables; + pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr; + pNewQueryInfo->numOfTables = pQueryInfo->numOfTables; - pNew->cmd.slimit = pSql->cmd.slimit; - pNew->cmd.order = pSql->cmd.order; + pNewQueryInfo->slimit = pQueryInfo->slimit; + pNewQueryInfo->order = pQueryInfo->order; + + STagCond* pTagCond = &pNewQueryInfo->tagCond; + tscTrace("%p new sqlobj:%p info, numOfTables:%d, slimit:%" PRId64 ", soffset:%" PRId64 ", order:%d, tbname cond:%s", + pSql, pNew, pNewQueryInfo->numOfTables, pNewQueryInfo->slimit.limit, pNewQueryInfo->slimit.offset, + pNewQueryInfo->order.order, pTagCond->tbnameCond.cond) - if (pSql->fp != NULL && pSql->pStream == NULL) { - tscFreeSqlCmdData(&pSql->cmd); - } +// if (pSql->fp != NULL && 
pSql->pStream == NULL) { +// pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); +// tscFreeSubqueryInfo(pCmd); +// } tscTrace("%p allocate new pSqlObj:%p to get metricMeta", pSql, pNew); if (pSql->fp == NULL) { @@ -3729,18 +3716,20 @@ int tscGetMetricMeta(SSqlObj *pSql) { code = tscProcessSql(pNew); - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - char tagstr[TSDB_MAX_TAGS_LEN] = {0}; - - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); - tscGetMetricMetaCacheKey(pCmd, tagstr, pMeterMetaInfo->pMeterMeta->uid); + if (code == TSDB_CODE_SUCCESS) {//todo optimize the performance + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + char tagstr[TSDB_MAX_TAGS_LEN] = {0}; + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, i); + tscGetMetricMetaCacheKey(pQueryInfo, tagstr, pMeterMetaInfo->pMeterMeta->uid); #ifdef _DEBUG_VIEW - printf("create metric key:%s, index:%d\n", tagstr, i); + printf("create metric key:%s, index:%d\n", tagstr, i); #endif - - taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMetricMeta), false); - pMeterMetaInfo->pMetricMeta = (SMetricMeta *)taosGetDataFromCache(tscCacheHandle, tagstr); + + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pMeterMetaInfo->pMetricMeta), false); + pMeterMetaInfo->pMetricMeta = (SMetricMeta *)taosGetDataFromCache(tscCacheHandle, tagstr); + } } tscFreeSqlObj(pNew); @@ -3762,17 +3751,17 @@ void tscInitMsgs() { tscBuildMsg[TSDB_SQL_FETCH] = tscBuildRetrieveMsg; tscBuildMsg[TSDB_SQL_CREATE_DB] = tscBuildCreateDbMsg; - tscBuildMsg[TSDB_SQL_CREATE_USER] = tscBuildCreateUserMsg; + tscBuildMsg[TSDB_SQL_CREATE_USER] = tscBuildUserMsg; - tscBuildMsg[TSDB_SQL_CREATE_ACCT] = tscBuildCreateAcctMsg; - tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAlterAcctMsg; + tscBuildMsg[TSDB_SQL_CREATE_ACCT] = tscBuildAcctMsg; + tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAcctMsg; tscBuildMsg[TSDB_SQL_CREATE_TABLE] = tscBuildCreateTableMsg; - 
tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserMsg; + tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropAcctMsg; tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropAcctMsg; tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg; tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg; - tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildAlterUserMsg; + tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg; tscBuildMsg[TSDB_SQL_CREATE_DNODE] = tscBuildCreateDnodeMsg; tscBuildMsg[TSDB_SQL_DROP_DNODE] = tscBuildDropDnodeMsg; tscBuildMsg[TSDB_SQL_CFG_DNODE] = tscBuildCfgDnodeMsg; @@ -3788,9 +3777,9 @@ void tscInitMsgs() { tscBuildMsg[TSDB_SQL_HB] = tscBuildHeartBeatMsg; tscBuildMsg[TSDB_SQL_SHOW] = tscBuildShowMsg; tscBuildMsg[TSDB_SQL_RETRIEVE] = tscBuildRetrieveFromMgmtMsg; - tscBuildMsg[TSDB_SQL_KILL_QUERY] = tscBuildKillQueryMsg; - tscBuildMsg[TSDB_SQL_KILL_STREAM] = tscBuildKillStreamMsg; - tscBuildMsg[TSDB_SQL_KILL_CONNECTION] = tscBuildKillConnectionMsg; + tscBuildMsg[TSDB_SQL_KILL_QUERY] = tscBuildKillMsg; + tscBuildMsg[TSDB_SQL_KILL_STREAM] = tscBuildKillMsg; + tscBuildMsg[TSDB_SQL_KILL_CONNECTION] = tscBuildKillMsg; tscProcessMsgRsp[TSDB_SQL_SELECT] = tscProcessQueryRsp; tscProcessMsgRsp[TSDB_SQL_FETCH] = tscProcessRetrieveRspFromVnode; @@ -3804,16 +3793,16 @@ void tscInitMsgs() { tscProcessMsgRsp[TSDB_SQL_MULTI_META] = tscProcessMultiMeterMetaRsp; tscProcessMsgRsp[TSDB_SQL_SHOW] = tscProcessShowRsp; - tscProcessMsgRsp[TSDB_SQL_RETRIEVE] = tscProcessRetrieveRspFromVnode; // rsp handled by same function. + tscProcessMsgRsp[TSDB_SQL_RETRIEVE] = tscProcessRetrieveRspFromVnode; // rsp handled by same function. 
tscProcessMsgRsp[TSDB_SQL_DESCRIBE_TABLE] = tscProcessDescribeTableRsp; - + tscProcessMsgRsp[TSDB_SQL_RETRIEVE_TAGS] = tscProcessTagRetrieveRsp; tscProcessMsgRsp[TSDB_SQL_CURRENT_DB] = tscProcessTagRetrieveRsp; tscProcessMsgRsp[TSDB_SQL_CURRENT_USER] = tscProcessTagRetrieveRsp; tscProcessMsgRsp[TSDB_SQL_SERV_VERSION] = tscProcessTagRetrieveRsp; tscProcessMsgRsp[TSDB_SQL_CLI_VERSION] = tscProcessTagRetrieveRsp; tscProcessMsgRsp[TSDB_SQL_SERV_STATUS] = tscProcessTagRetrieveRsp; - + tscProcessMsgRsp[TSDB_SQL_RETRIEVE_EMPTY_RESULT] = tscProcessEmptyResultRsp; tscProcessMsgRsp[TSDB_SQL_RETRIEVE_METRIC] = tscProcessRetrieveMetricRsp; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 0732a87e64b0c5d942dc3b7ca1cd2bbfacba785b..6a03278a077bab0aa078685acd8a0ecd3402acce 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -14,23 +14,24 @@ */ #include "os.h" +#include "hash.h" #include "tcache.h" #include "tlog.h" +#include "tnote.h" #include "trpc.h" #include "tscJoinProcess.h" #include "tscProfile.h" +#include "tscSQLParser.h" #include "tscSecondaryMerge.h" #include "tscUtil.h" #include "tsclient.h" #include "tscompression.h" #include "tsocket.h" -#include "tscSQLParser.h" #include "ttimer.h" #include "tutil.h" -#include "tnote.h" -TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), - void *param, void **taos) { +TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port, + void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) { STscObj *pObj; taos_init(); @@ -62,26 +63,24 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const } } -#ifdef CLUSTER if (ip && ip[0]) { + tscMgmtIpList.numOfIps = 4; + strcpy(tscMgmtIpList.ipstr[0], ip); + tscMgmtIpList.ip[0] = inet_addr(ip); strcpy(tscMgmtIpList.ipstr[1], ip); tscMgmtIpList.ip[1] = inet_addr(ip); + 
strcpy(tscMgmtIpList.ipstr[2], tsMasterIp); + tscMgmtIpList.ip[2] = inet_addr(tsMasterIp); + strcpy(tscMgmtIpList.ipstr[3], tsSecondIp); + tscMgmtIpList.ip[3] = inet_addr(tsSecondIp); } -#else - if (ip && ip[0]) { - if (ip != tsServerIpStr) { - strcpy(tsServerIpStr, ip); - } - tsServerIp = inet_addr(ip); - } -#endif pObj = (STscObj *)malloc(sizeof(STscObj)); if (NULL == pObj) { globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; return NULL; } - + memset(pObj, 0, sizeof(STscObj)); pObj->signature = pObj; @@ -113,7 +112,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const free(pObj); return NULL; } - + memset(pSql, 0, sizeof(SSqlObj)); pSql->pTscObj = pObj; pSql->signature = pSql; @@ -127,8 +126,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const } pSql->cmd.command = TSDB_SQL_CONNECT; - int ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); - if (TSDB_CODE_SUCCESS != ret) { + if (TSDB_CODE_SUCCESS != tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) { globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; free(pSql); free(pObj); @@ -152,51 +150,21 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) { if (ip == NULL || (ip != NULL && (strcmp("127.0.0.1", ip) == 0 || strcasecmp("localhost", ip) == 0))) { -#ifdef CLUSTER ip = tsMasterIp; -#else - ip = tsServerIpStr; -#endif } tscTrace("try to create a connection to %s", ip); void *taos = taos_connect_imp(ip, user, pass, db, port, NULL, NULL, NULL); if (taos != NULL) { - STscObj* pObj = (STscObj*) taos; + STscObj *pObj = (STscObj *)taos; // version compare only requires the first 3 segments of the version string - int32_t comparedSegments = 3; - char client_version[64] = {0}; - char server_version[64] = {0}; - int clientVersionNumber[4] = {0}; - int serverVersionNumber[4] = {0}; - - strcpy(client_version, version); - 
strcpy(server_version, taos_get_server_info(taos)); - - if (!taosGetVersionNumber(client_version, clientVersionNumber)) { - tscError("taos:%p, invalid client version:%s", taos, client_version); - pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION; + int code = taosCheckVersion(version, taos_get_server_info(taos), 3); + if (code != 0) { + pObj->pSql->res.code = code; taos_close(taos); return NULL; } - - if (!taosGetVersionNumber(server_version, serverVersionNumber)) { - tscError("taos:%p, invalid server version:%s", taos, server_version); - pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION; - taos_close(taos); - return NULL; - } - - for(int32_t i = 0; i < comparedSegments; ++i) { - if (clientVersionNumber[i] != serverVersionNumber[i]) { - tscError("taos:%p, the %d-th number of server version:%s not matched with client version:%s, close connection", - taos, i, server_version, version); - pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION; - taos_close(taos); - return NULL; - } - } } return taos; @@ -204,11 +172,6 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) { -#ifndef CLUSTER - if (ip == NULL) { - ip = tsServerIpStr; - } -#endif return taos_connect_imp(ip, user, pass, db, port, fp, param, taos); } @@ -225,20 +188,22 @@ void taos_close(TAOS *taos) { } } -int taos_query_imp(STscObj* pObj, SSqlObj* pSql) { +int taos_query_imp(STscObj *pObj, SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; pRes->numOfRows = 1; pRes->numOfTotal = 0; + pRes->numOfTotalInCurrentClause = 0; + pSql->asyncTblPos = NULL; if (NULL != pSql->pTableHashList) { - taosCleanUpIntHash(pSql->pTableHashList); + taosCleanUpHashTable(pSql->pTableHashList); pSql->pTableHashList = NULL; } - tscTrace("%p SQL: %s pObj:%p", pSql, pSql->sqlstr, pObj); + tscDump("%p pObj:%p, SQL: %s", pSql, pObj, pSql->sqlstr); - 
pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false); + pRes->code = (uint8_t)tsParseSql(pSql, false); /* * set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query. @@ -257,7 +222,7 @@ int taos_query_imp(STscObj* pObj, SSqlObj* pSql) { } else { tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pObj), pObj); } - + if (pRes->code != TSDB_CODE_SUCCESS) { tscFreeSqlObjPartial(pSql); } @@ -276,10 +241,11 @@ int taos_query(TAOS *taos, const char *sqlstr) { SSqlRes *pRes = &pSql->res; size_t sqlLen = strlen(sqlstr); - if (sqlLen > TSDB_MAX_SQL_LEN) { - pRes->code = tscInvalidSQLErrMsg(pSql->cmd.payload, "sql too long", NULL); // set the additional error msg for invalid sql + if (sqlLen > tsMaxSQLStringLen) { + pRes->code = + tscInvalidSQLErrMsg(pSql->cmd.payload, "sql too long", NULL); // set the additional error msg for invalid sql tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); - + return pRes->code; } @@ -289,7 +255,7 @@ int taos_query(TAOS *taos, const char *sqlstr) { if (sql == NULL) { pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; tscError("%p failed to malloc sql string buffer, reason:%s", pSql, strerror(errno)); - + tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); return pRes->code; } @@ -322,8 +288,12 @@ int taos_num_fields(TAOS_RES *res) { SSqlObj *pSql = (SSqlObj *)res; if (pSql == NULL || pSql->signature != pSql) return 0; - SFieldInfo *pFieldsInfo = &pSql->cmd.fieldsInfo; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if (pQueryInfo == NULL) { + return 0; + } + SFieldInfo *pFieldsInfo = &pQueryInfo->fieldsInfo; return (pFieldsInfo->numOfOutputCols - pFieldsInfo->numOfHiddenCols); } @@ -345,7 +315,8 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { SSqlObj *pSql = (SSqlObj *)res; if (pSql == NULL || pSql->signature != pSql) return 0; - return pSql->cmd.fieldsInfo.pFields; + 
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + return pQueryInfo->fieldsInfo.pFields; } int taos_retrieve(TAOS_RES *res) { @@ -391,47 +362,54 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) { // secondary merge has handle this situation if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) { - pRes->numOfTotal += pRes->numOfRows; + pRes->numOfTotalInCurrentClause += pRes->numOfRows; } - for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + - pRes->bytes[i] * (1 - pCmd->order.order) * (pRes->numOfRows - 1); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { +// pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + +// pRes->bytes[i] * (1 - pQueryInfo->order.order) * (pRes->numOfRows - 1); + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order); } *rows = pRes->tsrow; - return (pCmd->order.order == TSQL_SO_DESC) ? pRes->numOfRows : -pRes->numOfRows; + return (pQueryInfo->order.order == TSQL_SO_DESC) ? 
pRes->numOfRows : -pRes->numOfRows; } static void **doSetResultRowData(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; + + assert(pRes->row >= 0 && pRes->row <= pRes->numOfRows); + + if (pRes->row >= pRes->numOfRows) { // all the results has returned to invoker + tfree(pRes->tsrow); + return pRes->tsrow; + } + + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); int32_t num = 0; - - for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row; + for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row; // primary key column cannot be null in interval query, no need to check - if (i == 0 && pCmd->nAggTimeInterval > 0) { + if (i == 0 && pQueryInfo->nAggTimeInterval > 0) { continue; } - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); - + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); if (isNull(pRes->tsrow[i], pField->type)) { pRes->tsrow[i] = NULL; } else if (pField->type == TSDB_DATA_TYPE_NCHAR) { // convert unicode to native code in a temporary buffer extra one byte for terminated symbol if (pRes->buffer[num] == NULL) { - pRes->buffer[num] = malloc(pField->bytes + 1); - } else { - pRes->buffer[num] = realloc(pRes->buffer[num], pField->bytes + 1); + pRes->buffer[num] = malloc(pField->bytes + TSDB_NCHAR_SIZE); } - /* string terminated */ - memset(pRes->buffer[num], 0, pField->bytes + 1); + /* string terminated char for binary data*/ + memset(pRes->buffer[num], 0, pField->bytes + TSDB_NCHAR_SIZE); if (taosUcs4ToMbs(pRes->tsrow[i], pField->bytes, pRes->buffer[num])) { pRes->tsrow[i] = pRes->buffer[num]; @@ -439,96 +417,107 @@ static void **doSetResultRowData(SSqlObj *pSql) { tscError("%p charset:%s to %s. 
val:%ls convert failed.", pSql, DEFAULT_UNICODE_ENCODEC, tsCharset, pRes->tsrow); pRes->tsrow[i] = NULL; } + num++; } } - assert(num <= pCmd->fieldsInfo.numOfOutputCols); - - return pRes->tsrow; -} - -static void **getOneRowFromBuf(SSqlObj *pSql) { - doSetResultRowData(pSql); - - SSqlRes *pRes = &pSql->res; - pRes->row++; - + assert(num <= pQueryInfo->fieldsInfo.numOfOutputCols); + + pRes->row++; // index increase one-step return pRes->tsrow; } -static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { +static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) { + bool hasData = true; SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; - while (1) { - bool hasData = true; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + bool allSubqueryExhausted = true; for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + if (pSql->pSubs[i] == NULL) { + continue; + } + SSqlRes *pRes1 = &pSql->pSubs[i]->res; + SSqlCmd *pCmd1 = &pSql->pSubs[i]->cmd; - // in case inner join, if any subquery exhausted, query completed - if (pRes1->numOfRows == 0) { - hasData = false; + SQueryInfo * pQueryInfo1 = tscGetQueryInfoDetail(pCmd1, pCmd1->clauseIndex); + SMeterMetaInfo *pMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo1, 0); + + assert(pQueryInfo1->numOfTables == 1); + + /* + * if the global limitation is not reached, and current result has not exhausted, or next more vnodes are + * available, goes on + */ + if (pMetaInfo->vnodeIndex < pMetaInfo->pMetricMeta->numOfVnodes && pRes1->row < pRes1->numOfRows && + (!tscHasReachLimitation(pQueryInfo1, pRes1))) { + allSubqueryExhausted = false; break; } } - if (!hasData) { // free all sub sqlobj - tscTrace("%p one subquery exhausted, free other %d subquery", pSql, pSql->numOfSubs - 1); - - SSubqueryState *pState = NULL; - - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlObj * pChildObj = pSql->pSubs[i]; - SJoinSubquerySupporter *pSupporter = 
(SJoinSubquerySupporter *)pChildObj->param; - pState = pSupporter->pState; - - tscDestroyJoinSupporter(pChildObj->param); - taos_free_result(pChildObj); + hasData = !allSubqueryExhausted; + } else { // otherwise, in case inner join, if any subquery exhausted, query completed. + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + if (pSql->pSubs[i] == 0) { + continue; } + + SSqlRes * pRes1 = &pSql->pSubs[i]->res; + SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0); - free(pState); - return NULL; + if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pQueryInfo1, pRes1) && + tscProjectionQueryOnTable(pQueryInfo1)) || + (pRes1->numOfRows == 0)) { + hasData = false; + break; + } } + } + + return hasData; +} + +static void **tscBuildResFromSubqueries(SSqlObj *pSql) { + SSqlRes *pRes = &pSql->res; + while (1) { + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); if (pRes->tsrow == NULL) { - pRes->tsrow = malloc(sizeof(void *) * pCmd->exprsInfo.numOfExprs); + pRes->tsrow = calloc(pQueryInfo->exprsInfo.numOfExprs, POINTER_BYTES); } bool success = false; - if (pSql->numOfSubs >= 2) { - // do merge result - SSqlRes *pRes1 = &pSql->pSubs[0]->res; - SSqlRes *pRes2 = &pSql->pSubs[1]->res; - - while (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) { - doSetResultRowData(pSql->pSubs[0]); - doSetResultRowData(pSql->pSubs[1]); - - TSKEY key1 = *(TSKEY *)pRes1->tsrow[0]; - TSKEY key2 = *(TSKEY *)pRes2->tsrow[0]; - - if (key1 == key2) { - success = true; - pRes1->row++; - pRes2->row++; - break; - } else if (key1 < key2) { - pRes1->row++; - } else if (key1 > key2) { - pRes2->row++; - } + + int32_t numOfTableHasRes = 0; + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + if (pSql->pSubs[i] != 0) { + numOfTableHasRes++; + } + } + + if (numOfTableHasRes >= 2) { // do merge result + + success = (doSetResultRowData(pSql->pSubs[0]) != NULL) && + (doSetResultRowData(pSql->pSubs[1]) != NULL); + // TSKEY key1 = 
*(TSKEY *)pRes1->tsrow[0]; + // TSKEY key2 = *(TSKEY *)pRes2->tsrow[0]; + // printf("first:%" PRId64 ", second:%" PRId64 "\n", key1, key2); + } else { // only one subquery + SSqlObj *pSub = pSql->pSubs[0]; + if (pSub == NULL) { + pSub = pSql->pSubs[1]; } - } else { - SSqlRes *pRes1 = &pSql->pSubs[0]->res; - doSetResultRowData(pSql->pSubs[0]); - success = (pRes1->row++ < pRes1->numOfRows); + success = (doSetResultRowData(pSub) != NULL); } - if (success) { - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { + if (success) { // current row of final output has been built, return to app + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { int32_t tableIndex = pRes->pColumnIndex[i].tableIndex; int32_t columnIndex = pRes->pColumnIndex[i].columnIndex; @@ -536,8 +525,32 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { pRes->tsrow[i] = pRes1->tsrow[columnIndex]; } + pRes->numOfTotalInCurrentClause++; + break; - } else { + } else { // continue retrieve data from vnode + if (!tscHashRemainDataInSubqueryResultSet(pSql)) { + tscTrace("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1); + SSubqueryState *pState = NULL; + + // free all sub sqlobj + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlObj *pChildObj = pSql->pSubs[i]; + if (pChildObj == NULL) { + continue; + } + + SJoinSubquerySupporter *pSupporter = (SJoinSubquerySupporter *)pChildObj->param; + pState = pSupporter->pState; + + tscDestroyJoinSupporter(pChildObj->param); + taos_free_result(pChildObj); + } + + free(pState); + return NULL; + } + tscFetchDatablockFromSubquery(pSql); if (pRes->code != TSDB_CODE_SUCCESS) { return NULL; @@ -559,73 +572,75 @@ TAOS_ROW taos_fetch_row_impl(TAOS_RES *res) { if (pCmd->command == TSDB_SQL_METRIC_JOIN_RETRIEVE) { tscFetchDatablockFromSubquery(pSql); + if (pRes->code == TSDB_CODE_SUCCESS) { - return tscJoinResultsetFromBuf(pSql); + tscTrace("%p data from all subqueries have been retrieved to client", 
pSql); + return tscBuildResFromSubqueries(pSql); } else { + tscTrace("%p retrieve data from subquery failed, code:%d", pSql, pRes->code); return NULL; } } else if (pRes->row >= pRes->numOfRows) { + /** + * NOT a join query + * + * If the data block of current result set have been consumed already, try fetch next result + * data block from virtual node. + */ tscResetForNextRetrieve(pRes); if (pCmd->command < TSDB_SQL_LOCAL) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; } - tscProcessSql(pSql); - if (pRes->numOfRows == 0) { - return NULL; + tscProcessSql(pSql); // retrieve data from virtual node + + //if failed to retrieve data from current virtual node, try next one if exists + if (hasMoreVnodesToTry(pSql)) { + tscTryQueryNextVnode(pSql, NULL); } - // local reducer has handle this situation + /* + * local reducer has handle this case, + * so no need to add the pRes->numOfRows for super table query + */ if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) { - pRes->numOfTotal += pRes->numOfRows; + pRes->numOfTotalInCurrentClause += pRes->numOfRows; + } + + if (pRes->numOfRows == 0) { + return NULL; } } - return getOneRowFromBuf(pSql); + return doSetResultRowData(pSql); } TAOS_ROW taos_fetch_row(TAOS_RES *res) { SSqlObj *pSql = (SSqlObj *)res; SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; if (pSql == NULL || pSql->signature != pSql) { globalCode = TSDB_CODE_DISCONNECTED; return NULL; } - // projection query on metric, pipeline retrieve data from vnode list, instead of two-stage merge + /* + * projection query on super table, access each virtual node sequentially retrieve data from vnode list, + * instead of two-stage merge + */ TAOS_ROW rows = taos_fetch_row_impl(res); - while (rows == NULL && tscProjectionQueryOnMetric(pCmd)) { - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - - // reach the maximum number of output rows, abort - if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { - 
return NULL; - } - - /* - * update the limit and offset value according to current retrieval results - * Note: if pRes->offset > 0, pRes->numOfRows = 0, pRes->numOfTotal = 0; - */ - pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; - pCmd->limit.offset = pRes->offset; - - assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0)); - - if ((++pCmd->vnodeIdx) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { - pCmd->command = TSDB_SQL_SELECT; - assert(pSql->fp == NULL); - tscProcessSql(pSql); - rows = taos_fetch_row_impl(res); - } + if (rows != NULL) { + return rows; + } - // check!!! - if (rows != NULL || pCmd->vnodeIdx >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { - break; - } + // current subclause is completed, try the next subclause + while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) { + tscTryQueryNextClause(pSql, NULL); + + // if the rows is not NULL, return immediately + rows = taos_fetch_row_impl(res); } return rows; @@ -647,37 +662,34 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { // projection query on metric, pipeline retrieve data from vnode list, // instead of two-stage mergevnodeProcessMsgFromShell free qhandle nRows = taos_fetch_block_impl(res, rows); - while (*rows == NULL && tscProjectionQueryOnMetric(pCmd)) { - /* reach the maximum number of output rows, abort */ - if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { - return 0; - } - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + // current subclause is completed, try the next subclause + while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) { + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - /* update the limit value according to current retrieval results */ - pCmd->limit.limit = pSql->cmd.globalLimit - pRes->numOfTotal; - pCmd->limit.offset = pRes->offset; + pSql->cmd.command = pQueryInfo->command; + pCmd->clauseIndex++; + pRes->numOfTotal += 
pRes->numOfTotalInCurrentClause; + pRes->numOfTotalInCurrentClause = 0; + pRes->rspType = 0; - if ((++pSql->cmd.vnodeIdx) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { - pSql->cmd.command = TSDB_SQL_SELECT; - assert(pSql->fp == NULL); - tscProcessSql(pSql); - nRows = taos_fetch_block_impl(res, rows); - } + pSql->numOfSubs = 0; + tfree(pSql->pSubs); - // check!!! - if (*rows != NULL || pCmd->vnodeIdx >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { - break; - } + assert(pSql->fp == NULL); + + tscTrace("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause); + tscProcessSql(pSql); + + nRows = taos_fetch_block_impl(res, rows); } return nRows; } int taos_select_db(TAOS *taos, const char *db) { - char sql[64]; + char sql[256] = {0}; STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) { @@ -685,12 +697,11 @@ int taos_select_db(TAOS *taos, const char *db) { return TSDB_CODE_DISCONNECTED; } - sprintf(sql, "use %s", db); - + snprintf(sql, tListLen(sql), "use %s", db); return taos_query(taos, sql); } -void taos_free_result(TAOS_RES *res) { +void taos_free_result_imp(TAOS_RES* res, int keepCmd) { if (res == NULL) return; SSqlObj *pSql = (SSqlObj *)res; @@ -708,6 +719,8 @@ void taos_free_result(TAOS_RES *res) { pSql->thandle = NULL; tscFreeSqlObj(pSql); tscTrace("%p Async SqlObj is freed by app", pSql); + } else if (keepCmd) { + tscFreeSqlResult(pSql); } else { tscFreeSqlObjPartial(pSql); } @@ -715,9 +728,15 @@ void taos_free_result(TAOS_RES *res) { } // set freeFlag to 1 in retrieve message if there are un-retrieved results - pCmd->type = TSDB_QUERY_TYPE_FREE_RESOURCE; + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if (pQueryInfo == NULL) { + tscFreeSqlObjPartial(pSql); + return; + } - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE; + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); /* 
* case 1. Partial data have been retrieved from vnodes, but not all data has been retrieved yet. @@ -735,6 +754,8 @@ void taos_free_result(TAOS_RES *res) { pSql->pStream == NULL && pMeterMetaInfo->pMeterMeta != NULL))) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + tscTrace("%p code:%d, numOfRows:%d, command:%d", pSql, pRes->code, pRes->numOfRows, pCmd->command); + void *fp = pSql->fp; if (fp != NULL) { pSql->freed = 1; @@ -757,8 +778,13 @@ void taos_free_result(TAOS_RES *res) { * Then this object will be reused and no free operation is required. */ pSql->thandle = NULL; - tscFreeSqlObjPartial(pSql); - tscTrace("%p sql result is freed by app", pSql); + if (keepCmd) { + tscFreeSqlResult(pSql); + tscTrace("%p sql result is freed by app while sql command is kept", pSql); + } else { + tscFreeSqlObjPartial(pSql); + tscTrace("%p sql result is freed by app", pSql); + } } } else { // if no free resource msg is sent to vnode, we free this object immediately. 
@@ -768,6 +794,9 @@ void taos_free_result(TAOS_RES *res) { assert(pRes->numOfRows == 0 || (pCmd->command > TSDB_SQL_LOCAL)); tscFreeSqlObj(pSql); tscTrace("%p Async sql result is freed by app", pSql); + } else if (keepCmd) { + tscFreeSqlResult(pSql); + tscTrace("%p sql result is freed while sql command is kept", pSql); } else { tscFreeSqlObjPartial(pSql); tscTrace("%p sql result is freed", pSql); @@ -775,6 +804,10 @@ void taos_free_result(TAOS_RES *res) { } } +void taos_free_result(TAOS_RES *res) { + taos_free_result_imp(res, 0); +} + int taos_errno(TAOS *taos) { STscObj *pObj = (STscObj *)taos; int code; @@ -789,23 +822,45 @@ int taos_errno(TAOS *taos) { return code; } +static bool validErrorCode(int32_t code) { + return code >= TSDB_CODE_SUCCESS && code < TSDB_CODE_MAX_ERROR_CODE; +} + +/* + * In case of invalid sql error, additional information is attached to explain + * why the sql is invalid + */ +static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd* pCmd) { + if (code != TSDB_CODE_INVALID_SQL) { + return false; + } + + size_t len = strlen(pCmd->payload); + + char* z = NULL; + if (len > 0) { + z = strstr (pCmd->payload, "invalid SQL"); + } + + return z != NULL; +} + char *taos_errstr(TAOS *taos) { STscObj *pObj = (STscObj *)taos; uint8_t code; -// char temp[256] = {0}; if (pObj == NULL || pObj->signature != pObj) return tsError[globalCode]; - if ((int8_t)(pObj->pSql->res.code) == -1) - code = TSDB_CODE_OTHERS; - else - code = pObj->pSql->res.code; + SSqlObj* pSql = pObj->pSql; + + if (validErrorCode(pSql->res.code)) { + code = pSql->res.code; + } else { + code = TSDB_CODE_OTHERS; //unknown error + } - // for invalid sql, additional information is attached to explain why the sql is invalid - if (code == TSDB_CODE_INVALID_SQL) { -// snprintf(temp, tListLen(temp), "invalid SQL: %s", pObj->pSql->cmd.payload); -// strcpy(pObj->pSql->cmd.payload, temp); - return pObj->pSql->cmd.payload; + if (hasAdditionalErrorInfo(code, &pSql->cmd)) { + return 
pSql->cmd.payload; } else { return tsError[code]; } @@ -830,12 +885,15 @@ void taos_stop_query(TAOS_RES *res) { if (res == NULL) return; SSqlObj *pSql = (SSqlObj *)res; + SSqlCmd *pCmd = &pSql->cmd; + if (pSql->signature != pSql) return; tscTrace("%p start to cancel query", res); pSql->res.code = TSDB_CODE_QUERY_CANCELLED; - if (tscIsTwoStageMergeMetricQuery(&pSql->cmd)) { + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (tscIsTwoStageMergeMetricQuery(pQueryInfo, 0)) { tscKillMetricQuery(pSql); return; } @@ -856,56 +914,61 @@ void taos_stop_query(TAOS_RES *res) { int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { int len = 0; for (int i = 0; i < num_fields; ++i) { + if (i > 0) { + str[len++] = ' '; + } + if (row[i] == NULL) { - len += sprintf(str + len, "%s ", TSDB_DATA_NULL_STR); + len += sprintf(str + len, "%s", TSDB_DATA_NULL_STR); continue; } switch (fields[i].type) { case TSDB_DATA_TYPE_TINYINT: - len += sprintf(str + len, "%d ", *((char *)row[i])); + len += sprintf(str + len, "%d", *((char *)row[i])); break; case TSDB_DATA_TYPE_SMALLINT: - len += sprintf(str + len, "%d ", *((short *)row[i])); + len += sprintf(str + len, "%d", *((short *)row[i])); break; case TSDB_DATA_TYPE_INT: - len += sprintf(str + len, "%d ", *((int *)row[i])); + len += sprintf(str + len, "%d", *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - len += sprintf(str + len, "%lld ", *((int64_t *)row[i])); + len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i])); break; - case TSDB_DATA_TYPE_FLOAT: - len += sprintf(str + len, "%f ", *((float *)row[i])); - break; + case TSDB_DATA_TYPE_FLOAT: { + float fv = 0; + fv = GET_FLOAT_VAL(row[i]); + len += sprintf(str + len, "%f", fv); + } break; - case TSDB_DATA_TYPE_DOUBLE: - len += sprintf(str + len, "%lf ", *((double *)row[i])); - break; + case TSDB_DATA_TYPE_DOUBLE: { + double dv = 0; + dv = GET_DOUBLE_VAL(row[i]); + len += sprintf(str + len, "%lf", dv); + } break; case 
TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: { - /* limit the max length of string to no greater than the maximum length, - * in case of not null-terminated string */ - size_t xlen = strlen(row[i]); - size_t trueLen = MIN(xlen, fields[i].bytes); - - memcpy(str + len, (char*) row[i], trueLen); - - str[len + trueLen] = ' '; - len += (trueLen + 1); - } - break; + size_t xlen = 0; + for (xlen = 0; xlen <= fields[i].bytes; xlen++) { + char c = ((char*)row[i])[xlen]; + if (c == 0) break; + str[len++] = c; + } + str[len] = 0; + } break; case TSDB_DATA_TYPE_TIMESTAMP: - len += sprintf(str + len, "%lld ", *((int64_t *)row[i])); + len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_BOOL: - len += sprintf(str + len, "%d ", *((int8_t *)row[i])); + len += sprintf(str + len, "%d", *((int8_t *)row[i])); default: break; } @@ -926,11 +989,12 @@ int taos_validate_sql(TAOS *taos, const char *sql) { pRes->numOfRows = 1; pRes->numOfTotal = 0; + pRes->numOfTotalInCurrentClause = 0; tscTrace("%p Valid SQL: %s pObj:%p", pSql, sql, pObj); int32_t sqlLen = strlen(sql); - if (sqlLen > TSDB_MAX_SQL_LEN) { + if (sqlLen > tsMaxSQLStringLen) { tscError("%p sql too long", pSql); pRes->code = TSDB_CODE_INVALID_SQL; return pRes->code; @@ -948,11 +1012,11 @@ int taos_validate_sql(TAOS *taos, const char *sql) { pSql->asyncTblPos = NULL; if (NULL != pSql->pTableHashList) { - taosCleanUpIntHash(pSql->pTableHashList); + taosCleanUpHashTable(pSql->pTableHashList); pSql->pTableHashList = NULL; } - pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false); + pRes->code = (uint8_t)tsParseSql(pSql, false); int code = pRes->code; tscTrace("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); @@ -961,9 +1025,8 @@ int taos_validate_sql(TAOS *taos, const char *sql) { return code; } -static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t tblListLen) { +static int tscParseTblNameList(SSqlObj *pSql, const char 
*tblNameList, int32_t tblListLen) { // must before clean the sqlcmd object - tscRemoveAllMeterMetaInfo(&pSql->cmd, false); tscCleanSqlCmd(&pSql->cmd); SSqlCmd *pCmd = &pSql->cmd; @@ -972,11 +1035,14 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t pCmd->count = 0; int code = TSDB_CODE_INVALID_METER_ID; - char *str = (char*) tblNameList; + char *str = (char *)tblNameList; + + SQueryInfo *pQueryInfo = NULL; + tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo); - SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd); + SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo); - if ((code = tscAllocPayload(pCmd, tblListLen+16)) != TSDB_CODE_SUCCESS) { + if ((code = tscAllocPayload(pCmd, tblListLen + 16)) != TSDB_CODE_SUCCESS) { return code; } @@ -998,7 +1064,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t strtrim(tblName); len = (uint32_t)strlen(tblName); - + SSQLToken sToken = {.n = len, .type = TK_ID, .z = tblName}; tSQLGetToken(tblName, &sToken.type); @@ -1009,7 +1075,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t return code; } - if ((code = setMeterID(pSql, &sToken, 0)) != TSDB_CODE_SUCCESS) { + if ((code = setMeterID(pMeterMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) { return code; } @@ -1042,7 +1108,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t } int taos_load_table_info(TAOS *taos, const char *tableNameList) { - const int32_t MAX_TABLE_NAME_LENGTH = 12*1024*1024; // 12MB list + const int32_t MAX_TABLE_NAME_LENGTH = 12 * 1024 * 1024; // 12MB list STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) { @@ -1054,6 +1120,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { SSqlRes *pRes = &pSql->res; pRes->numOfTotal = 0; // the number of getting table meta from server + pRes->numOfTotalInCurrentClause = 0; + pRes->code = 0; 
assert(pSql->fp == NULL); @@ -1066,7 +1134,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { return pRes->code; } - char* str = calloc(1, tblListLen + 1); + char *str = calloc(1, tblListLen + 1); if (str == NULL) { pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; tscError("%p failed to malloc sql string buffer", pSql); @@ -1074,7 +1142,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { } strtolower(str, tableNameList); - pRes->code = (uint8_t) tscParseTblNameList(pSql, str, tblListLen); + pRes->code = (uint8_t)tscParseTblNameList(pSql, str, tblListLen); /* * set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query. diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 288f906594f6c534376f6540505a0884d3d1c661..1b5b55352ebca20ec8d4496b76072bba32139568 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -31,9 +31,13 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql); static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer); -static bool isProjectStream(SSqlCmd *pCmd) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); +static int64_t getDelayValueAfterTimewindowClosed(SSqlStream* pStream, int64_t launchDelay) { + return taosGetTimestamp(pStream->precision) + launchDelay - pStream->stime - 1; +} + +static bool isProjectStream(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId != TSDB_FUNC_PRJ) { return false; } @@ -66,27 +70,29 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) { pSql->fp = tscProcessStreamQueryCallback; pSql->param = pStream; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + + 
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); - int code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); + int code = tscGetMeterMeta(pSql, pMeterMetaInfo); pSql->res.code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; - if (code == 0 && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - code = tscGetMetricMeta(pSql); + if (code == 0 && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { + code = tscGetMetricMeta(pSql, 0); pSql->res.code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; } - tscTansformSQLFunctionForMetricQuery(&pSql->cmd); + tscTansformSQLFunctionForSTableQuery(pQueryInfo); // failed to get meter/metric meta, retry in 10sec. if (code != TSDB_CODE_SUCCESS) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p,get metermeta failed, retry in %lldms", pStream->pSql, pStream, retryDelayTime); - + tscError("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime); + tscSetRetryTimer(pStream, pSql, retryDelayTime); return; } @@ -105,22 +111,23 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) { pStream->numOfRes = 0; // reset the numOfRes. SSqlObj *pSql = pStream->pSql; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); tscTrace("%p add into timer", pSql); - if (isProjectStream(&pSql->cmd)) { + if (isProjectStream(pQueryInfo)) { /* - * pSql->cmd.etime, which is the start time, does not change in case of + * pQueryInfo->etime, which is the start time, does not change in case of * repeat first execution, once the first execution failed. 
*/ - pSql->cmd.stime = pStream->stime; // start time + pQueryInfo->stime = pStream->stime; // start time - pSql->cmd.etime = taosGetTimestamp(pStream->precision); // end time - if (pSql->cmd.etime > pStream->etime) { - pSql->cmd.etime = pStream->etime; + pQueryInfo->etime = taosGetTimestamp(pStream->precision); // end time + if (pQueryInfo->etime > pStream->etime) { + pQueryInfo->etime = pStream->etime; } } else { - pSql->cmd.stime = pStream->stime - pStream->interval; - pSql->cmd.etime = pStream->stime - 1; + pQueryInfo->stime = pStream->stime - pStream->interval; + pQueryInfo->etime = pStream->stime - 1; } // launch stream computing in a new thread @@ -136,12 +143,12 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf SSqlStream *pStream = (SSqlStream *)param; if (tres == NULL || numOfRows < 0) { int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p, query data failed, code:%d, retry in %lldms", pStream->pSql, pStream, numOfRows, + tscError("%p stream:%p, query data failed, code:%d, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows, retryDelay); - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0, 0); tscClearMeterMetaInfo(pMeterMetaInfo, true); - + tscSetRetryTimer(pStream, pStream->pSql, retryDelay); return; } @@ -158,31 +165,32 @@ static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) { if (timestamp != actualTimestamp) { // reset the timestamp of each agg point by using start time of each interval *((int64_t *)pRes->data) = actualTimestamp; - tscWarn("%p stream:%p, timestamp of points is:%lld, reset to %lld", pSql, pStream, timestamp, actualTimestamp); + tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64 "", pSql, pStream, timestamp, actualTimestamp); } } static void tscProcessStreamRetrieveResult(void *param, TAOS_RES 
*res, int numOfRows) { SSqlStream * pStream = (SSqlStream *)param; SSqlObj * pSql = (SSqlObj *)res; - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0); if (pSql == NULL || numOfRows < 0) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p, retrieve data failed, code:%d, retry in %lldms", pSql, pStream, numOfRows, retryDelayTime); + tscError("%p stream:%p, retrieve data failed, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime); tscClearMeterMetaInfo(pMeterMetaInfo, true); - + tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime); return; } if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful. pStream->numOfRes += numOfRows; - + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + for(int32_t i = 0; i < numOfRows; ++i) { TAOS_ROW row = taos_fetch_row(res); tscTrace("%p stream:%p fetch result", pSql, pStream); - if (isProjectStream(&pSql->cmd)) { + if (isProjectStream(pQueryInfo)) { pStream->stime = *(TSKEY *)row[0]; } else { tscSetTimestampForRes(pStream, pSql); @@ -197,9 +205,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf } else { // numOfRows == 0, all data has been retrieved pStream->useconds += pSql->res.useconds; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if (pStream->numOfRes == 0) { - if (pSql->cmd.interpoType == TSDB_INTERPO_SET_VALUE || pSql->cmd.interpoType == TSDB_INTERPO_NULL) { - SSqlCmd *pCmd = &pSql->cmd; + if (pQueryInfo->interpoType == TSDB_INTERPO_SET_VALUE || pQueryInfo->interpoType == TSDB_INTERPO_NULL) { SSqlRes *pRes = &pSql->res; /* failed to retrieve any result in this retrieve */ @@ -209,12 +218,12 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf void *oldPtr = pSql->res.data; pSql->res.data = tmpRes; + + 
for (int32_t i = 1; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i); + TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i); - for (int32_t i = 1; i < pSql->cmd.fieldsInfo.numOfOutputCols; ++i) { - int16_t offset = tscFieldInfoGetOffset(pCmd, i); - TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); - - assignVal(pSql->res.data + offset, (char *)(&pCmd->defaultVal[i]), pField->bytes, pField->type); + assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->defaultVal[i]), pField->bytes, pField->type); row[i] = pSql->res.data + offset; } @@ -222,7 +231,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf row[0] = pRes->data; // char result[512] = {0}; - // taos_print_row(result, row, pSql->cmd.fieldsInfo.pFields, pSql->cmd.fieldsInfo.numOfOutputCols); + // taos_print_row(result, row, pQueryInfo->fieldsInfo.pFields, pQueryInfo->fieldsInfo.numOfOutputCols); // tscPrint("%p stream:%p query result: %s", pSql, pStream, result); tscTrace("%p stream:%p fetch result", pSql, pStream); @@ -231,18 +240,19 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf pRes->numOfRows = 0; pRes->data = oldPtr; - } else if (isProjectStream(&pSql->cmd)) { + } else if (isProjectStream(pQueryInfo)) { /* no resuls in the query range, retry */ // todo set retry dynamic time int32_t retry = tsProjectExecInterval; - tscError("%p stream:%p, retrieve no data, code:%d, retry in %lldms", pSql, pStream, numOfRows, retry); + tscError("%p stream:%p, retrieve no data, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retry); tscClearSqlMetaInfoForce(&(pStream->pSql->cmd)); + tscSetRetryTimer(pStream, pStream->pSql, retry); return; } } else { - if (isProjectStream(&pSql->cmd)) { + if (isProjectStream(pQueryInfo)) { pStream->stime += 1; } } @@ -257,7 +267,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf } static void 
tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) { - if (isProjectStream(&pSql->cmd)) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + int64_t delay = getDelayValueAfterTimewindowClosed(pStream, timer); + + if (isProjectStream(pQueryInfo)) { int64_t now = taosGetTimestamp(pStream->precision); int64_t etime = now > pStream->etime ? pStream->etime : now; @@ -265,22 +278,22 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) /* * current time window will be closed, since it too early to exceed the maxRetentWindow value */ - tscTrace("%p stream:%p, etime:%lld is too old, exceeds the max retention time window:%lld, stop the stream", + tscTrace("%p stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here - taos_close_stream(pStream); if (pStream->callback) { // Callback function from upper level pStream->callback(pStream->param); } + taos_close_stream(pStream); return; } - - tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", pStream->pSql, pStream, - now + timer, timer, pStream->stime, etime); + + tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream, + now + timer, timer, delay, pStream->stime, etime); } else { - tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", pStream->pSql, pStream, - pStream->stime, timer, pStream->stime - pStream->interval, pStream->stime - 1); + tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. 
delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream, + pStream->stime, timer, delay, pStream->stime - pStream->interval, pStream->stime - 1); } pSql->cmd.command = TSDB_SQL_SELECT; @@ -289,57 +302,72 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) taosTmrReset(tscProcessStreamTimer, timer, pStream, tscTmr, &pStream->pTimer); } +static int64_t getLaunchTimeDelay(const SSqlStream* pStream) { + int64_t delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio); + + int64_t maxDelay = + (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay; + + if (delayDelta > maxDelay) { + delayDelta = maxDelay; + } + + int64_t remainTimeWindow = pStream->slidingTime - delayDelta; + if (maxDelay > remainTimeWindow) { + maxDelay = (remainTimeWindow / 1.5); + } + + int64_t currentDelay = (rand() % maxDelay); // a random number + currentDelay += delayDelta; + assert(currentDelay < pStream->slidingTime); + + return currentDelay; +} + + static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { int64_t timer = 0; - - if (isProjectStream(&pSql->cmd)) { + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if (isProjectStream(pQueryInfo)) { /* * for project query, no mater fetch data successfully or not, next launch will issue * more than the sliding time window */ timer = pStream->slidingTime; if (pStream->stime > pStream->etime) { - tscTrace("%p stream:%p, stime:%lld is larger than end time: %lld, stop the stream", pStream->pSql, pStream, + tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here - taos_close_stream(pStream); if (pStream->callback) { // Callback function from upper level pStream->callback(pStream->param); } + taos_close_stream(pStream); return; } } else { pStream->stime += 
pStream->slidingTime; if ((pStream->stime - pStream->interval) >= pStream->etime) { - tscTrace("%p stream:%p, stime:%ld is larger than end time: %ld, stop the stream", pStream->pSql, pStream, + tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here - taos_close_stream(pStream); if (pStream->callback) { // Callback function from upper level pStream->callback(pStream->param); } + taos_close_stream(pStream); return; } - + timer = pStream->stime - taosGetTimestamp(pStream->precision); if (timer < 0) { timer = 0; } } - int64_t delayDelta = (int64_t)(pStream->slidingTime * 0.1); - delayDelta = (rand() % delayDelta); - - int64_t maxDelay = - (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay; - - if (delayDelta > maxDelay) { - delayDelta = maxDelay; - } - - timer += delayDelta; // a random number + timer += getLaunchTimeDelay(pStream); + if (pStream->precision == TSDB_TIME_PRECISION_MICRO) { timer = timer / 1000L; } @@ -348,64 +376,70 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { } static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { - SSqlCmd *pCmd = &pSql->cmd; - int64_t minIntervalTime = (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? 
tsMinIntervalTime * 1000L : tsMinIntervalTime; - if (pCmd->nAggTimeInterval < minIntervalTime) { - tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%lld", pSql, pStream, - pCmd->nAggTimeInterval, minIntervalTime); - pCmd->nAggTimeInterval = minIntervalTime; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + if (pQueryInfo->nAggTimeInterval < minIntervalTime) { + tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64 "", pSql, pStream, + pQueryInfo->nAggTimeInterval, minIntervalTime); + pQueryInfo->nAggTimeInterval = minIntervalTime; } - pStream->interval = pCmd->nAggTimeInterval; // it shall be derived from sql string + pStream->interval = pQueryInfo->nAggTimeInterval; // it shall be derived from sql string - if (pCmd->nSlidingTime == 0) { - pCmd->nSlidingTime = pCmd->nAggTimeInterval; + if (pQueryInfo->nSlidingTime == 0) { + pQueryInfo->nSlidingTime = pQueryInfo->nAggTimeInterval; } int64_t minSlidingTime = (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? 
tsMinSlidingTime * 1000L : tsMinSlidingTime; - if (pCmd->nSlidingTime < minSlidingTime) { - tscWarn("%p stream:%p, original sliding value:%lld too small, reset to:%lld", pSql, pStream, pCmd->nSlidingTime, - minSlidingTime); + if (pQueryInfo->nSlidingTime == -1) { + pQueryInfo->nSlidingTime = pQueryInfo->nAggTimeInterval; + } else if (pQueryInfo->nSlidingTime < minSlidingTime) { + tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream, + pQueryInfo->nSlidingTime, minSlidingTime); - pCmd->nSlidingTime = minSlidingTime; + pQueryInfo->nSlidingTime = minSlidingTime; } - if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { - tscWarn("%p stream:%p, sliding value:%lld can not be larger than interval range, reset to:%lld", pSql, pStream, - pCmd->nSlidingTime, pCmd->nAggTimeInterval); + if (pQueryInfo->nSlidingTime > pQueryInfo->nAggTimeInterval) { + tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64 "", pSql, pStream, + pQueryInfo->nSlidingTime, pQueryInfo->nAggTimeInterval); - pCmd->nSlidingTime = pCmd->nAggTimeInterval; + pQueryInfo->nSlidingTime = pQueryInfo->nAggTimeInterval; } - pStream->slidingTime = pCmd->nSlidingTime; + pStream->slidingTime = pQueryInfo->nSlidingTime; + + pQueryInfo->nAggTimeInterval = 0; // clear the interval value to avoid the force time window split by query processor + pQueryInfo->nSlidingTime = 0; } static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) { - SSqlCmd *pCmd = &pSql->cmd; - - if (isProjectStream(pCmd)) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + if (isProjectStream(pQueryInfo)) { // no data in table, flush all data till now to destination meter, 10sec delay pStream->interval = tsProjectExecInterval; pStream->slidingTime = tsProjectExecInterval; if (stime != 0) { // first projection start from the latest event timestamp - assert(stime >= pCmd->stime); + assert(stime 
>= pQueryInfo->stime); stime += 1; // exclude the last records from table } else { - stime = pCmd->stime; + stime = pQueryInfo->stime; } } else { // timewindow based aggregation stream if (stime == 0) { // no data in meter till now stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval; - tscWarn("%p stream:%p, last timestamp:0, reset to:%lld", pSql, pStream, stime); + tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64 "", pSql, pStream, stime); } else { int64_t newStime = (stime / pStream->interval) * pStream->interval; if (newStime != stime) { - tscWarn("%p stream:%p, last timestamp:%lld, reset to:%lld", pSql, pStream, stime, newStime); + tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64 "", pSql, pStream, stime, newStime); stime = newStime; } } @@ -418,24 +452,12 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) { int64_t timer = pStream->stime - taosGetTimestamp(pStream->precision); if (timer < 0) timer = 0; - int64_t delayDelta = (int64_t)(pStream->interval * 0.1); - - int64_t maxDelay = - (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay; - if (delayDelta > maxDelay) { - delayDelta = maxDelay; - } - int64_t startDelay = (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsStreamCompStartDelay * 1000L : tsStreamCompStartDelay; - - srand(time(NULL)); - timer += (rand() % delayDelta); // a random number - - if (timer < startDelay || timer > maxDelay) { - timer = (timer % startDelay) + startDelay; - } - + + timer += getLaunchTimeDelay(pStream); + timer += startDelay; + return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? 
timer / 1000L : timer; } @@ -447,7 +469,10 @@ static void setErrorInfo(STscObj* pObj, int32_t code, char* info) { SSqlCmd* pCmd = &pObj->pSql->cmd; pObj->pSql->res.code = code; - strncpy(pCmd->payload, info, pCmd->payloadLen); + + if (info != NULL) { + strncpy(pCmd->payload, info, pCmd->payloadLen); + } } TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), @@ -495,8 +520,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p return NULL; } - // TODO later refactor use enum - pSql->cmd.count = 1; // 1 means sql in stream, allowed the sliding clause. + pSql->cmd.inStream = 1; // 1 means sql in stream, allowed the sliding clause. pRes->code = tscToSQLCmd(pSql, &SQLInfo); SQLInfoDestroy(&SQLInfo); @@ -517,7 +541,8 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p return NULL; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); pStream->fp = fp; pStream->callback = callback; @@ -526,7 +551,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p pStream->precision = pMeterMetaInfo->pMeterMeta->precision; pStream->ctime = taosGetTimestamp(pStream->precision); - pStream->etime = pCmd->etime; + pStream->etime = pQueryInfo->etime; pSql->pStream = pStream; tscAddIntoStreamList(pStream); @@ -537,7 +562,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p int64_t starttime = tscGetLaunchTimestamp(pStream); taosTmrReset(tscProcessStreamTimer, starttime, pStream, tscTmr, &pStream->pTimer); - tscTrace("%p stream:%p is opened, query on:%s, interval:%lld, sliding:%lld, first launched in:%lld, sql:%s", pSql, + tscTrace("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql, 
pStream, pMeterMetaInfo->name, pStream->interval, pStream->slidingTime, starttime, sqlstr); return pStream; diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index bcbcaba4c4a207c07763198db72357459630b977..610c119e6d327c9b8b372136959b07098ffaef2e 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -22,125 +22,410 @@ #include "tsclient.h" #include "tsocket.h" #include "ttime.h" +#include "ttimer.h" #include "tutil.h" +#include "tscUtil.h" +#include "tcache.h" +#include "tscProfile.h" -typedef struct { - void * signature; - char name[TSDB_METER_ID_LEN]; - int mseconds; - TSKEY lastKey; - uint64_t stime; - TAOS_FIELD fields[TSDB_MAX_COLUMNS]; - int numOfFields; - TAOS * taos; - TAOS_RES * result; +typedef struct SSubscriptionProgress { + int64_t uid; + TSKEY key; +} SSubscriptionProgress; + +typedef struct SSub { + void * signature; + char topic[32]; + int64_t lastSyncTime; + int64_t lastConsumeTime; + TAOS * taos; + void * pTimer; + SSqlObj * pSql; + int interval; + TAOS_SUBSCRIBE_CALLBACK fp; + void * param; + int numOfMeters; + SSubscriptionProgress * progress; } SSub; -TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, const char *db, const char *name, int64_t time, int mseconds) { - SSub *pSub; - pSub = (SSub *)malloc(sizeof(SSub)); - if (pSub == NULL) return NULL; - memset(pSub, 0, sizeof(SSub)); +static int tscCompareSubscriptionProgress(const void* a, const void* b) { + const SSubscriptionProgress* x = (const SSubscriptionProgress*)a; + const SSubscriptionProgress* y = (const SSubscriptionProgress*)b; + if (x->uid > y->uid) return 1; + if (x->uid < y->uid) return -1; + return 0; +} + +TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid) { + if (sub == NULL) + return 0; + + SSub* pSub = (SSub*)sub; + for (int s = 0, e = pSub->numOfMeters; s < e;) { + int m = (s + e) / 2; + SSubscriptionProgress* p = pSub->progress + m; + if (p->uid > uid) + e = m; + else if (p->uid < uid) + s = m + 1; + else + 
return p->key; + } + + return 0; +} + +void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts) { + if( sub == NULL) + return; + + SSub* pSub = (SSub*)sub; + for (int s = 0, e = pSub->numOfMeters; s < e;) { + int m = (s + e) / 2; + SSubscriptionProgress* p = pSub->progress + m; + if (p->uid > uid) + e = m; + else if (p->uid < uid) + s = m + 1; + else { + if (ts >= p->key) p->key = ts; + break; + } + } +} + +static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* sql) { + SSub* pSub = calloc(1, sizeof(SSub)); + if (pSub == NULL) { + globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; + tscError("failed to allocate memory for subscription"); + return NULL; + } + + SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); + if (pSql == NULL) { + globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; + tscError("failed to allocate SSqlObj for subscription"); + goto failed; + } + + pSql->signature = pSql; + pSql->pTscObj = pObj; + + char* sqlstr = (char*)malloc(strlen(sql) + 1); + if (sqlstr == NULL) { + tscError("failed to allocate sql string for subscription"); + goto failed; + } + strcpy(sqlstr, sql); + strtolower(sqlstr, sqlstr); + pSql->sqlstr = sqlstr; + + tsem_init(&pSql->rspSem, 0, 0); + tsem_init(&pSql->emptyRspSem, 0, 1); + + SSqlRes *pRes = &pSql->res; + pRes->numOfRows = 1; + pRes->numOfTotal = 0; + + pSql->pSubscription = pSub; + pSub->pSql = pSql; pSub->signature = pSub; - strcpy(pSub->name, name); - pSub->mseconds = mseconds; - pSub->lastKey = time; - if (pSub->lastKey == 0) { - pSub->lastKey = taosGetTimestampMs(); + strncpy(pSub->topic, topic, sizeof(pSub->topic)); + pSub->topic[sizeof(pSub->topic) - 1] = 0; + return pSub; + +failed: + if (sqlstr != NULL) { + free(sqlstr); + } + if (pSql != NULL) { + free(pSql); + } + free(pSub); + return NULL; +} + + +static void tscProcessSubscriptionTimer(void *handle, void *tmrId) { + SSub *pSub = (SSub *)handle; + if (pSub == NULL || pSub->pTimer != tmrId) return; + + TAOS_RES* res = taos_consume(pSub); + if (res 
!= NULL) { + pSub->fp(pSub, res, pSub->param, 0); + } + + taosTmrReset(tscProcessSubscriptionTimer, pSub->interval, pSub, tscTmr, &pSub->pTimer); +} + + +int tscUpdateSubscription(STscObj* pObj, SSub* pSub) { + int code = (uint8_t)tsParseSql(pSub->pSql, false); + if (code != TSDB_CODE_SUCCESS) { + tscError("failed to parse sql statement: %s", pSub->topic); + return 0; + } + + SSqlCmd* pCmd = &pSub->pSql->cmd; + if (pCmd->command != TSDB_SQL_SELECT) { + tscError("only 'select' statement is allowed in subscription: %s", pSub->topic); + return 0; + } + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0, 0); + int numOfMeters = 0; + if (!UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { + SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta; + for (int32_t i = 0; i < pMetricMeta->numOfVnodes; i++) { + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, i); + numOfMeters += pVnodeSidList->numOfSids; + } } - taos_init(); - pSub->taos = taos_connect(host, user, pass, NULL, 0); - if (pSub->taos == NULL) { - tfree(pSub); + SSubscriptionProgress* progress = (SSubscriptionProgress*)calloc(numOfMeters, sizeof(SSubscriptionProgress)); + if (progress == NULL) { + tscError("failed to allocate memory for progress: %s", pSub->topic); + return 0; + } + + if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { + numOfMeters = 1; + int64_t uid = pMeterMetaInfo->pMeterMeta->uid; + progress[0].uid = uid; + progress[0].key = tscGetSubscriptionProgress(pSub, uid); } else { - char qstr[128]; - sprintf(qstr, "use %s", db); - int res = taos_query(pSub->taos, qstr); - if (res != 0) { - tscError("failed to open DB:%s", db); - taos_close(pSub->taos); - tfree(pSub); - } else { - sprintf(qstr, "select * from %s where _c0 > now+1000d", pSub->name); - if (taos_query(pSub->taos, qstr)) { - tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos)); - taos_close(pSub->taos); - tfree(pSub); - return NULL; + SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta; + numOfMeters = 0; 
+ for (int32_t i = 0; i < pMetricMeta->numOfVnodes; i++) { + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, i); + for (int32_t j = 0; j < pVnodeSidList->numOfSids; j++) { + SMeterSidExtInfo *pMeterInfo = tscGetMeterSidInfo(pVnodeSidList, j); + int64_t uid = pMeterInfo->uid; + progress[numOfMeters].uid = uid; + progress[numOfMeters++].key = tscGetSubscriptionProgress(pSub, uid); } - pSub->result = taos_use_result(pSub->taos); - pSub->numOfFields = taos_num_fields(pSub->result); - memcpy(pSub->fields, taos_fetch_fields(pSub->result), sizeof(TAOS_FIELD) * pSub->numOfFields); } + qsort(progress, numOfMeters, sizeof(SSubscriptionProgress), tscCompareSubscriptionProgress); } - return pSub; + free(pSub->progress); + pSub->numOfMeters = numOfMeters; + pSub->progress = progress; + + pSub->lastSyncTime = taosGetTimestampMs(); + + return 1; } -TAOS_ROW taos_consume(TAOS_SUB *tsub) { - SSub * pSub = (SSub *)tsub; - TAOS_ROW row; - char qstr[256]; - if (pSub == NULL) return NULL; - if (pSub->signature != pSub) return NULL; - - while (1) { - if (pSub->result != NULL) { - row = taos_fetch_row(pSub->result); - if (row != NULL) { - pSub->lastKey = *((uint64_t *)row[0]); - return row; - } +static int tscLoadSubscriptionProgress(SSub* pSub) { + char buf[TSDB_MAX_SQL_LEN]; + sprintf(buf, "%s/subscribe/%s", dataDir, pSub->topic); - taos_free_result(pSub->result); - pSub->result = NULL; - uint64_t etime = taosGetTimestampMs(); - int64_t mseconds = pSub->mseconds - etime + pSub->stime; - if (mseconds < 0) mseconds = 0; - taosMsleep((int)mseconds); - } + FILE* fp = fopen(buf, "r"); + if (fp == NULL) { + tscTrace("subscription progress file does not exist: %s", pSub->topic); + return 1; + } - pSub->stime = taosGetTimestampMs(); + if (fgets(buf, sizeof(buf), fp) == NULL) { + tscTrace("invalid subscription progress file: %s", pSub->topic); + fclose(fp); + return 0; + } - sprintf(qstr, "select * from %s where _c0 > %lld order by _c0 asc", pSub->name, pSub->lastKey); - if 
(taos_query(pSub->taos, qstr)) { - tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos)); - return NULL; + for (int i = 0; i < sizeof(buf); i++) { + if (buf[i] == 0) + break; + if (buf[i] == '\r' || buf[i] == '\n') { + buf[i] = 0; + break; } + } + if (strcmp(buf, pSub->pSql->sqlstr) != 0) { + tscTrace("subscription sql statement mismatch: %s", pSub->topic); + fclose(fp); + return 0; + } - pSub->result = taos_use_result(pSub->taos); + if (fgets(buf, sizeof(buf), fp) == NULL || atoi(buf) < 0) { + tscTrace("invalid subscription progress file: %s", pSub->topic); + fclose(fp); + return 0; + } - if (pSub->result == NULL) { - tscTrace("failed to get result, reason:%s", taos_errstr(pSub->taos)); - return NULL; + int numOfMeters = atoi(buf); + SSubscriptionProgress* progress = calloc(numOfMeters, sizeof(SSubscriptionProgress)); + for (int i = 0; i < numOfMeters; i++) { + if (fgets(buf, sizeof(buf), fp) == NULL) { + fclose(fp); + free(progress); + return 0; } + int64_t uid, key; + sscanf(buf, "%" SCNd64 ":%" SCNd64, &uid, &key); + progress[i].uid = uid; + progress[i].key = key; } - return NULL; + fclose(fp); + + qsort(progress, numOfMeters, sizeof(SSubscriptionProgress), tscCompareSubscriptionProgress); + pSub->numOfMeters = numOfMeters; + pSub->progress = progress; + tscTrace("subscription progress loaded, %d tables: %s", numOfMeters, pSub->topic); + return 1; } -void taos_unsubscribe(TAOS_SUB *tsub) { - SSub *pSub = (SSub *)tsub; +void tscSaveSubscriptionProgress(void* sub) { + SSub* pSub = (SSub*)sub; - if (pSub == NULL) return; - if (pSub->signature != pSub) return; + char path[256]; + sprintf(path, "%s/subscribe", dataDir); + if (access(path, 0) != 0) { + mkdir(path, 0777); + } - taos_close(pSub->taos); - free(pSub); + sprintf(path, "%s/subscribe/%s", dataDir, pSub->topic); + FILE* fp = fopen(path, "w+"); + if (fp == NULL) { + tscError("failed to create progress file for subscription: %s", pSub->topic); + return; + } + + fputs(pSub->pSql->sqlstr, fp); + 
fprintf(fp, "\n%d\n", pSub->numOfMeters); + for (int i = 0; i < pSub->numOfMeters; i++) { + int64_t uid = pSub->progress[i].uid; + TSKEY key = pSub->progress[i].key; + fprintf(fp, "%" PRId64 ":%" PRId64 "\n", uid, key); + } + + fclose(fp); } -int taos_subfields_count(TAOS_SUB *tsub) { +TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval) { + STscObj* pObj = (STscObj*)taos; + if (pObj == NULL || pObj->signature != pObj) { + globalCode = TSDB_CODE_DISCONNECTED; + tscError("connection disconnected"); + return NULL; + } + + SSub* pSub = tscCreateSubscription(pObj, topic, sql); + if (pSub == NULL) { + return NULL; + } + pSub->taos = taos; + + if (restart) { + tscTrace("restart subscription: %s", topic); + } else { + tscLoadSubscriptionProgress(pSub); + } + + if (!tscUpdateSubscription(pObj, pSub)) { + taos_unsubscribe(pSub, 1); + return NULL; + } + + pSub->interval = interval; + if (fp != NULL) { + tscTrace("asynchronize subscription, create new timer", topic); + pSub->fp = fp; + pSub->param = param; + taosTmrReset(tscProcessSubscriptionTimer, interval, pSub, tscTmr, &pSub->pTimer); + } + + return pSub; +} + +void taos_free_result_imp(SSqlObj* pSql, int keepCmd); + +TAOS_RES *taos_consume(TAOS_SUB *tsub) { SSub *pSub = (SSub *)tsub; + if (pSub == NULL) return NULL; - return pSub->numOfFields; + tscSaveSubscriptionProgress(pSub); + + SSqlObj* pSql = pSub->pSql; + SSqlRes *pRes = &pSql->res; + + if (pSub->pTimer == NULL) { + int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime; + if (duration < (int64_t)(pSub->interval)) { + tscTrace("subscription consume too frequently, blocking..."); + taosMsleep(pSub->interval - (int32_t)duration); + } + } + + for (int retry = 0; retry < 3; retry++) { + tscRemoveFromSqlList(pSql); + + if (taosGetTimestampMs() - pSub->lastSyncTime > 10 * 60 * 1000) { + tscTrace("begin meter synchronization"); + char* sqlstr = pSql->sqlstr; + pSql->sqlstr = 
NULL; + taos_free_result_imp(pSql, 0); + pSql->sqlstr = sqlstr; + taosClearDataCache(tscCacheHandle); + if (!tscUpdateSubscription(pSub->taos, pSub)) return NULL; + tscTrace("meter synchronization completed"); + } else { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + uint16_t type = pQueryInfo->type; + taos_free_result_imp(pSql, 1); + pRes->numOfRows = 1; + pRes->numOfTotal = 0; + pRes->qhandle = 0; + pSql->thandle = NULL; + pSql->cmd.command = TSDB_SQL_SELECT; + pQueryInfo->type = type; + + tscGetMeterMetaInfo(&pSql->cmd, 0, 0)->vnodeIndex = 0; + } + + tscDoQuery(pSql); + if (pRes->code != TSDB_CODE_NOT_ACTIVE_TABLE) { + break; + } + // meter was removed, make sync time zero, so that next retry will + // do synchronization first + pSub->lastSyncTime = 0; + } + + if (pRes->code != TSDB_CODE_SUCCESS) { + tscError("failed to query data, error code=%d", pRes->code); + tscRemoveFromSqlList(pSql); + return NULL; + } + + pSub->lastConsumeTime = taosGetTimestampMs(); + return pSql; } -TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub) { +void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) { SSub *pSub = (SSub *)tsub; + if (pSub == NULL || pSub->signature != pSub) return; - return pSub->fields; + if (pSub->pTimer != NULL) { + taosTmrStop(pSub->pTimer); + } + + if (keepProgress) { + tscSaveSubscriptionProgress(pSub); + } else { + char path[256]; + sprintf(path, "%s/subscribe/%s", dataDir, pSub->topic); + remove(path); + } + + tscFreeSqlObj(pSub->pSql); + free(pSub->progress); + memset(pSub, 0, sizeof(*pSub)); + free(pSub); } diff --git a/src/client/src/tscSyntaxtreefunction.c b/src/client/src/tscSyntaxtreefunction.c index 914053f2f17461e0f5e7ffbd56691e7fd206cd49..1d82b0f239572676c204025fe535704c192e4da6 100644 --- a/src/client/src/tscSyntaxtreefunction.c +++ b/src/client/src/tscSyntaxtreefunction.c @@ -26,7 +26,7 @@ int32_t step = ((_ord) == TSQL_SO_ASC) ? 
1 : -1; \ \ if ((len1) == (len2)) { \ - for (; i < (len2) && i >= 0; i += step, (out) += step) { \ + for (; i < (len2) && i >= 0; i += step, (out) += 1) { \ if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -34,7 +34,7 @@ *(out) = (double)(left)[i] op(right)[i]; \ } \ } else if ((len1) == 1) { \ - for (; i >= 0 && i < (len2); i += step, (out) += step) { \ + for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -42,7 +42,7 @@ *(out) = (double)(left)[0] op(right)[i]; \ } \ } else if ((len2) == 1) { \ - for (; i >= 0 && i < (len1); i += step, (out) += step) { \ + for (; i >= 0 && i < (len1); i += step, (out) += 1) { \ if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -58,7 +58,7 @@ int32_t step = (_ord == TSQL_SO_ASC) ? 
1 : -1; \ \ if (len1 == (len2)) { \ - for (; i >= 0 && i < (len2); i += step, (out) += step) { \ + for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ if (isNull((char *)&(left[i]), _left_type) || isNull((char *)&(right[i]), _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -66,7 +66,7 @@ *(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[i])) * (right)[i]; \ } \ } else if (len1 == 1) { \ - for (; i >= 0 && i < (len2); i += step, (out) += step) { \ + for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ if (isNull((char *)(left), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -74,7 +74,7 @@ *(out) = (double)(left)[0] - ((int64_t)(((double)(left)[0]) / (right)[i])) * (right)[i]; \ } \ } else if ((len2) == 1) { \ - for (; i >= 0 && i < len1; i += step, (out) += step) { \ + for (; i >= 0 && i < len1; i += step, (out) += 1) { \ if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)(right), _right_type)) { \ setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \ continue; \ @@ -112,7 +112,7 @@ void calc_fn_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRi int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -121,7 +121,7 @@ void calc_fn_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[i] + pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -130,7 +130,7 @@ void calc_fn_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[0] + pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -310,7 +310,7 @@ void calc_fn_i32_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRi int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)&(pOutput[i]), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -318,7 +318,7 @@ void calc_fn_i32_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[i] - pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -326,7 +326,7 @@ void calc_fn_i32_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[0] - pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -521,7 +521,7 @@ void calc_fn_i32_i32_multi(void *left, void *right, int32_t numLeft, int32_t num int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -530,7 +530,7 @@ void calc_fn_i32_i32_multi(void *left, void *right, int32_t numLeft, int32_t num *pOutput = (double)pLeft[i] * pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -539,7 +539,7 @@ void calc_fn_i32_i32_multi(void *left, void *right, int32_t numLeft, int32_t num *pOutput = (double)pLeft[0] * pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -719,7 +719,7 @@ void calc_fn_i32_i32_div(void *left, void *right, int32_t numLeft, int32_t numRi int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -728,7 +728,7 @@ void calc_fn_i32_i32_div(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[i] / pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -737,7 +737,7 @@ void calc_fn_i32_i32_div(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[0] / pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -933,7 +933,7 @@ void calc_fn_i32_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRi int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -942,7 +942,7 @@ void calc_fn_i32_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[i])) * pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -951,7 +951,7 @@ void calc_fn_i32_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRi *pOutput = (double)pLeft[0] - ((int64_t)(((double)pLeft[0]) / pRight[i])) * pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -991,7 +991,7 @@ void calc_fn_i32_d_rem(void *left, void *right, int32_t numLeft, int32_t numRigh int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; if (numLeft == numRight) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -1000,7 +1000,7 @@ void calc_fn_i32_d_rem(void *left, void *right, int32_t numLeft, int32_t numRigh *pOutput = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[i])) * pRight[i]; } } else if (numLeft == 1) { - for (; i >= 0 && i < numRight; i += step, pOutput += step) { + for (; i >= 0 && i < numRight; i += step, pOutput += 1) { if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; @@ -1009,7 +1009,7 @@ void calc_fn_i32_d_rem(void *left, void *right, int32_t numLeft, int32_t numRigh *pOutput = (double)pLeft[0] - ((int64_t)(((double)pLeft[0]) / pRight[i])) * pRight[i]; } } else if (numRight == 1) { - for (; i >= 0 && i < numLeft; i += step, pOutput += step) { + for (; i >= 0 && i < numLeft; i += step, pOutput += 1) { if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) { setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); continue; diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 60b90ac32884d9a3f55d2e9dbfca356f0725d734..6c685b06b4e109cee43262a2ae382f56c495d9ce 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -48,6 +48,7 @@ static pthread_once_t tscinit = PTHREAD_ONCE_INIT; extern int tsTscEnableRecordSql; extern int tsNumOfLogLines; void taosInitNote(int numOfNoteLines, int maxNotes, char* lable); +void deltaToUtcInitOnce(); void tscCheckDiskUsage(void *para, void *unused) { taosGetDisk(); @@ -60,6 
+61,7 @@ void taos_init_imp() { SRpcInit rpcInit; srand(taosGetTimestampSec()); + deltaToUtcInitOnce(); if (tscEmbedded == 0) { /* @@ -93,7 +95,6 @@ void taos_init_imp() { taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note"); } -#ifdef CLUSTER tscMgmtIpList.numOfIps = 2; strcpy(tscMgmtIpList.ipstr[0], tsMasterIp); tscMgmtIpList.ip[0] = inet_addr(tsMasterIp); @@ -106,7 +107,6 @@ void taos_init_imp() { strcpy(tscMgmtIpList.ipstr[2], tsSecondIp); tscMgmtIpList.ip[2] = inet_addr(tsSecondIp); } -#endif tscInitMsgs(); slaveIndex = rand(); @@ -198,7 +198,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) { switch (option) { case TSDB_OPTION_CONFIGDIR: cfg = tsGetConfigOption("configDir"); - if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + assert(cfg != NULL); + + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { strncpy(configDir, pStr, TSDB_FILENAME_LEN); cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION; tscPrint("set config file directory:%s", pStr); @@ -210,7 +212,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) { case TSDB_OPTION_SHELL_ACTIVITY_TIMER: cfg = tsGetConfigOption("shellActivityTimer"); - if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + assert(cfg != NULL); + + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { tsShellActivityTimer = atoi(pStr); if (tsShellActivityTimer < 1) tsShellActivityTimer = 1; if (tsShellActivityTimer > 3600) tsShellActivityTimer = 3600; @@ -224,13 +228,15 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) { case TSDB_OPTION_LOCALE: { // set locale cfg = tsGetConfigOption("locale"); + assert(cfg != NULL); + size_t len = strlen(pStr); if (len == 0 || len > TSDB_LOCALE_LEN) { tscPrint("Invalid locale:%s, use default", pStr); return -1; } - if (cfg && cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { char sep = '.'; if (strlen(tsLocale) == 0) { // locale does not set yet @@ -285,13 +291,15 @@ static int 
taos_options_imp(TSDB_OPTION option, const char *pStr) { case TSDB_OPTION_CHARSET: { /* set charset will override the value of charset, assigned during system locale changed */ cfg = tsGetConfigOption("charset"); + assert(cfg != NULL); + size_t len = strlen(pStr); if (len == 0 || len > TSDB_LOCALE_LEN) { tscPrint("failed to set charset:%s", pStr); return -1; } - if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { if (taosValidateEncodec(pStr)) { if (strlen(tsCharset) == 0) { tscPrint("charset is set:%s", pStr); @@ -314,7 +322,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) { case TSDB_OPTION_TIMEZONE: cfg = tsGetConfigOption("timezone"); - if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + assert(cfg != NULL); + + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { strcpy(tsTimezone, pStr); tsSetTimeZone(); cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION; @@ -327,7 +337,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) { case TSDB_OPTION_SOCKET_TYPE: cfg = tsGetConfigOption("sockettype"); - if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + assert(cfg != NULL); + + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { if (strcasecmp(pStr, TAOS_SOCKET_TYPE_NAME_UDP) != 0 && strcasecmp(pStr, TAOS_SOCKET_TYPE_NAME_TCP) != 0) { tscError("only 'tcp' or 'udp' allowed for configuring the socket type"); return -1; @@ -340,6 +352,7 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) { break; default: + // TODO return the correct error code to client in the format for taos_errstr() tscError("Invalid option %d", option); return -1; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 52e19ce244956b5f08aaac43843e82ed1c4add4c..22027ab54a28bee78bba67ffa753136b7d99d0fe 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -14,7 +14,8 @@ */ #include "os.h" -#include "ihash.h" +#include "tscUtil.h" +#include "hash.h" #include 
"taosmsg.h" #include "tcache.h" #include "tkey.h" @@ -22,7 +23,6 @@ #include "tscJoinProcess.h" #include "tscProfile.h" #include "tscSecondaryMerge.h" -#include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" #include "tsqldef.h" @@ -37,9 +37,9 @@ * fullmetername + '.' + '(nil)' + '.' + '(nil)' + relation + '.' + [tagId1, * tagId2,...] + '.' + group_orderType */ -void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) { +void tscGetMetricMetaCacheKey(SQueryInfo* pQueryInfo, char* str, uint64_t uid) { int32_t index = -1; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoByUid(pCmd, uid, &index); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoByUid(pQueryInfo, uid, &index); int32_t len = 0; char tagIdBuf[128] = {0}; @@ -47,11 +47,10 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) { len += sprintf(&tagIdBuf[len], "%d,", pMeterMetaInfo->tagColumnIndex[i]); } - STagCond* pTagCond = &pCmd->tagCond; + STagCond* pTagCond = &pQueryInfo->tagCond; assert(len < tListLen(tagIdBuf)); const int32_t maxKeySize = TSDB_MAX_TAGS_LEN; // allowed max key size - char* tmp = calloc(1, TSDB_MAX_SQL_LEN); SCond* cond = tsGetMetricQueryCondPos(pTagCond, uid); @@ -60,12 +59,23 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) { sprintf(join, "%s,%s", pTagCond->joinInfo.left.meterId, pTagCond->joinInfo.right.meterId); } - int32_t keyLen = - snprintf(tmp, TSDB_MAX_SQL_LEN, "%s,%s,%s,%d,%s,[%s],%d", pMeterMetaInfo->name, - (cond != NULL ? cond->cond.z : NULL), pTagCond->tbnameCond.cond.n > 0 ? pTagCond->tbnameCond.cond.z : NULL, - pTagCond->relType, join, tagIdBuf, pCmd->groupbyExpr.orderType); + // estimate the buffer size + size_t tbnameCondLen = pTagCond->tbnameCond.cond != NULL ? 
strlen(pTagCond->tbnameCond.cond) : 0; + size_t redundantLen = 20; + + size_t bufSize = strlen(pMeterMetaInfo->name) + tbnameCondLen + strlen(join) + strlen(tagIdBuf); + if (cond != NULL) { + bufSize += strlen(cond->cond); + } - assert(keyLen <= TSDB_MAX_SQL_LEN); + bufSize = (size_t)((bufSize + redundantLen) * 1.5); + char* tmp = calloc(1, bufSize); + + int32_t keyLen = snprintf(tmp, bufSize, "%s,%s,%s,%d,%s,[%s],%d", pMeterMetaInfo->name, + (cond != NULL ? cond->cond : NULL), (tbnameCondLen > 0 ? pTagCond->tbnameCond.cond : NULL), + pTagCond->relType, join, tagIdBuf, pQueryInfo->groupbyExpr.orderType); + + assert(keyLen <= bufSize); if (keyLen < maxKeySize) { strcpy(str, tmp); @@ -73,7 +83,7 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) { MD5_CTX ctx; MD5Init(&ctx); - MD5Update(&ctx, (uint8_t*) tmp, keyLen); + MD5Update(&ctx, (uint8_t*)tmp, keyLen); char* pStr = base64_encode(ctx.digest, tListLen(ctx.digest)); strcpy(str, pStr); } @@ -99,19 +109,21 @@ void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str) { SCond* pDest = &pTagCond->cond[pTagCond->numOfTagCond]; pDest->uid = uid; - pDest->cond = SStringCreate(str); + pDest->cond = strdup(str); pTagCond->numOfTagCond += 1; } bool tscQueryOnMetric(SSqlCmd* pCmd) { - return ((pCmd->type & TSDB_QUERY_TYPE_STABLE_QUERY) == TSDB_QUERY_TYPE_STABLE_QUERY) && + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + return ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) == TSDB_QUERY_TYPE_STABLE_QUERY) && (pCmd->msgType == TSDB_MSG_TYPE_QUERY); } -bool tscQueryMetricTags(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - if (tscSqlExprGet(pCmd, i)->functionId != TSDB_FUNC_TAGPRJ) { +bool tscQueryMetricTags(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + if (tscSqlExprGet(pQueryInfo, i)->functionId != TSDB_FUNC_TAGPRJ) { return false; } } @@ -123,8 +135,10 @@ bool 
tscIsSelectivityWithTagQuery(SSqlCmd* pCmd) { bool hasTags = false; int32_t numOfSelectivity = 0; - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - int32_t functId = tscSqlExprGet(pCmd, i)->functionId; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int32_t functId = tscSqlExprGet(pQueryInfo, i)->functionId; if (functId == TSDB_FUNC_TAG_DUMMY) { hasTags = true; continue; @@ -191,52 +205,85 @@ SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx) { return (SMeterSidExtInfo*)(pSidList->pSidExtInfoList[idx] + (char*)pSidList); } -bool tscIsTwoStageMergeMetricQuery(SSqlCmd* pCmd) { - assert(pCmd != NULL); +bool tscIsTwoStageMergeMetricQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) { + if (pQueryInfo == NULL) { + return false; + } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); if (pMeterMetaInfo == NULL || pMeterMetaInfo->pMetricMeta == NULL) { return false; } + + if ((pQueryInfo->type & TSDB_QUERY_TYPE_FREE_RESOURCE) == TSDB_QUERY_TYPE_FREE_RESOURCE) { + return false; + } - // for projection query, iterate all qualified vnodes sequentially - if (tscProjectionQueryOnMetric(pCmd)) { + // for ordered projection query, iterate all qualified vnodes sequentially + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, tableIndex)) { return false; } - if (((pCmd->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) != TSDB_QUERY_TYPE_STABLE_SUBQUERY) && - pCmd->command == TSDB_SQL_SELECT) { - return UTIL_METER_IS_METRIC(pMeterMetaInfo); + if (((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) != TSDB_QUERY_TYPE_STABLE_SUBQUERY) && + pQueryInfo->command == TSDB_SQL_SELECT) { + return UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo); } return false; } -bool tscProjectionQueryOnMetric(SSqlCmd* pCmd) { - assert(pCmd != NULL); - - SMeterMetaInfo* pMeterMetaInfo = 
tscGetMeterMetaInfo(pCmd, 0); - +bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); + /* - * In following cases, return false for project query on metric - * 1. failed to get metermeta from server; 2. not a metric; 3. limit 0; 4. show query, instead of a select query + * In following cases, return false for non ordered project query on super table + * 1. failed to get metermeta from server; 2. not a super table; 3. limitation is 0; + * 4. show queries, instead of a select query */ - if (pMeterMetaInfo == NULL || !UTIL_METER_IS_METRIC(pMeterMetaInfo) || - pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pCmd->exprsInfo.numOfExprs == 0) { + if (pMeterMetaInfo == NULL || !UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo) || + pQueryInfo->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pQueryInfo->exprsInfo.numOfExprs == 0) { return false; } - + // only query on tag, not a projection query - if (tscQueryMetricTags(pCmd)) { + if (tscQueryMetricTags(pQueryInfo)) { + return false; + } + + // for project query, only the following two function is allowed + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; + if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && functionId != TSDB_FUNC_TAG && + functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) { + return false; + } + } + + return true; +} + +bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) { + if (!tscIsProjectionQueryOnSTable(pQueryInfo, tableIndex)) { return false; } + + // order by column exists, not a non-ordered projection query + return pQueryInfo->order.orderColId < 0; +} - //for project query, only the following two function is allowed - for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); - int32_t 
functionId = pExpr->functionId; - if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && - functionId != TSDB_FUNC_TAG && functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) { +bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) { + if (!tscIsProjectionQueryOnSTable(pQueryInfo, tableIndex)) { + return false; + } + + // order by column exists, a non-ordered projection query + return pQueryInfo->order.orderColId >= 0; +} + +bool tscProjectionQueryOnTable(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) { + int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; + if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TS) { return false; } } @@ -244,9 +291,9 @@ bool tscProjectionQueryOnMetric(SSqlCmd* pCmd) { return true; } -bool tscIsPointInterpQuery(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); +bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr == NULL) { return false; } @@ -263,9 +310,9 @@ bool tscIsPointInterpQuery(SSqlCmd* pCmd) { return true; } -bool tscIsTWAQuery(SSqlCmd* pCmd) { - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); +bool tscIsTWAQuery(SQueryInfo* pQueryInfo) { + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr == NULL) { continue; } @@ -279,81 +326,122 @@ bool tscIsTWAQuery(SSqlCmd* pCmd) { return false; } -void tscClearInterpInfo(SSqlCmd* pCmd) { - if (!tscIsPointInterpQuery(pCmd)) { +void tscClearInterpInfo(SQueryInfo* pQueryInfo) { + if (!tscIsPointInterpQuery(pQueryInfo)) { return; } - pCmd->interpoType = TSDB_INTERPO_NONE; - memset(pCmd->defaultVal, 0, sizeof(pCmd->defaultVal)); + 
pQueryInfo->interpoType = TSDB_INTERPO_NONE; + tfree(pQueryInfo->defaultVal); } void tscClearSqlMetaInfoForce(SSqlCmd* pCmd) { /* remove the metermeta/metricmeta in cache */ - // taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMeterMeta), - // true); - // taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMetricMeta), - // true); + // taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMeterMeta), true); + // taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMetricMeta), true); } -int32_t tscCreateResPointerInfo(SSqlCmd* pCmd, SSqlRes* pRes) { +int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) { if (pRes->tsrow == NULL) { pRes->numOfnchar = 0; - int32_t numOfOutputCols = pCmd->fieldsInfo.numOfOutputCols; - + + int32_t numOfOutputCols = pQueryInfo->fieldsInfo.numOfOutputCols; for (int32_t i = 0; i < numOfOutputCols; ++i) { - TAOS_FIELD* pField = tscFieldInfoGetField(pCmd, i); + TAOS_FIELD* pField = tscFieldInfoGetField(pQueryInfo, i); if (pField->type == TSDB_DATA_TYPE_NCHAR) { pRes->numOfnchar++; } } - + pRes->tsrow = calloc(1, (POINTER_BYTES + sizeof(short)) * numOfOutputCols + POINTER_BYTES * pRes->numOfnchar); - if (pRes->tsrow == NULL) { + pRes->bytes = calloc(numOfOutputCols, sizeof(short)); + + if (pRes->numOfnchar > 0) { + pRes->buffer = calloc(POINTER_BYTES, pRes->numOfnchar); + } + + // not enough memory + if (pRes->tsrow == NULL || pRes->bytes == NULL || (pRes->buffer == NULL && pRes->numOfnchar > 0)) { + tfree(pRes->tsrow); + tfree(pRes->bytes); + tfree(pRes->buffer); + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; return pRes->code; } - - pRes->bytes = (short*)((char*)pRes->tsrow + POINTER_BYTES * numOfOutputCols); - if (pRes->numOfnchar > 0) { - pRes->buffer = (char**)((char*)pRes->bytes + sizeof(short) * numOfOutputCols); - } } return TSDB_CODE_SUCCESS; } void tscDestroyResPointerInfo(SSqlRes* pRes) { - // free all buffers containing the multibyte string - for (int i = 0; i < pRes->numOfnchar; i++) { - if 
(pRes->buffer[i] != NULL) { + if (pRes->buffer != NULL) { + assert(pRes->numOfnchar > 0); + // free all buffers containing the multibyte string + for (int i = 0; i < pRes->numOfnchar; i++) { tfree(pRes->buffer[i]); } + + pRes->numOfnchar = 0; } - + + tfree(pRes->pRsp); tfree(pRes->tsrow); - - pRes->numOfnchar = 0; - pRes->buffer = NULL; - pRes->bytes = NULL; + + tfree(pRes->pGroupRec); + tfree(pRes->pColumnIndex); + tfree(pRes->buffer); + tfree(pRes->bytes); + + pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free } void tscFreeSqlCmdData(SSqlCmd* pCmd) { pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); + tscFreeSubqueryInfo(pCmd); +} - tscTagCondRelease(&pCmd->tagCond); - tscClearFieldInfo(&pCmd->fieldsInfo); +/* + * this function must not change the pRes->code value, since it may be used later. + */ +void tscFreeResData(SSqlObj* pSql) { + SSqlRes* pRes = &pSql->res; + + pRes->row = 0; + + pRes->rspType = 0; + pRes->rspLen = 0; + pRes->row = 0; + + pRes->numOfRows = 0; + pRes->numOfTotal = 0; + pRes->numOfTotalInCurrentClause = 0; + + pRes->numOfGroups = 0; + pRes->precision = 0; + pRes->qhandle = 0; + + pRes->offset = 0; + pRes->useconds = 0; + + tscDestroyLocalReducer(pSql); + + tscDestroyResPointerInfo(pRes); +} + +void tscFreeSqlResult(SSqlObj* pSql) { + tfree(pSql->res.pRsp); + pSql->res.row = 0; + pSql->res.numOfRows = 0; + pSql->res.numOfTotal = 0; - tfree(pCmd->exprsInfo.pExprs); - memset(&pCmd->exprsInfo, 0, sizeof(pCmd->exprsInfo)); + pSql->res.numOfGroups = 0; + tfree(pSql->res.pGroupRec); - tscColumnBaseInfoDestroy(&pCmd->colList); - memset(&pCmd->colList, 0, sizeof(pCmd->colList)); + tscDestroyLocalReducer(pSql); - if (pCmd->tsBuf != NULL) { - tsBufDestory(pCmd->tsBuf); - pCmd->tsBuf = NULL; - } + tscDestroyResPointerInfo(&pSql->res); + tfree(pSql->res.pColumnIndex); } void tscFreeSqlObjPartial(SSqlObj* pSql) { @@ -362,8 +450,6 @@ void tscFreeSqlObjPartial(SSqlObj* pSql) { } SSqlCmd* pCmd = &pSql->cmd; - 
SSqlRes* pRes = &pSql->res; - STscObj* pObj = pSql->pTscObj; int32_t cmd = pCmd->command; @@ -372,30 +458,18 @@ void tscFreeSqlObjPartial(SSqlObj* pSql) { tscRemoveFromSqlList(pSql); } - pCmd->command = -1; + pCmd->command = 0; // pSql->sqlstr will be used by tscBuildQueryStreamDesc pthread_mutex_lock(&pObj->mutex); tfree(pSql->sqlstr); pthread_mutex_unlock(&pObj->mutex); - tfree(pSql->res.pRsp); - pSql->res.row = 0; - pSql->res.numOfRows = 0; - pSql->res.numOfTotal = 0; - - pSql->res.numOfGroups = 0; - tfree(pSql->res.pGroupRec); - - tscDestroyLocalReducer(pSql); - + tscFreeSqlResult(pSql); tfree(pSql->pSubs); pSql->numOfSubs = 0; - tscDestroyResPointerInfo(pRes); - tfree(pSql->res.pColumnIndex); tscFreeSqlCmdData(pCmd); - tscRemoveAllMeterMetaInfo(pCmd, false); } void tscFreeSqlObj(SSqlObj* pSql) { @@ -406,6 +480,7 @@ void tscFreeSqlObj(SSqlObj* pSql) { pSql->signature = NULL; pSql->fp = NULL; + SSqlCmd* pCmd = &pSql->cmd; memset(pCmd->payload, 0, (size_t)pCmd->allocSize); @@ -413,16 +488,6 @@ void tscFreeSqlObj(SSqlObj* pSql) { pCmd->allocSize = 0; - if (pSql->res.buffer != NULL) { - for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; i++) { - if (pSql->res.buffer[i] != NULL) { - tfree(pSql->res.buffer[i]); - } - } - - tfree(pSql->res.buffer); - } - if (pSql->fp == NULL) { tsem_destroy(&pSql->rspSem); tsem_destroy(&pSql->emptyRspSem); @@ -430,15 +495,6 @@ void tscFreeSqlObj(SSqlObj* pSql) { free(pSql); } -STableDataBlocks* tscCreateDataBlock(int32_t size) { - STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks)); - dataBuf->nAllocSize = (uint32_t)size; - dataBuf->pData = calloc(1, dataBuf->nAllocSize); - dataBuf->ordered = true; - dataBuf->prevTS = INT64_MIN; - return dataBuf; -} - void tscDestroyDataBlock(STableDataBlocks* pDataBlock) { if (pDataBlock == NULL) { return; @@ -446,6 +502,9 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) { tfree(pDataBlock->pData); tfree(pDataBlock->params); + + // free the refcount for 
metermeta + taosRemoveDataFromCache(tscCacheHandle, (void**)&(pDataBlock->pMeterMeta), false); tfree(pDataBlock); } @@ -492,11 +551,11 @@ SDataBlockList* tscCreateBlockArrayList() { void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks) { if (pList->nSize >= pList->nAlloc) { - pList->nAlloc = pList->nAlloc << 1; - pList->pData = realloc(pList->pData, sizeof(void*) * (size_t)pList->nAlloc); + pList->nAlloc = (pList->nAlloc) << 1U; + pList->pData = realloc(pList->pData, POINTER_BYTES * (size_t)pList->nAlloc); // reset allocated memory - memset(pList->pData + pList->nSize, 0, sizeof(void*) * (pList->nAlloc - pList->nSize)); + memset(pList->pData + pList->nSize, 0, POINTER_BYTES * (pList->nAlloc - pList->nSize)); } pList->pData[pList->nSize++] = pBlocks; @@ -519,10 +578,22 @@ void* tscDestroyBlockArrayList(SDataBlockList* pList) { int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) { SSqlCmd* pCmd = &pSql->cmd; + assert(pDataBlock->pMeterMeta != NULL); + + pCmd->numOfTablesInSubmit = pDataBlock->numOfMeters; - pCmd->count = pDataBlock->numOfMeters; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - strcpy(pMeterMetaInfo->name, pDataBlock->meterId); + assert(pCmd->numOfClause == 1); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0); + + // set the correct metermeta object, the metermeta has been locked in pDataBlocks, so it must be in the cache + if (pMeterMetaInfo->pMeterMeta != pDataBlock->pMeterMeta) { + strcpy(pMeterMetaInfo->name, pDataBlock->meterId); + taosRemoveDataFromCache(tscCacheHandle, (void**)&(pMeterMetaInfo->pMeterMeta), false); + + pMeterMetaInfo->pMeterMeta = taosTransferDataInCache(tscCacheHandle, (void**)&pDataBlock->pMeterMeta); + } else { + assert(strncmp(pMeterMetaInfo->name, pDataBlock->meterId, tListLen(pDataBlock->meterId)) == 0); + } /* * the submit message consists of : [RPC header|message body|digest] @@ -530,7 +601,10 @@ int32_t 
tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) { * additional space. */ int ret = tscAllocPayload(pCmd, pDataBlock->nAllocSize + sizeof(STaosDigest)); - if (TSDB_CODE_SUCCESS != ret) return ret; + if (TSDB_CODE_SUCCESS != ret) { + return ret; + } + memcpy(pCmd->payload, pDataBlock->pData, pDataBlock->nAllocSize); /* @@ -540,7 +614,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) { pCmd->payloadLen = pDataBlock->nAllocSize - tsRpcHeadSize; assert(pCmd->allocSize >= pCmd->payloadLen + tsRpcHeadSize + sizeof(STaosDigest)); - return tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0); + return TSDB_CODE_SUCCESS; } void tscFreeUnusedDataBlocks(SDataBlockList* pList) { @@ -552,47 +626,93 @@ void tscFreeUnusedDataBlocks(SDataBlockList* pList) { } } -STableDataBlocks* tscCreateDataBlockEx(size_t size, int32_t rowSize, int32_t startOffset, char* name) { - STableDataBlocks* dataBuf = tscCreateDataBlock(size); +/** + * create the in-memory buffer for each table to keep the submitted data block + * @param initialSize + * @param rowSize + * @param startOffset + * @param name + * @param dataBlocks + * @return + */ +int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name, + SMeterMeta* pMeterMeta, STableDataBlocks** dataBlocks) { + STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks)); + if (dataBuf == NULL) { + tscError("failed to allocated memory, reason:%s", strerror(errno)); + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } + + dataBuf->nAllocSize = (uint32_t)initialSize; + dataBuf->headerSize = startOffset; // the header size will always be the startOffset value, reserved for the subumit block header + if (dataBuf->nAllocSize <= dataBuf->headerSize) { + dataBuf->nAllocSize = dataBuf->headerSize*2; + } + + dataBuf->pData = calloc(1, dataBuf->nAllocSize); + dataBuf->ordered = true; + dataBuf->prevTS = INT64_MIN; dataBuf->rowSize = rowSize; dataBuf->size = 
startOffset; dataBuf->tsSource = -1; strncpy(dataBuf->meterId, name, TSDB_METER_ID_LEN); - return dataBuf; + + /* + * The metermeta may be released since the metermeta cache are completed clean by other thread + * due to operation such as drop database. So here we add the reference count directly instead of invoke + * taosGetDataFromCache, which may return NULL value. + */ + dataBuf->pMeterMeta = taosGetDataFromExists(tscCacheHandle, pMeterMeta); + assert(initialSize > 0 && pMeterMeta != NULL && dataBuf->pMeterMeta != NULL); + + *dataBlocks = dataBuf; + return TSDB_CODE_SUCCESS; } -STableDataBlocks* tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size, - int32_t startOffset, int32_t rowSize, char* tableId) { - STableDataBlocks* dataBuf = NULL; +int32_t tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size, + int32_t startOffset, int32_t rowSize, const char* tableId, SMeterMeta* pMeterMeta, + STableDataBlocks** dataBlocks) { + *dataBlocks = NULL; - STableDataBlocks** t1 = (STableDataBlocks**)taosGetIntHashData(pHashList, id); + STableDataBlocks** t1 = (STableDataBlocks**)taosGetDataFromHashTable(pHashList, (const char*)&id, sizeof(id)); if (t1 != NULL) { - dataBuf = *t1; + *dataBlocks = *t1; } - if (dataBuf == NULL) { - dataBuf = tscCreateDataBlockEx((size_t)size, rowSize, startOffset, tableId); - dataBuf = *(STableDataBlocks**)taosAddIntHash(pHashList, id, (char*)&dataBuf); - tscAppendDataBlock(pDataBlockList, dataBuf); + if (*dataBlocks == NULL) { + int32_t ret = tscCreateDataBlock((size_t)size, rowSize, startOffset, tableId, pMeterMeta, dataBlocks); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + taosAddToHashTable(pHashList, (const char*)&id, sizeof(int64_t), (char*)dataBlocks, POINTER_BYTES); + tscAppendDataBlock(pDataBlockList, *dataBlocks); } - return dataBuf; + return TSDB_CODE_SUCCESS; } int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* 
pTableDataBlockList) { SSqlCmd* pCmd = &pSql->cmd; - void* pVnodeDataBlockHashList = taosInitIntHash(8, POINTER_BYTES, taosHashInt); + void* pVnodeDataBlockHashList = taosInitHashTable(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); SDataBlockList* pVnodeDataBlockList = tscCreateBlockArrayList(); for (int32_t i = 0; i < pTableDataBlockList->nSize; ++i) { STableDataBlocks* pOneTableBlock = pTableDataBlockList->pData[i]; - STableDataBlocks* dataBuf = + STableDataBlocks* dataBuf = NULL; + int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pVnodeDataBlockList, pOneTableBlock->vgid, TSDB_PAYLOAD_SIZE, - tsInsertHeadSize, 0, pOneTableBlock->meterId); + tsInsertHeadSize, 0, pOneTableBlock->meterId, pOneTableBlock->pMeterMeta, &dataBuf); + if (ret != TSDB_CODE_SUCCESS) { + tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret); + taosCleanUpHashTable(pVnodeDataBlockHashList); + tscDestroyBlockArrayList(pVnodeDataBlockList); + return ret; + } int64_t destSize = dataBuf->size + pOneTableBlock->size; if (dataBuf->nAllocSize < destSize) { @@ -607,7 +727,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockLi } else { // failed to allocate memory, free already allocated memory and return error code tscError("%p failed to allocate memory for merging submit block, size:%d", pSql, dataBuf->nAllocSize); - taosCleanUpIntHash(pVnodeDataBlockHashList); + taosCleanUpHashTable(pVnodeDataBlockHashList); tfree(dataBuf->pData); tscDestroyBlockArrayList(pVnodeDataBlockList); @@ -618,8 +738,10 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockLi SShellSubmitBlock* pBlocks = (SShellSubmitBlock*)pOneTableBlock->pData; sortRemoveDuplicates(pOneTableBlock); - tscTrace("%p meterId:%s, sid:%d, rows:%d, sversion:%d", pSql, pOneTableBlock->meterId, pBlocks->sid, - pBlocks->numOfRows, pBlocks->sversion); + char* e = (char*)pBlocks->payLoad + 
pOneTableBlock->rowSize*(pBlocks->numOfRows-1); + + tscTrace("%p meterId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->meterId, pBlocks->sid, + pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->payLoad), GET_INT64_VAL(e)); pBlocks->sid = htonl(pBlocks->sid); pBlocks->uid = htobe64(pBlocks->uid); @@ -638,7 +760,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockLi pCmd->pDataBlocks = pVnodeDataBlockList; tscFreeUnusedDataBlocks(pCmd->pDataBlocks); - taosCleanUpIntHash(pVnodeDataBlockHashList); + taosCleanUpHashTable(pVnodeDataBlockHashList); return TSDB_CODE_SUCCESS; } @@ -658,8 +780,13 @@ void tscCloseTscObj(STscObj* pObj) { bool tscIsInsertOrImportData(char* sqlstr) { int32_t index = 0; - SSQLToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL); - return t0.type == TK_INSERT || t0.type == TK_IMPORT; + + do { + SSQLToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL); + if (t0.type != TK_LP) { + return t0.type == TK_INSERT || t0.type == TK_IMPORT; + } + } while (1); } int tscAllocPayload(SSqlCmd* pCmd, int size) { @@ -746,7 +873,7 @@ void tscFieldInfoSetValFromField(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIE } void tscFieldInfoUpdateVisible(SFieldInfo* pFieldInfo, int32_t index, bool visible) { - if (index < 0 || index > pFieldInfo->numOfOutputCols) { + if (index < 0 || index >= pFieldInfo->numOfOutputCols) { return; } @@ -775,8 +902,8 @@ void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, co pFieldInfo->numOfOutputCols++; } -void tscFieldInfoCalOffset(SSqlCmd* pCmd) { - SFieldInfo* pFieldInfo = &pCmd->fieldsInfo; +void tscFieldInfoCalOffset(SQueryInfo* pQueryInfo) { + SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; pFieldInfo->pOffset[0] = 0; for (int32_t i = 1; i < pFieldInfo->numOfOutputCols; ++i) { @@ -784,8 +911,8 @@ void tscFieldInfoCalOffset(SSqlCmd* pCmd) { } } -void tscFieldInfoUpdateOffset(SSqlCmd* pCmd) { - SFieldInfo* pFieldInfo = 
&pCmd->fieldsInfo; +void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo) { + SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; if (pFieldInfo->numOfOutputCols == 0) { return; } @@ -797,7 +924,7 @@ void tscFieldInfoUpdateOffset(SSqlCmd* pCmd) { * for potential secondary merge exists */ for (int32_t i = 1; i < pFieldInfo->numOfOutputCols; ++i) { - pFieldInfo->pOffset[i] = pFieldInfo->pOffset[i - 1] + tscSqlExprGet(pCmd, i - 1)->resBytes; + pFieldInfo->pOffset[i] = pFieldInfo->pOffset[i - 1] + tscSqlExprGet(pQueryInfo, i - 1)->resBytes; } } @@ -808,7 +935,7 @@ void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList if (size <= 0) { *dst = *src; - tscFieldInfoCopyAll(src, dst); + tscFieldInfoCopyAll(dst, src); } else { // only copy the required column for (int32_t i = 0; i < size; ++i) { assert(indexList[i] >= 0 && indexList[i] <= src->numOfOutputCols); @@ -817,7 +944,7 @@ void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList } } -void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst) { +void tscFieldInfoCopyAll(SFieldInfo* dst, SFieldInfo* src) { *dst = *src; dst->pFields = malloc(sizeof(TAOS_FIELD) * dst->numOfAlloc); @@ -829,24 +956,46 @@ void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst) { memcpy(dst->pVisibleCols, src->pVisibleCols, sizeof(bool) * dst->numOfOutputCols); } -TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index) { - if (index >= pCmd->fieldsInfo.numOfOutputCols) { +TAOS_FIELD* tscFieldInfoGetField(SQueryInfo* pQueryInfo, int32_t index) { + if (index >= pQueryInfo->fieldsInfo.numOfOutputCols) { return NULL; } - return &pCmd->fieldsInfo.pFields[index]; + return &pQueryInfo->fieldsInfo.pFields[index]; } -int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index) { - if (index >= pCmd->fieldsInfo.numOfOutputCols) { +int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutputCols; } + +int16_t tscFieldInfoGetOffset(SQueryInfo* 
pQueryInfo, int32_t index) { + if (index >= pQueryInfo->fieldsInfo.numOfOutputCols) { return 0; } - return pCmd->fieldsInfo.pOffset[index]; + return pQueryInfo->fieldsInfo.pOffset[index]; +} + +int32_t tscFieldInfoCompare(SFieldInfo* pFieldInfo1, SFieldInfo* pFieldInfo2) { + assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL); + + if (pFieldInfo1->numOfOutputCols != pFieldInfo2->numOfOutputCols) { + return pFieldInfo1->numOfOutputCols - pFieldInfo2->numOfOutputCols; + } + + for (int32_t i = 0; i < pFieldInfo1->numOfOutputCols; ++i) { + TAOS_FIELD* pField1 = &pFieldInfo1->pFields[i]; + TAOS_FIELD* pField2 = &pFieldInfo2->pFields[i]; + + if (pField1->type != pField2->type || pField1->bytes != pField2->bytes || + strcasecmp(pField1->name, pField2->name) != 0) { + return 1; + } + } + + return 0; } -int32_t tscGetResRowLength(SSqlCmd* pCmd) { - SFieldInfo* pFieldInfo = &pCmd->fieldsInfo; +int32_t tscGetResRowLength(SQueryInfo* pQueryInfo) { + SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; if (pFieldInfo->numOfOutputCols <= 0) { return 0; } @@ -896,24 +1045,24 @@ static void _exprEvic(SSqlExprInfo* pExprInfo, int32_t index) { } } -SSqlExpr* tscSqlExprInsertEmpty(SSqlCmd* pCmd, int32_t index, int16_t functionId) { - SSqlExprInfo* pExprInfo = &pCmd->exprsInfo; - +SSqlExpr* tscSqlExprInsertEmpty(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId) { + SSqlExprInfo* pExprInfo = &pQueryInfo->exprsInfo; + _exprCheckSpace(pExprInfo, pExprInfo->numOfExprs + 1); _exprEvic(pExprInfo, index); - + SSqlExpr* pExpr = &pExprInfo->pExprs[index]; pExpr->functionId = functionId; - + pExprInfo->numOfExprs++; return pExpr; } -SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pColIndex->tableIndex); +SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, + int16_t 
type, int16_t size, int16_t interSize) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, pColIndex->tableIndex); - SSqlExprInfo* pExprInfo = &pCmd->exprsInfo; + SSqlExprInfo* pExprInfo = &pQueryInfo->exprsInfo; _exprCheckSpace(pExprInfo, pExprInfo->numOfExprs + 1); _exprEvic(pExprInfo, index); @@ -953,10 +1102,10 @@ SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, SCo return pExpr; } -SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, - int16_t size) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - SSqlExprInfo* pExprInfo = &pCmd->exprsInfo; +SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, + int16_t type, int16_t size) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + SSqlExprInfo* pExprInfo = &pQueryInfo->exprsInfo; if (index > pExprInfo->numOfExprs) { return NULL; } @@ -987,14 +1136,45 @@ void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, assert(pExpr->numOfParams <= 3); } -SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index) { - if (pCmd->exprsInfo.numOfExprs <= index) { +SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index) { + if (pQueryInfo->exprsInfo.numOfExprs <= index) { return NULL; } - return &pCmd->exprsInfo.pExprs[index]; + return &pQueryInfo->exprsInfo.pExprs[index]; } +void* tscSqlExprDestroy(SSqlExpr* pExpr) { + if (pExpr == NULL) { + return NULL; + } + + for(int32_t i = 0; i < tListLen(pExpr->param); ++i) { + tVariantDestroy(&pExpr->param[i]); + } + + return NULL; +} + +/* + * NOTE: Does not release SSqlExprInfo here. 
+ */ +void tscSqlExprInfoDestroy(SSqlExprInfo* pExprInfo) { + if (pExprInfo->numOfAlloc == 0) { + return; + } + + for(int32_t i = 0; i < pExprInfo->numOfAlloc; ++i) { + tscSqlExprDestroy(&pExprInfo->pExprs[i]); + } + + tfree(pExprInfo->pExprs); + + pExprInfo->numOfAlloc = 0; + pExprInfo->numOfExprs = 0; +} + + void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t tableuid) { if (src == NULL) { return; @@ -1002,7 +1182,7 @@ void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t tableui *dst = *src; - dst->pExprs = malloc(sizeof(SSqlExpr) * dst->numOfAlloc); + dst->pExprs = calloc(dst->numOfAlloc, sizeof(SSqlExpr)); int16_t num = 0; for (int32_t i = 0; i < src->numOfExprs; ++i) { if (src->pExprs[i].uid == tableuid) { @@ -1071,8 +1251,8 @@ void tscColumnBaseInfoUpdateTableIndex(SColumnBaseInfo* pColList, int16_t tableI } // todo refactor -SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* pColIndex) { - SColumnBaseInfo* pcolList = &pCmd->colList; +SColumnBase* tscColumnBaseInfoInsert(SQueryInfo* pQueryInfo, SColumnIndex* pColIndex) { + SColumnBaseInfo* pcolList = &pQueryInfo->colList; // ignore the tbname column to be inserted into source list if (pColIndex->columnIndex < 0) { @@ -1100,14 +1280,13 @@ SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* pColIndex) { pcolList->pColList[i].colIndex = *pColIndex; pcolList->numOfCols++; - pCmd->numOfCols++; } return &pcolList->pColList[i]; } void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src) { - assert (src != NULL && dst != NULL); + assert(src != NULL && dst != NULL); assert(src->filterOnBinary == 0 || src->filterOnBinary == 1); if (src->lowerRelOptr == TSDB_RELATION_INVALID && src->upperRelOptr == TSDB_RELATION_INVALID) { @@ -1116,14 +1295,15 @@ void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* sr *dst = *src; if (dst->filterOnBinary) { - size_t len = (size_t) dst->len + 1; - dst->pz = calloc(1, 
len); - memcpy((char*) dst->pz, (char*) src->pz, (size_t) len); + size_t len = (size_t)dst->len + 1; + char* pTmp = calloc(1, len); + dst->pz = (int64_t)pTmp; + memcpy((char*)dst->pz, (char*)src->pz, (size_t)len); } } void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src) { - assert (src != NULL && dst != NULL); + assert(src != NULL && dst != NULL); *dst = *src; @@ -1181,7 +1361,8 @@ void tscColumnBaseInfoDestroy(SColumnBaseInfo* pColumnBaseInfo) { assert(pColBase->filterInfo[j].filterOnBinary == 0 || pColBase->filterInfo[j].filterOnBinary == 1); if (pColBase->filterInfo[j].filterOnBinary) { - tfree(pColBase->filterInfo[j].pz); + free((char*)pColBase->filterInfo[j].pz); + pColBase->filterInfo[j].pz = 0; } } } @@ -1316,13 +1497,12 @@ void tscIncStreamExecutionCount(void* pStream) { ps->num += 1; } -bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +bool tscValidateColumnId(SMeterMetaInfo* pMeterMetaInfo, int32_t colId) { if (pMeterMetaInfo->pMeterMeta == NULL) { return false; } - if (colId == -1 && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (colId == -1 && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { return true; } @@ -1341,13 +1521,19 @@ bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId) { void tscTagCondCopy(STagCond* dest, const STagCond* src) { memset(dest, 0, sizeof(STagCond)); - SStringCopy(&dest->tbnameCond.cond, &src->tbnameCond.cond); + if (src->tbnameCond.cond != NULL) { + dest->tbnameCond.cond = strdup(src->tbnameCond.cond); + } + dest->tbnameCond.uid = src->tbnameCond.uid; memcpy(&dest->joinInfo, &src->joinInfo, sizeof(SJoinInfo)); for (int32_t i = 0; i < src->numOfTagCond; ++i) { - SStringCopy(&dest->cond[i].cond, &src->cond[i].cond); + if (src->cond[i].cond != NULL) { + dest->cond[i].cond = strdup(src->cond[i].cond); + } + dest->cond[i].uid = src->cond[i].uid; } @@ -1356,21 +1542,20 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) { } void 
tscTagCondRelease(STagCond* pCond) { - SStringFree(&pCond->tbnameCond.cond); - + free(pCond->tbnameCond.cond); for (int32_t i = 0; i < pCond->numOfTagCond; ++i) { - SStringFree(&pCond->cond[i].cond); + free(pCond->cond[i].cond); } memset(pCond, 0, sizeof(STagCond)); } -void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); +void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta); - for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); pColInfo[i].functionId = pExpr->functionId; if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { @@ -1393,18 +1578,20 @@ void tscSetFreeHeatBeat(STscObj* pObj) { assert(pHeatBeat == pHeatBeat->signature); // to denote the heart-beat timer close connection and free all allocated resources - pHeatBeat->cmd.type = TSDB_QUERY_TYPE_FREE_RESOURCE; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pHeatBeat->cmd, 0); + pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE; } bool tscShouldFreeHeatBeat(SSqlObj* pHb) { assert(pHb == pHb->signature); - return pHb->cmd.type == TSDB_QUERY_TYPE_FREE_RESOURCE; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pHb->cmd, 0); + return pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE; } void tscCleanSqlCmd(SSqlCmd* pCmd) { - tscFreeSqlCmdData(pCmd); - - assert(pCmd->pMeterInfo == NULL); + pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); + tscFreeSubqueryInfo(pCmd); uint32_t allocSize = pCmd->allocSize; char* allocPtr = pCmd->payload; @@ -1458,7 +1645,12 @@ bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) { * data blocks have been submit to vnode. 
*/ SDataBlockList* pDataBlocks = pCmd->pDataBlocks; - if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + assert(pQueryInfo->numOfTables == 1 || pQueryInfo->numOfTables == 2); + + if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) { tscTrace("%p object should be release since all data blocks have been submit", pSql); return true; } else { @@ -1470,18 +1662,68 @@ bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) { } } -SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index) { - if (pCmd == NULL || index >= pCmd->numOfTables || index < 0) { +/** + * + * @param pCmd + * @param clauseIndex denote the index of the union sub clause, usually are 0, if no union query exists. + * @param tableIndex denote the table index for join query, where more than one table exists + * @return + */ +SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t clauseIndex, int32_t tableIndex) { + if (pCmd == NULL || pCmd->numOfClause == 0) { return NULL; } - return pCmd->pMeterInfo[index]; + assert(clauseIndex >= 0 && clauseIndex < pCmd->numOfClause); + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + return tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, tableIndex); +} + +SMeterMetaInfo* tscGetMeterMetaInfoFromQueryInfo(SQueryInfo* pQueryInfo, int32_t tableIndex) { + assert(pQueryInfo != NULL); + + if (pQueryInfo->pMeterInfo == NULL) { + assert(pQueryInfo->numOfTables == 0); + return NULL; + } + + assert(tableIndex >= 0 && tableIndex <= pQueryInfo->numOfTables && pQueryInfo->pMeterInfo != NULL); + + return pQueryInfo->pMeterInfo[tableIndex]; } -SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* index) { +SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) { + assert(pCmd != NULL && subClauseIndex >= 0 && subClauseIndex < 
TSDB_MAX_UNION_CLAUSE); + + if (pCmd->pQueryInfo == NULL || subClauseIndex >= pCmd->numOfClause) { + return NULL; + } + + return pCmd->pQueryInfo[subClauseIndex]; +} + +int32_t tscGetQueryInfoDetailSafely(SSqlCmd* pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo) { + int32_t ret = TSDB_CODE_SUCCESS; + + *pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex); + + while ((*pQueryInfo) == NULL) { + if ((ret = tscAddSubqueryInfo(pCmd)) != TSDB_CODE_SUCCESS) { + return ret; + } + + (*pQueryInfo) = tscGetQueryInfoDetail(pCmd, subClauseIndex); + } + + return TSDB_CODE_SUCCESS; +} + +SMeterMetaInfo* tscGetMeterMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index) { int32_t k = -1; - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - if (pCmd->pMeterInfo[i]->pMeterMeta->uid == uid) { + + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + if (pQueryInfo->pMeterInfo[i]->pMeterMeta->uid == uid) { k = i; break; } @@ -1491,20 +1733,80 @@ SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* i *index = k; } - return tscGetMeterMetaInfo(pCmd, k); + assert(k != -1); + return tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, k); +} + +int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) { + assert(pCmd != NULL); + + size_t s = pCmd->numOfClause + 1; + char* tmp = realloc(pCmd->pQueryInfo, s * POINTER_BYTES); + if (tmp == NULL) { + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } + + pCmd->pQueryInfo = (SQueryInfo**)tmp; + + SQueryInfo* pQueryInfo = calloc(1, sizeof(SQueryInfo)); + pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer + + pCmd->pQueryInfo[pCmd->numOfClause++] = pQueryInfo; + return TSDB_CODE_SUCCESS; +} + +static void doClearSubqueryInfo(SQueryInfo* pQueryInfo) { + tscTagCondRelease(&pQueryInfo->tagCond); + tscClearFieldInfo(&pQueryInfo->fieldsInfo); + + tscSqlExprInfoDestroy(&pQueryInfo->exprsInfo); + memset(&pQueryInfo->exprsInfo, 0, sizeof(pQueryInfo->exprsInfo)); + + 
tscColumnBaseInfoDestroy(&pQueryInfo->colList); + memset(&pQueryInfo->colList, 0, sizeof(pQueryInfo->colList)); + + pQueryInfo->tsBuf = tsBufDestory(pQueryInfo->tsBuf); + + tfree(pQueryInfo->defaultVal); +} + +void tscClearSubqueryInfo(SSqlCmd* pCmd) { + for (int32_t i = 0; i < pCmd->numOfClause; ++i) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i); + doClearSubqueryInfo(pQueryInfo); + } +} + +void tscFreeSubqueryInfo(SSqlCmd* pCmd) { + if (pCmd == NULL || pCmd->numOfClause == 0) { + return; + } + + for (int32_t i = 0; i < pCmd->numOfClause; ++i) { + char* addr = (char*)pCmd - offsetof(SSqlObj, cmd); + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i); + + doClearSubqueryInfo(pQueryInfo); + tscRemoveAllMeterMetaInfo(pQueryInfo, (const char*)addr, false); + tfree(pQueryInfo); + } + + pCmd->numOfClause = 0; + tfree(pCmd->pQueryInfo); } -SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta, - int16_t numOfTags, int16_t* tags) { - void* pAlloc = realloc(pCmd->pMeterInfo, (pCmd->numOfTables + 1) * POINTER_BYTES); +SMeterMetaInfo* tscAddMeterMetaInfo(SQueryInfo* pQueryInfo, const char* name, SMeterMeta* pMeterMeta, + SMetricMeta* pMetricMeta, int16_t numOfTags, int16_t* tags) { + void* pAlloc = realloc(pQueryInfo->pMeterInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES); if (pAlloc == NULL) { return NULL; } - pCmd->pMeterInfo = pAlloc; - pCmd->pMeterInfo[pCmd->numOfTables] = calloc(1, sizeof(SMeterMetaInfo)); + pQueryInfo->pMeterInfo = pAlloc; + pQueryInfo->pMeterInfo[pQueryInfo->numOfTables] = calloc(1, sizeof(SMeterMetaInfo)); - SMeterMetaInfo* pMeterMetaInfo = pCmd->pMeterInfo[pCmd->numOfTables]; + SMeterMetaInfo* pMeterMetaInfo = pQueryInfo->pMeterInfo[pQueryInfo->numOfTables]; assert(pMeterMetaInfo != NULL); if (name != NULL) { @@ -1517,44 +1819,44 @@ SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMetaInfo->numOfTags = numOfTags; if (tags != 
NULL) { - memcpy(pMeterMetaInfo->tagColumnIndex, tags, sizeof(int16_t) * numOfTags); + memcpy(pMeterMetaInfo->tagColumnIndex, tags, sizeof(pMeterMetaInfo->tagColumnIndex[0]) * numOfTags); } - pCmd->numOfTables += 1; - + pQueryInfo->numOfTables += 1; return pMeterMetaInfo; } -SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SSqlCmd* pCmd) { return tscAddMeterMetaInfo(pCmd, NULL, NULL, NULL, 0, NULL); } +SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SQueryInfo* pQueryInfo) { + return tscAddMeterMetaInfo(pQueryInfo, NULL, NULL, NULL, 0, NULL); +} -void tscRemoveMeterMetaInfo(SSqlCmd* pCmd, int32_t index, bool removeFromCache) { - if (index < 0 || index >= pCmd->numOfTables) { +void doRemoveMeterMetaInfo(SQueryInfo* pQueryInfo, int32_t index, bool removeFromCache) { + if (index < 0 || index >= pQueryInfo->numOfTables) { return; } - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, index); tscClearMeterMetaInfo(pMeterMetaInfo, removeFromCache); free(pMeterMetaInfo); - int32_t after = pCmd->numOfTables - index - 1; + int32_t after = pQueryInfo->numOfTables - index - 1; if (after > 0) { - memmove(&pCmd->pMeterInfo[index], &pCmd->pMeterInfo[index + 1], after * sizeof(void*)); + memmove(&pQueryInfo->pMeterInfo[index], &pQueryInfo->pMeterInfo[index + 1], after * POINTER_BYTES); } - pCmd->numOfTables -= 1; + pQueryInfo->numOfTables -= 1; } -void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache) { - int64_t addr = offsetof(SSqlObj, cmd); - - tscTrace("%p deref the metric/meter meta in cache, numOfTables:%d", ((char*)pCmd - addr), pCmd->numOfTables); +void tscRemoveAllMeterMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache) { + tscTrace("%p deref the metric/meter meta in cache, numOfTables:%d", address, pQueryInfo->numOfTables); - while (pCmd->numOfTables > 0) { - tscRemoveMeterMetaInfo(pCmd, pCmd->numOfTables - 1, removeFromCache); + int32_t index = 
pQueryInfo->numOfTables; + while (index >= 0) { + doRemoveMeterMetaInfo(pQueryInfo, --index, removeFromCache); } - tfree(pCmd->pMeterInfo); + tfree(pQueryInfo->pMeterInfo); } void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache) { @@ -1567,134 +1869,21 @@ void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache) } void tscResetForNextRetrieve(SSqlRes* pRes) { - pRes->row = 0; - pRes->numOfRows = 0; -} - -SString SStringCreate(const char* str) { - size_t len = strlen(str); - - SString dest = {.n = len, .alloc = len + 1}; - dest.z = calloc(1, dest.alloc); - strcpy(dest.z, str); - - return dest; -} - -void SStringCopy(SString* pDest, const SString* pSrc) { - if (pSrc->n > 0) { - pDest->n = pSrc->n; - pDest->alloc = pDest->n + 1; // one additional space for null terminate - - pDest->z = calloc(1, pDest->alloc); - - memcpy(pDest->z, pSrc->z, pDest->n); - } else { - memset(pDest, 0, sizeof(SString)); - } -} - -void SStringFree(SString* pStr) { - if (pStr->alloc > 0) { - tfree(pStr->z); - pStr->alloc = 0; - } -} - -void SStringShrink(SString* pStr) { - if (pStr->alloc > (pStr->n + 1) && pStr->alloc > (pStr->n * 2)) { - pStr->z = realloc(pStr->z, pStr->n + 1); - assert(pStr->z != NULL); - - pStr->alloc = pStr->n + 1; - } -} - -int32_t SStringAlloc(SString* pStr, int32_t size) { - if (pStr->alloc >= size) { - return TSDB_CODE_SUCCESS; - } - - size = ALIGN8(size); - - char* tmp = NULL; - if (pStr->z != NULL) { - tmp = realloc(pStr->z, size); - memset(pStr->z + pStr->n, 0, size - pStr->n); - } else { - tmp = calloc(1, size); - } - - if (tmp == NULL) { -#ifdef WINDOWS - LPVOID lpMsgBuf; - FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, - GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language - (LPTSTR)&lpMsgBuf, 0, NULL); - tscTrace("failed to allocate memory, reason:%s", lpMsgBuf); - LocalFree(lpMsgBuf); -#else - char errmsg[256] = {0}; - 
strerror_r(errno, errmsg, tListLen(errmsg)); - tscTrace("failed to allocate memory, reason:%s", errmsg); -#endif - return TSDB_CODE_CLI_OUT_OF_MEMORY; - } - - pStr->z = tmp; - pStr->alloc = size; - - return TSDB_CODE_SUCCESS; -} - -#define MIN_ALLOC_SIZE 8 - -int32_t SStringEnsureRemain(SString* pStr, int32_t size) { - if (pStr->alloc - pStr->n > size) { - return TSDB_CODE_SUCCESS; - } - - // remain space is insufficient, allocate more spaces - int32_t inc = (size >= MIN_ALLOC_SIZE) ? size : MIN_ALLOC_SIZE; - if (inc < (pStr->alloc >> 1)) { - inc = (pStr->alloc >> 1); - } - - // get the new size - int32_t newsize = pStr->alloc + inc; - - char* tmp = realloc(pStr->z, newsize); - if (tmp == NULL) { -#ifdef WINDOWS - LPVOID lpMsgBuf; - FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, - GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language - (LPTSTR)&lpMsgBuf, 0, NULL); - tscTrace("failed to allocate memory, reason:%s", lpMsgBuf); - LocalFree(lpMsgBuf); -#else - char errmsg[256] = {0}; - strerror_r(errno, errmsg, tListLen(errmsg)); - tscTrace("failed to allocate memory, reason:%s", errmsg); -#endif - - return TSDB_CODE_CLI_OUT_OF_MEMORY; + if (pRes == NULL) { + return; } - memset(tmp + pStr->n, 0, inc); - pStr->alloc = newsize; - pStr->z = tmp; - - return TSDB_CODE_SUCCESS; + pRes->row = 0; + pRes->numOfRows = 0; } -SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param, - SSqlObj* pPrevSql) { - SSqlCmd* pCmd = &pSql->cmd; +SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql) { + SSqlCmd* pCmd = &pSql->cmd; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, tableIndex); SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); if (pNew == NULL) { - tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex); + 
tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex); return NULL; } @@ -1703,7 +1892,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex pNew->sqlstr = strdup(pSql->sqlstr); if (pNew->sqlstr == NULL) { - tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex); + tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex); free(pNew); return NULL; @@ -1715,63 +1904,80 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex pNew->cmd.payload = NULL; pNew->cmd.allocSize = 0; - pNew->cmd.pMeterInfo = NULL; + pNew->cmd.pQueryInfo = NULL; + pNew->cmd.numOfClause = 0; + pNew->cmd.clauseIndex = 0; - pNew->cmd.colList.pColList = NULL; - pNew->cmd.colList.numOfAlloc = 0; - pNew->cmd.colList.numOfCols = 0; + if (tscAddSubqueryInfo(&pNew->cmd) != TSDB_CODE_SUCCESS) { + tscFreeSqlObj(pNew); + return NULL; + } - pNew->cmd.numOfTables = 0; - pNew->cmd.tsBuf = NULL; + SQueryInfo* pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - memset(&pNew->cmd.fieldsInfo, 0, sizeof(SFieldInfo)); - tscTagCondCopy(&pNew->cmd.tagCond, &pCmd->tagCond); + memcpy(pNewQueryInfo, pQueryInfo, sizeof(SQueryInfo)); + + memset(&pNewQueryInfo->colList, 0, sizeof(pNewQueryInfo->colList)); + memset(&pNewQueryInfo->fieldsInfo, 0, sizeof(SFieldInfo)); + + pNewQueryInfo->pMeterInfo = NULL; + pNewQueryInfo->defaultVal = NULL; + pNewQueryInfo->numOfTables = 0; + pNewQueryInfo->tsBuf = NULL; + + tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond); + + if (pQueryInfo->interpoType != TSDB_INTERPO_NONE) { + pNewQueryInfo->defaultVal = malloc(pQueryInfo->fieldsInfo.numOfOutputCols * sizeof(int64_t)); + memcpy(pNewQueryInfo->defaultVal, pQueryInfo->defaultVal, pQueryInfo->fieldsInfo.numOfOutputCols * sizeof(int64_t)); + } if 
(tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) { - tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex); + tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex); tscFreeSqlObj(pNew); return NULL; } - tscColumnBaseInfoCopy(&pNew->cmd.colList, &pCmd->colList, (int16_t)tableIndex); - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); + tscColumnBaseInfoCopy(&pNewQueryInfo->colList, &pQueryInfo->colList, (int16_t)tableIndex); // set the correct query type if (pPrevSql != NULL) { - pNew->cmd.type = pPrevSql->cmd.type; + SQueryInfo* pPrevQueryInfo = tscGetQueryInfoDetail(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex); + pNewQueryInfo->type = pPrevQueryInfo->type; } else { - pNew->cmd.type |= TSDB_QUERY_TYPE_SUBQUERY; // it must be the subquery + pNewQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY; // it must be the subquery } uint64_t uid = pMeterMetaInfo->pMeterMeta->uid; - tscSqlExprCopy(&pNew->cmd.exprsInfo, &pCmd->exprsInfo, uid); + tscSqlExprCopy(&pNewQueryInfo->exprsInfo, &pQueryInfo->exprsInfo, uid); - int32_t numOfOutputCols = pNew->cmd.exprsInfo.numOfExprs; + int32_t numOfOutputCols = pNewQueryInfo->exprsInfo.numOfExprs; if (numOfOutputCols > 0) { int32_t* indexList = calloc(1, numOfOutputCols * sizeof(int32_t)); - for (int32_t i = 0, j = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + for (int32_t i = 0, j = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->uid == uid) { indexList[j++] = i; } } - tscFieldInfoCopy(&pCmd->fieldsInfo, &pNew->cmd.fieldsInfo, indexList, numOfOutputCols); + tscFieldInfoCopy(&pQueryInfo->fieldsInfo, &pNewQueryInfo->fieldsInfo, indexList, numOfOutputCols); free(indexList); - tscFieldInfoUpdateOffset(&pNew->cmd); + tscFieldInfoUpdateOffsetForInterResult(pNewQueryInfo); } pNew->fp = fp; - pNew->param 
= param; - pNew->cmd.vnodeIdx = vnodeIndex; - SMeterMetaInfo* pMetermetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); char key[TSDB_MAX_TAGS_LEN + 1] = {0}; - tscGetMetricMetaCacheKey(pCmd, key, pMetermetaInfo->pMeterMeta->uid); + tscGetMetricMetaCacheKey(pQueryInfo, key, uid); + +#ifdef _DEBUG_VIEW + printf("the metricmeta key is:%s\n", key); +#endif char* name = pMeterMetaInfo->name; SMeterMetaInfo* pFinalInfo = NULL; @@ -1780,31 +1986,41 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex SMeterMeta* pMeterMeta = taosGetDataFromCache(tscCacheHandle, name); SMetricMeta* pMetricMeta = taosGetDataFromCache(tscCacheHandle, key); - pFinalInfo = tscAddMeterMetaInfo(&pNew->cmd, name, pMeterMeta, pMetricMeta, pMeterMetaInfo->numOfTags, + pFinalInfo = tscAddMeterMetaInfo(pNewQueryInfo, name, pMeterMeta, pMetricMeta, pMeterMetaInfo->numOfTags, pMeterMetaInfo->tagColumnIndex); - } else { - SMeterMetaInfo* pPrevInfo = tscGetMeterMetaInfo(&pPrevSql->cmd, 0); - pFinalInfo = tscAddMeterMetaInfo(&pNew->cmd, name, pPrevInfo->pMeterMeta, pPrevInfo->pMetricMeta, - pMeterMetaInfo->numOfTags, pMeterMetaInfo->tagColumnIndex); + } else { // transfer the ownership of pMeterMeta/pMetricMeta to the newly create sql object. 
+ SMeterMetaInfo* pPrevInfo = tscGetMeterMetaInfo(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0); - pPrevInfo->pMeterMeta = NULL; - pPrevInfo->pMetricMeta = NULL; + SMeterMeta* pPrevMeterMeta = taosTransferDataInCache(tscCacheHandle, (void**)&pPrevInfo->pMeterMeta); + SMetricMeta* pPrevMetricMeta = taosTransferDataInCache(tscCacheHandle, (void**)&pPrevInfo->pMetricMeta); + + pFinalInfo = tscAddMeterMetaInfo(pNewQueryInfo, name, pPrevMeterMeta, pPrevMetricMeta, pMeterMetaInfo->numOfTags, + pMeterMetaInfo->tagColumnIndex); } - assert(pFinalInfo->pMeterMeta != NULL); - if (UTIL_METER_IS_METRIC(pMetermetaInfo)) { + assert(pFinalInfo->pMeterMeta != NULL && pNewQueryInfo->numOfTables == 1); + if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) { assert(pFinalInfo->pMetricMeta != NULL); } + + tscTrace( + "%p new subquery: %p, tableIndex:%d, vnodeIdx:%d, type:%d, exprInfo:%d, colList:%d," + "fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64, + pSql, pNew, tableIndex, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type, pNewQueryInfo->exprsInfo.numOfExprs, + pNewQueryInfo->colList.numOfCols, pNewQueryInfo->fieldsInfo.numOfOutputCols, pFinalInfo->name, pNewQueryInfo->stime, + pNewQueryInfo->etime, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit); + + tscPrintSelectClause(pNew, 0); - tscTrace("%p new subquery %p, vnodeIdx:%d, tableIndex:%d, type:%d", pSql, pNew, vnodeIndex, tableIndex, - pNew->cmd.type); return pNew; } void tscDoQuery(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; void* fp = pSql->fp; - + + pSql->res.code = TSDB_CODE_SUCCESS; + if (pCmd->command > TSDB_SQL_LOCAL) { tscProcessLocalCmd(pSql); } else { @@ -1812,8 +2028,8 @@ void tscDoQuery(SSqlObj* pSql) { tscAddIntoSqlList(pSql); } - if (pCmd->isInsertFromFile == 1) { - tscProcessMultiVnodesInsertForFile(pSql); + if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) { + tscProcessMultiVnodesInsertFromFile(pSql); } else { // pSql may be released in this function if it is a async insertion. 
tscProcessSql(pSql); @@ -1822,9 +2038,7 @@ void tscDoQuery(SSqlObj* pSql) { } } -int16_t tscGetJoinTagColIndexByUid(SSqlCmd* pCmd, uint64_t uid) { - STagCond* pTagCond = &pCmd->tagCond; - +int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid) { if (pTagCond->joinInfo.left.uid == uid) { return pTagCond->joinInfo.left.tagCol; } else { @@ -1840,31 +2054,182 @@ bool tscIsUpdateQuery(STscObj* pObj) { SSqlCmd* pCmd = &pObj->pSql->cmd; return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || - TSDB_SQL_USE_DB == pCmd->command) ? 1 : 0; + TSDB_SQL_USE_DB == pCmd->command) + ? 1 + : 0; } -int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql) { - const char *msgFormat1 = "invalid SQL: %s"; - const char *msgFormat2 = "invalid SQL: syntax error near \"%s\" (%s)"; - const char *msgFormat3 = "invalid SQL: syntax error near \"%s\""; - +int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) { + const char* msgFormat1 = "invalid SQL: %s"; + const char* msgFormat2 = "invalid SQL: syntax error near \"%s\" (%s)"; + const char* msgFormat3 = "invalid SQL: syntax error near \"%s\""; + const int32_t BACKWARD_CHAR_STEP = 0; - + if (sql == NULL) { assert(additionalInfo != NULL); sprintf(msg, msgFormat1, additionalInfo); return TSDB_CODE_INVALID_SQL; } - - char buf[64] = {0}; // only extract part of sql string + + char buf[64] = {0}; // only extract part of sql string strncpy(buf, (sql - BACKWARD_CHAR_STEP), tListLen(buf) - 1); - + if (additionalInfo != NULL) { sprintf(msg, msgFormat2, buf, additionalInfo); } else { - sprintf(msg, msgFormat3, buf); // no additional information for invalid sql error + sprintf(msg, msgFormat3, buf); // no additional information for invalid sql error } - + return TSDB_CODE_INVALID_SQL; } +bool tscHasReachLimitation(SQueryInfo* pQueryInfo, SSqlRes* pRes) { + assert(pQueryInfo != NULL && pQueryInfo->clauseLimit != 0); + return (pQueryInfo->clauseLimit > 0 && 
pRes->numOfTotalInCurrentClause >= pQueryInfo->clauseLimit); +} + +char* tscGetErrorMsgPayload(SSqlCmd* pCmd) { return pCmd->payload; } + +/** + * If current vnode query does not return results anymore (pRes->numOfRows == 0), try the next vnode if exists, + * in case of multi-vnode super table projection query and the result does not reach the limitation. + */ +bool hasMoreVnodesToTry(SSqlObj* pSql) { + SSqlCmd* pCmd = &pSql->cmd; + SSqlRes* pRes = &pSql->res; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + if (!UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo) || (pMeterMetaInfo->pMetricMeta == NULL)) { + return false; + } + + int32_t totalVnode = pMeterMetaInfo->pMetricMeta->numOfVnodes; + return pRes->numOfRows == 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && + (!tscHasReachLimitation(pQueryInfo, pRes)) && (pMeterMetaInfo->vnodeIndex < totalVnode - 1); +} + +void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { + SSqlCmd* pCmd = &pSql->cmd; + SSqlRes* pRes = &pSql->res; + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + /* + * no result returned from the current virtual node anymore, try the next vnode if exists + * if case of: multi-vnode super table projection query + */ + assert(pRes->numOfRows == 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && !tscHasReachLimitation(pQueryInfo, pRes)); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0); + int32_t totalVnode = pMeterMetaInfo->pMetricMeta->numOfVnodes; + + while (++pMeterMetaInfo->vnodeIndex < totalVnode) { + tscTrace("%p current vnode:%d exhausted, try next:%d. total vnode:%d. 
current numOfRes:%d", pSql, + pMeterMetaInfo->vnodeIndex - 1, pMeterMetaInfo->vnodeIndex, totalVnode, pRes->numOfTotalInCurrentClause); + + /* + * update the limit and offset value for the query on the next vnode, + * according to current retrieval results + * + * NOTE: + * if the pRes->offset is larger than 0, the start returned position has not reached yet. + * Therefore, the pRes->numOfRows, as well as pRes->numOfTotalInCurrentClause, must be 0. + * The pRes->offset value will be updated by virtual node, during query execution. + */ + if (pQueryInfo->clauseLimit >= 0) { + pQueryInfo->limit.limit = pQueryInfo->clauseLimit - pRes->numOfTotalInCurrentClause; + } + + pQueryInfo->limit.offset = pRes->offset; + + assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0)); + tscTrace("%p new query to next vnode, vnode index:%d, limit:%" PRId64 ", offset:%" PRId64 ", glimit:%" PRId64, pSql, + pMeterMetaInfo->vnodeIndex, pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->clauseLimit); + + /* + * For project query with super table join, the numOfSub is equalled to the number of all subqueries. + * Therefore, we need to reset the value of numOfSubs to be 0. + * + * For super table join with projection query, if anyone of the subquery is exhausted, the query completed. 
+ */ + pSql->numOfSubs = 0; + pCmd->command = TSDB_SQL_SELECT; + + tscResetForNextRetrieve(pRes); + + // in case of async query, set the callback function + void* fp1 = pSql->fp; + pSql->fp = fp; + + if (fp1 != NULL) { + assert(fp != NULL); + } + + int32_t ret = tscProcessSql(pSql); // todo check for failure + + // in case of async query, return now + if (fp != NULL) { + return; + } + + if (ret != TSDB_CODE_SUCCESS) { + pSql->res.code = ret; + return; + } + + // retrieve data + assert(pCmd->command == TSDB_SQL_SELECT); + pCmd->command = TSDB_SQL_FETCH; + + if ((ret = tscProcessSql(pSql)) != TSDB_CODE_SUCCESS) { + pSql->res.code = ret; + return; + } + + // if the result from current virtual node are empty, try next if exists. otherwise, return the results. + if (pRes->numOfRows > 0) { + break; + } + } + + if (pRes->numOfRows == 0) { + tscTrace("%p all vnodes exhausted, prj query completed. total res:%d", pSql, totalVnode, pRes->numOfTotal); + } +} + +void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) { + SSqlCmd* pCmd = &pSql->cmd; + SSqlRes* pRes = &pSql->res; + + // current subclause is completed, try the next subclause + assert(pCmd->clauseIndex < pCmd->numOfClause - 1); + + pCmd->clauseIndex++; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + + pSql->cmd.command = pQueryInfo->command; + + //backup the total number of result first + int64_t num = pRes->numOfTotal + pRes->numOfTotalInCurrentClause; + tscFreeResData(pSql); + + pRes->numOfTotal = num; + + tfree(pSql->pSubs); + pSql->numOfSubs = 0; + + if (pSql->fp != NULL) { + pSql->fp = queryFp; + assert(queryFp != NULL); + } + + tscTrace("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause); + if (pCmd->command > TSDB_SQL_LOCAL) { + tscProcessLocalCmd(pSql); + } else { + tscProcessSql(pSql); + } +} diff --git a/src/connector/go/src/taosSql/result.go b/src/connector/go/src/taosSql/result.go index 
e6b0adcac7686c9bb0091a172bacda6c7cc02eb3..2367560c31d06f56ac27c20d4a5a5aa099051077 100755 --- a/src/connector/go/src/taosSql/result.go +++ b/src/connector/go/src/taosSql/result.go @@ -25,13 +25,13 @@ package taosSql import "C" import ( - "database/sql/driver" + "database/sql/driver" "errors" - "strconv" - "unsafe" "fmt" "io" + "strconv" "time" + "unsafe" ) /****************************************************************************** @@ -41,10 +41,10 @@ import ( func (mc *taosConn) readColumns(count int) ([]taosSqlField, error) { columns := make([]taosSqlField, count) - var result unsafe.Pointer + var result unsafe.Pointer result = C.taos_use_result(mc.taos) if result == nil { - return nil , errors.New("invalid result") + return nil, errors.New("invalid result") } pFields := (*C.struct_taosField)(C.taos_fetch_fields(result)) @@ -52,7 +52,7 @@ func (mc *taosConn) readColumns(count int) ([]taosSqlField, error) { // TODO: Optimized rewriting !!!! fields := (*[1 << 30]C.struct_taosField)(unsafe.Pointer(pFields)) - for i := 0; i int of C + dest[i] = (int)(*((*int32)(currentRow))) // notes int32 of go <----> int of C break case C.TSDB_DATA_TYPE_BIGINT: @@ -142,7 +143,7 @@ func (rows *taosSqlRows) readRow(dest []driver.Value) error { charLen := rows.rs.columns[i].length var index uint32 binaryVal := make([]byte, charLen) - for index=0; index < charLen; index++ { + for index = 0; index < charLen; index++ { binaryVal[index] = *((*byte)(unsafe.Pointer(uintptr(currentRow) + uintptr(index)))) } dest[i] = string(binaryVal[:]) @@ -152,7 +153,7 @@ func (rows *taosSqlRows) readRow(dest []driver.Value) error { if mc.cfg.parseTime == true { timestamp := (int64)(*((*int64)(currentRow))) dest[i] = timestampConvertToString(timestamp, int(C.taos_result_precision(result))) - }else { + } else { dest[i] = (int64)(*((*int64)(currentRow))) } break @@ -182,12 +183,12 @@ func timestampConvertToString(timestamp int64, precision int) string { var decimal, sVal, nsVal int64 if precision == 0 { 
decimal = timestamp % 1000 - sVal = timestamp / 1000 - nsVal = decimal * 1000 + sVal = timestamp / 1000 + nsVal = decimal * 1000 } else { decimal = timestamp % 1000000 - sVal = timestamp / 1000000 - nsVal = decimal * 1000000 + sVal = timestamp / 1000000 + nsVal = decimal * 1000000 } date_time := time.Unix(sVal, nsVal) diff --git a/src/connector/grafana/tdengine/.gitignore b/src/connector/grafana/tdengine/.gitignore new file mode 100755 index 0000000000000000000000000000000000000000..b58b02b878ca346b90de77891af8b46bebad00e4 --- /dev/null +++ b/src/connector/grafana/tdengine/.gitignore @@ -0,0 +1,37 @@ +node_modules +npm-debug.log +coverage/ +.aws-config.json +awsconfig +/emails/dist +/public_gen +/tmp +vendor/phantomjs/phantomjs + +docs/AWS_S3_BUCKET +docs/GIT_BRANCH +docs/VERSION +docs/GITCOMMIT +docs/changed-files +docs/changed-files + +# locally required config files +public/css/*.min.css + +# Editor junk +*.sublime-workspace +*.swp +.idea/ +*.iml + +/data/* +/bin/* + +conf/custom.ini +fig.yml +profile.cov +grafana +.notouch + +# Test artifacts +/dist/test/ diff --git a/src/connector/grafana/tdengine/.jscs.json b/src/connector/grafana/tdengine/.jscs.json new file mode 100755 index 0000000000000000000000000000000000000000..a51ef6610e1243e281be7ac36c30a3865c10ee3e --- /dev/null +++ b/src/connector/grafana/tdengine/.jscs.json @@ -0,0 +1,14 @@ +{ + "esnext": true, + "disallowImplicitTypeConversion": ["string"], + "disallowKeywords": ["with"], + "disallowMultipleLineBreaks": true, + "disallowMixedSpacesAndTabs": true, + "disallowTrailingWhitespace": true, + "requireSpacesInFunctionExpression": { + "beforeOpeningCurlyBrace": true + }, + "disallowSpacesInsideArrayBrackets": true, + "disallowSpacesInsideParentheses": true, + "validateIndentation": 2 +} diff --git a/src/connector/grafana/tdengine/Gruntfile.js b/src/connector/grafana/tdengine/Gruntfile.js new file mode 100755 index 0000000000000000000000000000000000000000..ad48c7f8483a4ebe1bfbf7f4960c7cd1419fe64a --- 
/dev/null +++ b/src/connector/grafana/tdengine/Gruntfile.js @@ -0,0 +1,85 @@ +module.exports = function(grunt) { + + require('load-grunt-tasks')(grunt); + + grunt.loadNpmTasks('grunt-execute'); + grunt.loadNpmTasks('grunt-contrib-clean'); + + grunt.initConfig({ + + clean: ["dist"], + + copy: { + src_to_dist: { + cwd: 'src', + expand: true, + src: ['**/*', '!**/*.js', '!**/*.scss'], + dest: 'dist' + }, + dashboard_to_dist: { + expand: true, + src: ['dashboard/*'], + dest: 'dist' + }, + pluginDef: { + expand: true, + src: ['README.md'], + dest: 'dist' + } + }, + + watch: { + rebuild_all: { + files: ['src/**/*'], + tasks: ['default'], + options: {spawn: false} + } + }, + + babel: { + options: { + sourceMap: true, + presets: ['env'], + plugins: ['transform-object-rest-spread'] + }, + dist: { + files: [{ + cwd: 'src', + expand: true, + src: ['**/*.js'], + dest: 'dist', + ext:'.js' + }] + }, + distTestNoSystemJs: { + files: [{ + cwd: 'src', + expand: true, + src: ['**/*.js'], + dest: 'dist/test', + ext:'.js' + }] + }, + distTestsSpecsNoSystemJs: { + files: [{ + expand: true, + cwd: 'spec', + src: ['**/*.js'], + dest: 'dist/test/spec', + ext:'.js' + }] + } + }, + + mochaTest: { + test: { + options: { + reporter: 'spec' + }, + src: ['dist/test/spec/test-main.js', 'dist/test/spec/*_spec.js'] + } + } + }); + + grunt.registerTask('default', ['clean', 'copy:src_to_dist', 'copy:dashboard_to_dist', 'copy:pluginDef', 'babel', 'mochaTest']); +}; diff --git a/src/connector/grafana/tdengine/LICENSE b/src/connector/grafana/tdengine/LICENSE new file mode 100755 index 0000000000000000000000000000000000000000..20c71216de3fca699b10a3e22460f0e2d55453c8 --- /dev/null +++ b/src/connector/grafana/tdengine/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. 
It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. \ No newline at end of file diff --git a/src/connector/grafana/tdengine/README.md b/src/connector/grafana/tdengine/README.md index 91dc73daf15c6d4979c41f61f96d87100ec948fc..3012a54e9f34925a21d43c8258e472c49510e999 100644 --- a/src/connector/grafana/tdengine/README.md +++ b/src/connector/grafana/tdengine/README.md @@ -61,7 +61,7 @@ Example response Example request ``` javascript - get request + Get request /heartbeat ``` Example response @@ -70,3 +70,27 @@ Example response "message": "Grafana server receive a quest from you!" } ``` + +### Dev setup + +This plugin requires node 6.10.0 + +``` javascript + +npm install -g yarn +yarn install +npm run build + +``` + +### Import Dashboard + +after login `http://localhost:3000 `, then you can import the tdengine demo dashboard to monitor the system metrics. 
+ +you can import the `dashboard/tdengine-grafana.json`: + +![import_dashboard](dashboard/import_dashboard.png) + +after finished import: + +![import_dashboard](dashboard/tdengine_dashboard.png) diff --git a/src/connector/grafana/tdengine/dashboard/import_dashboard.png b/src/connector/grafana/tdengine/dashboard/import_dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..be3704cb72d6c2614614852bfef17147ce49d061 Binary files /dev/null and b/src/connector/grafana/tdengine/dashboard/import_dashboard.png differ diff --git a/src/connector/grafana/tdengine/dashboard/tdengine-grafana.json b/src/connector/grafana/tdengine/dashboard/tdengine-grafana.json new file mode 100755 index 0000000000000000000000000000000000000000..7a002a3d6b5d2f5dbf7c0b29c01dd8931775792c --- /dev/null +++ b/src/connector/grafana/tdengine/dashboard/tdengine-grafana.json @@ -0,0 +1,588 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 3, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "TDengine", + "description": "total select request per minute last hour", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 8, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "次数/min", + "postfixFontSize": 
"20%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "alias": "req_select", + "refId": "A", + "sql": "select sum(req_select) from log.dn where ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": "120,240", + "timeFrom": null, + "timeShift": null, + "title": "req select", + "type": "singlestat", + "valueFontSize": "150%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "total" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "TDengine", + "description": "total insert request per minute for last hour", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 6, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "次数/min", + "postfixFontSize": "20%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "alias": "req_insert", + "refId": "A", + "sql": "select sum(req_insert) from log.dn where ts >= now-1h and 
ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": "110,240", + "timeFrom": null, + "timeShift": null, + "title": "req insert", + "type": "singlestat", + "valueFontSize": "150%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "total" + }, + { + "datasource": "TDengine", + "description": "taosd max memery last 10 minutes", + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 6 + }, + "id": 12, + "options": { + "fieldOptions": { + "calcs": [ + "mean" + ], + "defaults": { + "mappings": [], + "max": 4096, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + }, + { + "color": "#EAB839", + "value": 2048 + } + ], + "unit": "decmbytes" + }, + "override": {}, + "values": false + }, + "orientation": "auto", + "showThresholdLabels": true, + "showThresholdMarkers": true + }, + "pluginVersion": "6.4.3", + "targets": [ + { + "alias": "mem_taosd", + "refId": "A", + "sql": "select max(mem_taosd) from log.dn where ts >= now -10m and ts < now", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "taosd memery", + "type": "gauge" + }, + { + "datasource": "TDengine", + "description": "max System Memory last 1 hour", + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 6 + }, + "id": 10, + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "mappings": [], + "max": 4, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "semi-dark-orange", + "value": 60 + }, + { + "color": "dark-red", + "value": 80 + } + ], + "title": "", + "unit": "decmbytes" + }, + "override": {}, + "values": false + }, + "orientation": "auto", + "showThresholdLabels": true, + "showThresholdMarkers": true + }, + "pluginVersion": "6.4.3", + "targets": [ + { + "alias": "mem_system", + "refId": "A", + "sql": "select max(mem_system) from log.dn where 
ts >= now -10h and ts < now", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "system memory", + "type": "gauge" + }, + { + "datasource": "TDengine", + "description": "avg band speed last one minute", + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 6 + }, + "id": 14, + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ], + "unit": "Kbits" + }, + "override": {}, + "values": false + }, + "orientation": "auto", + "showThresholdLabels": true, + "showThresholdMarkers": true + }, + "pluginVersion": "6.4.3", + "targets": [ + { + "alias": "band_speed", + "refId": "A", + "sql": "select avg(band_speed) from log.dn where ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "band speed", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "TDengine", + "description": "monitor system cpu", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 12 + }, + "hideTimeOverride": true, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pluginVersion": "6.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "cpu_system11", + "hide": false, + "refId": "A", + "sql": "select avg(cpu_system) from log.dn where ts >= 
now-1h and ts < now interval(1s)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "cpu_taosd", + "hide": false, + "refId": "B", + "sql": "select avg(cpu_taosd) from log.dn where ts >= now-1h and ts < now interval(1s)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "cpu_system", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "percent", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "TDengine", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 12 + }, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "refId": "A", + "sql": "select avg(disk_used) disk_used from log.dn where ts >= $from and ts < $to interval(1s) group by ipaddr", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "avg_disk_used", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + 
"xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decgbytes", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 20, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "TDengine", + "uid": "FE-vpe0Wk", + "version": 1 +} \ No newline at end of file diff --git a/src/connector/grafana/tdengine/dashboard/tdengine_dashboard.png b/src/connector/grafana/tdengine/dashboard/tdengine_dashboard.png new file mode 100755 index 0000000000000000000000000000000000000000..a4b6a7669a47aeb44734b1c8e3241e9439c19ab7 Binary files /dev/null and b/src/connector/grafana/tdengine/dashboard/tdengine_dashboard.png differ diff --git a/src/connector/grafana/tdengine/datasource.js b/src/connector/grafana/tdengine/datasource.js deleted file mode 100644 index 14eb8a9b3604f02a91ebf2d8a2c5c4f5cbacedb3..0000000000000000000000000000000000000000 --- a/src/connector/grafana/tdengine/datasource.js +++ /dev/null @@ -1,170 +0,0 @@ -'use strict'; - -System.register(['lodash'], function (_export, _context) { - "use strict"; - var _, _createClass, GenericDatasource; - - function strTrim(str) { - return str.replace(/^\s+|\s+$/gm,''); - } - - function _classCallCheck(instance, Constructor) { - if (!(instance instanceof Constructor)) { - throw new TypeError("Cannot call a class as a function"); - } - } - - return { - setters: [function (_lodash) { - _ = _lodash.default; - }], - execute: function () { - _createClass = function () { - function 
defineProperties(target, props) { - for (var i = 0; i < props.length; i++) { - var descriptor = props[i]; - descriptor.enumerable = descriptor.enumerable || false; - descriptor.configurable = true; - if ("value" in descriptor) descriptor.writable = true; - Object.defineProperty(target, descriptor.key, descriptor); - } - } - - return function (Constructor, protoProps, staticProps) { - if (protoProps) defineProperties(Constructor.prototype, protoProps); - if (staticProps) defineProperties(Constructor, staticProps); - return Constructor; - }; - }(); - - _export('GenericDatasource', GenericDatasource = function () { - function GenericDatasource(instanceSettings, $q, backendSrv, templateSrv) { - _classCallCheck(this, GenericDatasource); - - this.type = instanceSettings.type; - this.url = instanceSettings.url; - this.name = instanceSettings.name; - this.q = $q; - this.backendSrv = backendSrv; - this.templateSrv = templateSrv; - //this.withCredentials = instanceSettings.withCredentials; - this.headers = { 'Content-Type': 'application/json' }; - var taosuser = instanceSettings.jsonData.user; - var taospwd = instanceSettings.jsonData.password; - if (taosuser == null || taosuser == undefined || taosuser == "") { - taosuser = "root"; - } - if (taospwd == null || taospwd == undefined || taospwd == "") { - taospwd = "taosdata"; - } - - this.headers.Authorization = "Basic " + this.encode(taosuser + ":" + taospwd); - } - - _createClass(GenericDatasource, [{ - key: 'encode', - value: function encode(input) { - var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; - var output = ""; - var chr1, chr2, chr3, enc1, enc2, enc3, enc4; - var i = 0; - while (i < input.length) { - chr1 = input.charCodeAt(i++); - chr2 = input.charCodeAt(i++); - chr3 = input.charCodeAt(i++); - enc1 = chr1 >> 2; - enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); - enc3 = ((chr2 & 15) << 2) | (chr3 >> 6); - enc4 = chr3 & 63; - if (isNaN(chr2)) { - enc3 = enc4 = 64; - } else if (isNaN(chr3)) 
{ - enc4 = 64; - } - output = output + _keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4); - } - - return output; - } - }, { - key: 'generateSql', - value: function generateSql(sql, queryStart, queryEnd, intervalMs) { - if (queryStart == undefined || queryStart == null) { - queryStart = "now-1h"; - } - if (queryEnd == undefined || queryEnd == null) { - queryEnd = "now"; - } - if (intervalMs == undefined || intervalMs == null) { - intervalMs = "20000"; - } - - intervalMs += "a"; - sql = sql.replace(/^\s+|\s+$/gm, ''); - sql = sql.replace("$from", "'" + queryStart + "'"); - sql = sql.replace("$begin", "'" + queryStart + "'"); - sql = sql.replace("$to", "'" + queryEnd + "'"); - sql = sql.replace("$end", "'" + queryEnd + "'"); - sql = sql.replace("$interval", intervalMs); - - return sql; - } - }, { - key: 'query', - value: function query(options) { - var querys = new Array; - for (var i = 0; i < options.targets.length; ++i) { - var query = new Object; - - query.refId = options.targets[i].refId; - query.alias = options.targets[i].alias; - if (query.alias == null || query.alias == undefined) { - query.alias = ""; - } - - //query.sql = this.generateSql(options.targets[i].sql, options.range.raw.from, options.range.raw.to, options.intervalMs); - query.sql = this.generateSql(options.targets[i].sql, options.range.from.toISOString(), options.range.to.toISOString(), options.intervalMs); - console.log(query.sql); - - querys.push(query); - } - - if (querys.length <= 0) { - return this.q.when({ data: [] }); - } - - return this.doRequest({ - url: this.url + '/grafana/query', - data: querys, - method: 'POST' - }); - } - }, { - key: 'testDatasource', - value: function testDatasource() { - return this.doRequest({ - url: this.url + '/grafana/heartbeat', - method: 'GET' - }).then(function (response) { - if (response.status === 200) { - return { status: "success", message: "TDengine Data source is working", title: "Success" }; - } - }); - } - }, { - 
key: 'doRequest', - value: function doRequest(options) { - options.headers = this.headers; - //console.log(options); - return this.backendSrv.datasourceRequest(options); - } - }]); - - return GenericDatasource; - }()); - - _export('GenericDatasource', GenericDatasource); - } - }; -}); -//# sourceMappingURL=datasource.js.map diff --git a/src/connector/grafana/tdengine/dist/README.md b/src/connector/grafana/tdengine/dist/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3012a54e9f34925a21d43c8258e472c49510e999 --- /dev/null +++ b/src/connector/grafana/tdengine/dist/README.md @@ -0,0 +1,96 @@ +TDengine Datasource - build by Taosdata Inc. www.taosdata.com + +TDengine backend server implement 2 urls: + + * `/heartbeat` return 200 ok. Used for "Test connection" on the datasource config page. + * `/query` return data based on input sqls. + +## Installation + +To install this plugin: +Copy the data source you want to /var/lib/grafana/plugins/. Then restart grafana-server. The new data source should now be available in the data source type dropdown in the Add Data Source View. 
+ +``` +cp -r /connector/grafana/tdengine /var/lib/grafana/plugins/ +sudo service grafana-server restart +``` + +### Query API + +Example request +``` javascript +[{ + "refId": "A", + "alias": "taosd-memory", + "sql": "select avg(mem_taosd) from sys.dn where ts > now-5m and ts < now interval(500a)" +}, +{ + "refId": "B", + "alias": "system-memory", + "sql": "select avg(mem_system) from sys.dn where ts > now-5m and ts < now interval(500a)" +}] +``` + +Example response +``` javascript +[{ + "datapoints": [ + [206.488281, 1538137825000], + [206.488281, 1538137855000], + [206.488281, 1538137885500], + [210.609375, 1538137915500], + [210.867188, 1538137945500] + ], + "refId": "A", + "target": "taosd-memory" +}, +{ + "datapoints": [ + [2910.218750, 1538137825000], + [2912.265625, 1538137855000], + [2912.437500, 1538137885500], + [2916.644531, 1538137915500], + [2917.066406, 1538137945500] + ], + "refId": "B", + "target": "system-memory" +}] +``` + +### Heartbeat API + +Example request +``` javascript + Get request /heartbeat +``` + +Example response +``` javascript +{ + "message": "Grafana server receive a quest from you!" +} +``` + +### Dev setup + +This plugin requires node 6.10.0 + +``` javascript + +npm install -g yarn +yarn install +npm run build + +``` + +### Import Dashboard + +after login `http://localhost:3000 `, then you can import the tdengine demo dashboard to monitor the system metrics. 
+ +you can import the `dashboard/tdengine-grafana.json`: + +![import_dashboard](dashboard/import_dashboard.png) + +after finished import: + +![import_dashboard](dashboard/tdengine_dashboard.png) diff --git a/src/connector/grafana/tdengine/css/query-editor.css b/src/connector/grafana/tdengine/dist/css/query-editor.css similarity index 100% rename from src/connector/grafana/tdengine/css/query-editor.css rename to src/connector/grafana/tdengine/dist/css/query-editor.css diff --git a/src/connector/grafana/tdengine/dist/dashboard/import_dashboard.png b/src/connector/grafana/tdengine/dist/dashboard/import_dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..be3704cb72d6c2614614852bfef17147ce49d061 Binary files /dev/null and b/src/connector/grafana/tdengine/dist/dashboard/import_dashboard.png differ diff --git a/src/connector/grafana/tdengine/dist/dashboard/tdengine-grafana.json b/src/connector/grafana/tdengine/dist/dashboard/tdengine-grafana.json new file mode 100644 index 0000000000000000000000000000000000000000..7a002a3d6b5d2f5dbf7c0b29c01dd8931775792c --- /dev/null +++ b/src/connector/grafana/tdengine/dist/dashboard/tdengine-grafana.json @@ -0,0 +1,588 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 3, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "TDengine", + "description": "total select request per minute last hour", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 8, + 
"interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "次数/min", + "postfixFontSize": "20%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "alias": "req_select", + "refId": "A", + "sql": "select sum(req_select) from log.dn where ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": "120,240", + "timeFrom": null, + "timeShift": null, + "title": "req select", + "type": "singlestat", + "valueFontSize": "150%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "total" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "TDengine", + "description": "total insert request per minute for last hour", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 6, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "次数/min", + "postfixFontSize": "20%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + 
"sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "alias": "req_insert", + "refId": "A", + "sql": "select sum(req_insert) from log.dn where ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": "110,240", + "timeFrom": null, + "timeShift": null, + "title": "req insert", + "type": "singlestat", + "valueFontSize": "150%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "total" + }, + { + "datasource": "TDengine", + "description": "taosd max memery last 10 minutes", + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 6 + }, + "id": 12, + "options": { + "fieldOptions": { + "calcs": [ + "mean" + ], + "defaults": { + "mappings": [], + "max": 4096, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + }, + { + "color": "#EAB839", + "value": 2048 + } + ], + "unit": "decmbytes" + }, + "override": {}, + "values": false + }, + "orientation": "auto", + "showThresholdLabels": true, + "showThresholdMarkers": true + }, + "pluginVersion": "6.4.3", + "targets": [ + { + "alias": "mem_taosd", + "refId": "A", + "sql": "select max(mem_taosd) from log.dn where ts >= now -10m and ts < now", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "taosd memery", + "type": "gauge" + }, + { + "datasource": "TDengine", + "description": "max System Memory last 1 hour", + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 6 + }, + "id": 10, + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "mappings": [], + "max": 4, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "semi-dark-orange", + "value": 60 + }, + { + "color": "dark-red", + "value": 80 + } + ], + 
"title": "", + "unit": "decmbytes" + }, + "override": {}, + "values": false + }, + "orientation": "auto", + "showThresholdLabels": true, + "showThresholdMarkers": true + }, + "pluginVersion": "6.4.3", + "targets": [ + { + "alias": "mem_system", + "refId": "A", + "sql": "select max(mem_system) from log.dn where ts >= now -10h and ts < now", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "system memory", + "type": "gauge" + }, + { + "datasource": "TDengine", + "description": "avg band speed last one minute", + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 6 + }, + "id": 14, + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ], + "unit": "Kbits" + }, + "override": {}, + "values": false + }, + "orientation": "auto", + "showThresholdLabels": true, + "showThresholdMarkers": true + }, + "pluginVersion": "6.4.3", + "targets": [ + { + "alias": "band_speed", + "refId": "A", + "sql": "select avg(band_speed) from log.dn where ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "band speed", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "TDengine", + "description": "monitor system cpu", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 12 + }, + "hideTimeOverride": true, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + 
"pluginVersion": "6.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "cpu_system11", + "hide": false, + "refId": "A", + "sql": "select avg(cpu_system) from log.dn where ts >= now-1h and ts < now interval(1s)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "cpu_taosd", + "hide": false, + "refId": "B", + "sql": "select avg(cpu_taosd) from log.dn where ts >= now-1h and ts < now interval(1s)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "cpu_system", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "percent", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "TDengine", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 12 + }, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "refId": "A", + "sql": "select avg(disk_used) disk_used from log.dn where ts >= $from and ts 
< $to interval(1s) group by ipaddr", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "avg_disk_used", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decgbytes", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 20, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "TDengine", + "uid": "FE-vpe0Wk", + "version": 1 +} \ No newline at end of file diff --git a/src/connector/grafana/tdengine/dist/dashboard/tdengine_dashboard.png b/src/connector/grafana/tdengine/dist/dashboard/tdengine_dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..a4b6a7669a47aeb44734b1c8e3241e9439c19ab7 Binary files /dev/null and b/src/connector/grafana/tdengine/dist/dashboard/tdengine_dashboard.png differ diff --git a/src/connector/grafana/tdengine/dist/datasource.js b/src/connector/grafana/tdengine/dist/datasource.js new file mode 100644 index 0000000000000000000000000000000000000000..8190394f9304dfc809b9c6c14dfcb1853fb15a72 --- /dev/null +++ b/src/connector/grafana/tdengine/dist/datasource.js @@ -0,0 +1,156 @@ +'use strict'; + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.GenericDatasource = undefined; + +var _createClass = function () { function defineProperties(target, props) { 
for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); + +var _lodash = require('lodash'); + +var _lodash2 = _interopRequireDefault(_lodash); + +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } + +function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } + +var GenericDatasource = exports.GenericDatasource = function () { + function GenericDatasource(instanceSettings, $q, backendSrv, templateSrv) { + _classCallCheck(this, GenericDatasource); + + this.type = instanceSettings.type; + this.url = instanceSettings.url; + this.name = instanceSettings.name; + this.q = $q; + this.backendSrv = backendSrv; + this.templateSrv = templateSrv; + this.headers = { 'Content-Type': 'application/json' }; + this.headers.Authorization = this.getAuthorization(instanceSettings.jsonData); + } + + _createClass(GenericDatasource, [{ + key: 'query', + value: function query(options) { + var targets = this.buildQueryParameters(options); + + if (targets.length <= 0) { + return this.q.when({ data: [] }); + } + + return this.doRequest({ + url: this.url + '/grafana/query', + data: targets, + method: 'POST' + }); + } + }, { + key: 'testDatasource', + value: function testDatasource() { + return this.doRequest({ + url: this.url + '/grafana/heartbeat', + method: 'GET' + }).then(function (response) { + if (response.status === 200) { + return { status: "success", message: "TDengine Data source is working", title: "Success" }; + } + }); + 
} + }, { + key: 'doRequest', + value: function doRequest(options) { + options.headers = this.headers; + + return this.backendSrv.datasourceRequest(options); + } + }, { + key: 'buildQueryParameters', + value: function buildQueryParameters(options) { + var _this = this; + + var targets = _lodash2.default.map(options.targets, function (target) { + return { + refId: target.refId, + alias: _this.generateAlias(options, target), + sql: _this.generateSql(options, target) + }; + }); + + return targets; + } + }, { + key: 'encode', + value: function encode(input) { + var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; + var output = ""; + var chr1, chr2, chr3, enc1, enc2, enc3, enc4; + var i = 0; + while (i < input.length) { + chr1 = input.charCodeAt(i++); + chr2 = input.charCodeAt(i++); + chr3 = input.charCodeAt(i++); + enc1 = chr1 >> 2; + enc2 = (chr1 & 3) << 4 | chr2 >> 4; + enc3 = (chr2 & 15) << 2 | chr3 >> 6; + enc4 = chr3 & 63; + if (isNaN(chr2)) { + enc3 = enc4 = 64; + } else if (isNaN(chr3)) { + enc4 = 64; + } + output = output + _keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4); + } + + return output; + } + }, { + key: 'getAuthorization', + value: function getAuthorization(jsonData) { + jsonData = jsonData || {}; + var defaultUser = jsonData.user || "root"; + var defaultPassword = jsonData.password || "taosdata"; + + return "Basic " + this.encode(defaultUser + ":" + defaultPassword); + } + }, { + key: 'generateAlias', + value: function generateAlias(options, target) { + var alias = target.alias || ""; + alias = this.templateSrv.replace(alias, options.scopedVars, 'csv'); + return alias; + } + }, { + key: 'generateSql', + value: function generateSql(options, target) { + var sql = target.sql; + if (sql == null || sql == "") { + return sql; + } + + var queryStart = "now-1h"; + if (options != null && options.range != null && options.range.from != null) { + queryStart = options.range.from.toISOString(); 
+ } + + var queryEnd = "now"; + if (options != null && options.range != null && options.range.to != null) { + queryEnd = options.range.to.toISOString(); + } + var intervalMs = options.intervalMs || "20000"; + + intervalMs += "a"; + sql = sql.replace(/^\s+|\s+$/gm, ''); + sql = sql.replace("$from", "'" + queryStart + "'"); + sql = sql.replace("$begin", "'" + queryStart + "'"); + sql = sql.replace("$to", "'" + queryEnd + "'"); + sql = sql.replace("$end", "'" + queryEnd + "'"); + sql = sql.replace("$interval", intervalMs); + + sql = this.templateSrv.replace(sql, options.scopedVars, 'csv'); + return sql; + } + }]); + + return GenericDatasource; +}(); +//# sourceMappingURL=datasource.js.map diff --git a/src/connector/grafana/tdengine/dist/datasource.js.map b/src/connector/grafana/tdengine/dist/datasource.js.map new file mode 100644 index 0000000000000000000000000000000000000000..0065a5ae0e536992c9c19490b785daf6873a5407 --- /dev/null +++ b/src/connector/grafana/tdengine/dist/datasource.js.map @@ -0,0 +1 @@ 
+{"version":3,"sources":["../src/datasource.js"],"names":["GenericDatasource","instanceSettings","$q","backendSrv","templateSrv","type","url","name","q","headers","Authorization","getAuthorization","jsonData","options","targets","buildQueryParameters","length","when","data","doRequest","method","then","response","status","message","title","datasourceRequest","_","map","refId","target","alias","generateAlias","sql","generateSql","input","_keyStr","output","chr1","chr2","chr3","enc1","enc2","enc3","enc4","i","charCodeAt","isNaN","charAt","defaultUser","user","defaultPassword","password","encode","replace","scopedVars","queryStart","range","from","toISOString","queryEnd","to","intervalMs"],"mappings":";;;;;;;;;AAAA;;;;;;;;IAEaA,iB,WAAAA,iB;AAEX,6BAAYC,gBAAZ,EAA8BC,EAA9B,EAAkCC,UAAlC,EAA8CC,WAA9C,EAA2D;AAAA;;AACzD,SAAKC,IAAL,GAAYJ,iBAAiBI,IAA7B;AACA,SAAKC,GAAL,GAAWL,iBAAiBK,GAA5B;AACA,SAAKC,IAAL,GAAYN,iBAAiBM,IAA7B;AACA,SAAKC,CAAL,GAASN,EAAT;AACA,SAAKC,UAAL,GAAkBA,UAAlB;AACA,SAAKC,WAAL,GAAmBA,WAAnB;AACA,SAAKK,OAAL,GAAe,EAAC,gBAAgB,kBAAjB,EAAf;AACA,SAAKA,OAAL,CAAaC,aAAb,GAA6B,KAAKC,gBAAL,CAAsBV,iBAAiBW,QAAvC,CAA7B;AACD;;;;0BAEKC,O,EAAS;AACb,UAAIC,UAAU,KAAKC,oBAAL,CAA0BF,OAA1B,CAAd;;AAEA,UAAIC,QAAQE,MAAR,IAAkB,CAAtB,EAAyB;AACvB,eAAO,KAAKR,CAAL,CAAOS,IAAP,CAAY,EAACC,MAAM,EAAP,EAAZ,CAAP;AACD;;AAED,aAAO,KAAKC,SAAL,CAAe;AACpBb,aAAK,KAAKA,GAAL,GAAW,gBADI;AAEpBY,cAAMJ,OAFc;AAGpBM,gBAAQ;AAHY,OAAf,CAAP;AAKD;;;qCAEgB;AACf,aAAO,KAAKD,SAAL,CAAe;AACpBb,aAAK,KAAKA,GAAL,GAAW,oBADI;AAEpBc,gBAAQ;AAFY,OAAf,EAGJC,IAHI,CAGC,oBAAY;AAClB,YAAIC,SAASC,MAAT,KAAoB,GAAxB,EAA6B;AAC3B,iBAAO,EAAEA,QAAQ,SAAV,EAAqBC,SAAS,iCAA9B,EAAiEC,OAAO,SAAxE,EAAP;AACD;AACF,OAPM,CAAP;AAQD;;;8BAESZ,O,EAAS;AACjBA,cAAQJ,OAAR,GAAkB,KAAKA,OAAvB;;AAEA,aAAO,KAAKN,UAAL,CAAgBuB,iBAAhB,CAAkCb,OAAlC,CAAP;AACD;;;yCAEoBA,O,EAAS;AAAA;;AAE5B,UAAIC,UAAUa,iBAAEC,GAAF,CAAMf,QAAQC,OAAd,EAAuB,kBAAU;AAC7C,eAAO;AACLe,iBAAOC,OAAOD,KADT;AAELE,iBAAO,MAAKC,aAAL,CAAmBnB,OAAnB,EAA4BiB,MAA5B,CAFF;AAGLG,eAAK,MAAKC,WAAL,CAAiBrB,OAAjB,EAA0BiB,MAA1
B;AAHA,SAAP;AAKD,OANa,CAAd;;AAQA,aAAOhB,OAAP;AACD;;;2BAEMqB,K,EAAO;AACZ,UAAIC,UAAU,mEAAd;AACA,UAAIC,SAAS,EAAb;AACA,UAAIC,IAAJ,EAAUC,IAAV,EAAgBC,IAAhB,EAAsBC,IAAtB,EAA4BC,IAA5B,EAAkCC,IAAlC,EAAwCC,IAAxC;AACA,UAAIC,IAAI,CAAR;AACA,aAAOA,IAAIV,MAAMnB,MAAjB,EAAyB;AACvBsB,eAAOH,MAAMW,UAAN,CAAiBD,GAAjB,CAAP;AACAN,eAAOJ,MAAMW,UAAN,CAAiBD,GAAjB,CAAP;AACAL,eAAOL,MAAMW,UAAN,CAAiBD,GAAjB,CAAP;AACAJ,eAAOH,QAAQ,CAAf;AACAI,eAAQ,CAACJ,OAAO,CAAR,KAAc,CAAf,GAAqBC,QAAQ,CAApC;AACAI,eAAQ,CAACJ,OAAO,EAAR,KAAe,CAAhB,GAAsBC,QAAQ,CAArC;AACAI,eAAOJ,OAAO,EAAd;AACA,YAAIO,MAAMR,IAAN,CAAJ,EAAiB;AACfI,iBAAOC,OAAO,EAAd;AACD,SAFD,MAEO,IAAIG,MAAMP,IAAN,CAAJ,EAAiB;AACtBI,iBAAO,EAAP;AACD;AACDP,iBAASA,SAASD,QAAQY,MAAR,CAAeP,IAAf,CAAT,GAAgCL,QAAQY,MAAR,CAAeN,IAAf,CAAhC,GAAuDN,QAAQY,MAAR,CAAeL,IAAf,CAAvD,GAA8EP,QAAQY,MAAR,CAAeJ,IAAf,CAAvF;AACD;;AAED,aAAOP,MAAP;AACD;;;qCAEgBzB,Q,EAAS;AACxBA,iBAAWA,YAAY,EAAvB;AACA,UAAIqC,cAAcrC,SAASsC,IAAT,IAAiB,MAAnC;AACA,UAAIC,kBAAkBvC,SAASwC,QAAT,IAAqB,UAA3C;;AAEA,aAAO,WAAW,KAAKC,MAAL,CAAYJ,cAAc,GAAd,GAAoBE,eAAhC,CAAlB;AACD;;;kCAEatC,O,EAASiB,M,EAAO;AAC5B,UAAIC,QAAQD,OAAOC,KAAP,IAAgB,EAA5B;AACAA,cAAQ,KAAK3B,WAAL,CAAiBkD,OAAjB,CAAyBvB,KAAzB,EAAgClB,QAAQ0C,UAAxC,EAAoD,KAApD,CAAR;AACA,aAAOxB,KAAP;AACD;;;gCAEWlB,O,EAASiB,M,EAAQ;AAC3B,UAAIG,MAAMH,OAAOG,GAAjB;AACA,UAAIA,OAAO,IAAP,IAAeA,OAAO,EAA1B,EAA6B;AAC3B,eAAOA,GAAP;AACD;;AAED,UAAIuB,aAAa,QAAjB;AACA,UAAI3C,WAAW,IAAX,IAAmBA,QAAQ4C,KAAR,IAAiB,IAApC,IAA4C5C,QAAQ4C,KAAR,CAAcC,IAAd,IAAsB,IAAtE,EAA2E;AACzEF,qBAAa3C,QAAQ4C,KAAR,CAAcC,IAAd,CAAmBC,WAAnB,EAAb;AACD;;AAED,UAAIC,WAAW,KAAf;AACA,UAAI/C,WAAW,IAAX,IAAmBA,QAAQ4C,KAAR,IAAiB,IAApC,IAA4C5C,QAAQ4C,KAAR,CAAcI,EAAd,IAAoB,IAApE,EAAyE;AACvED,mBAAW/C,QAAQ4C,KAAR,CAAcI,EAAd,CAAiBF,WAAjB,EAAX;AACD;AACD,UAAIG,aAAajD,QAAQiD,UAAR,IAAsB,OAAvC;;AAEAA,oBAAc,GAAd;AACA7B,YAAMA,IAAIqB,OAAJ,CAAY,aAAZ,EAA2B,EAA3B,CAAN;AACArB,YAAMA,IAAIqB,OAAJ,CAAY,OAAZ,EAAqB,MAAME,UAAN,GAAmB,GAAxC,CAAN;AACAvB,YAAMA,IAAIqB,OAAJ,CAAY,QAAZ,EAAsB,MAAME,UAAN,GAAmB,GAAzC,CAAN;AACAvB,YAAMA,IAAIqB,OAAJ,CAAY,KAAZ,EAAmB,M
AAMM,QAAN,GAAiB,GAApC,CAAN;AACA3B,YAAMA,IAAIqB,OAAJ,CAAY,MAAZ,EAAoB,MAAMM,QAAN,GAAiB,GAArC,CAAN;AACA3B,YAAMA,IAAIqB,OAAJ,CAAY,WAAZ,EAAyBQ,UAAzB,CAAN;;AAEA7B,YAAM,KAAK7B,WAAL,CAAiBkD,OAAjB,CAAyBrB,GAAzB,EAA8BpB,QAAQ0C,UAAtC,EAAkD,KAAlD,CAAN;AACA,aAAOtB,GAAP;AACD","file":"datasource.js","sourcesContent":["import _ from \"lodash\";\n\nexport class GenericDatasource {\n\n constructor(instanceSettings, $q, backendSrv, templateSrv) {\n this.type = instanceSettings.type;\n this.url = instanceSettings.url;\n this.name = instanceSettings.name;\n this.q = $q;\n this.backendSrv = backendSrv;\n this.templateSrv = templateSrv;\n this.headers = {'Content-Type': 'application/json'};\n this.headers.Authorization = this.getAuthorization(instanceSettings.jsonData);\n }\n\n query(options) {\n var targets = this.buildQueryParameters(options);\n\n if (targets.length <= 0) {\n return this.q.when({data: []});\n }\n\n return this.doRequest({\n url: this.url + '/grafana/query',\n data: targets,\n method: 'POST'\n });\n }\n\n testDatasource() {\n return this.doRequest({\n url: this.url + '/grafana/heartbeat',\n method: 'GET',\n }).then(response => {\n if (response.status === 200) {\n return { status: \"success\", message: \"TDengine Data source is working\", title: \"Success\" };\n }\n });\n }\n\n doRequest(options) {\n options.headers = this.headers;\n\n return this.backendSrv.datasourceRequest(options);\n }\n\n buildQueryParameters(options) {\n\n var targets = _.map(options.targets, target => {\n return {\n refId: target.refId,\n alias: this.generateAlias(options, target),\n sql: this.generateSql(options, target)\n };\n });\n\n return targets;\n }\n\n encode(input) {\n var _keyStr = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\";\n var output = \"\";\n var chr1, chr2, chr3, enc1, enc2, enc3, enc4;\n var i = 0;\n while (i < input.length) {\n chr1 = input.charCodeAt(i++);\n chr2 = input.charCodeAt(i++);\n chr3 = input.charCodeAt(i++);\n enc1 = chr1 >> 2;\n enc2 = 
((chr1 & 3) << 4) | (chr2 >> 4);\n enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);\n enc4 = chr3 & 63;\n if (isNaN(chr2)) {\n enc3 = enc4 = 64;\n } else if (isNaN(chr3)) {\n enc4 = 64;\n }\n output = output + _keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4);\n }\n\n return output;\n }\n\n getAuthorization(jsonData){\n jsonData = jsonData || {};\n var defaultUser = jsonData.user || \"root\";\n var defaultPassword = jsonData.password || \"taosdata\";\n\n return \"Basic \" + this.encode(defaultUser + \":\" + defaultPassword);\n }\n\n generateAlias(options, target){\n var alias = target.alias || \"\";\n alias = this.templateSrv.replace(alias, options.scopedVars, 'csv');\n return alias;\n }\n\n generateSql(options, target) {\n var sql = target.sql;\n if (sql == null || sql == \"\"){\n return sql;\n }\n\n var queryStart = \"now-1h\";\n if (options != null && options.range != null && options.range.from != null){\n queryStart = options.range.from.toISOString();\n }\n\n var queryEnd = \"now\";\n if (options != null && options.range != null && options.range.to != null){\n queryEnd = options.range.to.toISOString();\n }\n var intervalMs = options.intervalMs || \"20000\";\n\n intervalMs += \"a\";\n sql = sql.replace(/^\\s+|\\s+$/gm, '');\n sql = sql.replace(\"$from\", \"'\" + queryStart + \"'\");\n sql = sql.replace(\"$begin\", \"'\" + queryStart + \"'\");\n sql = sql.replace(\"$to\", \"'\" + queryEnd + \"'\");\n sql = sql.replace(\"$end\", \"'\" + queryEnd + \"'\");\n sql = sql.replace(\"$interval\", intervalMs);\n\n sql = this.templateSrv.replace(sql, options.scopedVars, 'csv');\n return sql;\n }\n\n}"]} \ No newline at end of file diff --git a/src/connector/grafana/tdengine/img/taosdata_logo.png b/src/connector/grafana/tdengine/dist/img/taosdata_logo.png similarity index 100% rename from src/connector/grafana/tdengine/img/taosdata_logo.png rename to src/connector/grafana/tdengine/dist/img/taosdata_logo.png diff --git 
a/src/connector/grafana/tdengine/dist/module.js b/src/connector/grafana/tdengine/dist/module.js new file mode 100644 index 0000000000000000000000000000000000000000..3e88e404a8f6d3e227ff20bc755aeeef5b2c7964 --- /dev/null +++ b/src/connector/grafana/tdengine/dist/module.js @@ -0,0 +1,37 @@ +'use strict'; + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.AnnotationsQueryCtrl = exports.QueryOptionsCtrl = exports.ConfigCtrl = exports.QueryCtrl = exports.Datasource = undefined; + +var _datasource = require('./datasource'); + +var _query_ctrl = require('./query_ctrl'); + +function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } + +var GenericConfigCtrl = function GenericConfigCtrl() { + _classCallCheck(this, GenericConfigCtrl); +}; + +GenericConfigCtrl.templateUrl = 'partials/config.html'; + +var GenericQueryOptionsCtrl = function GenericQueryOptionsCtrl() { + _classCallCheck(this, GenericQueryOptionsCtrl); +}; + +GenericQueryOptionsCtrl.templateUrl = 'partials/query.options.html'; + +var GenericAnnotationsQueryCtrl = function GenericAnnotationsQueryCtrl() { + _classCallCheck(this, GenericAnnotationsQueryCtrl); +}; + +GenericAnnotationsQueryCtrl.templateUrl = 'partials/annotations.editor.html'; + +exports.Datasource = _datasource.GenericDatasource; +exports.QueryCtrl = _query_ctrl.GenericDatasourceQueryCtrl; +exports.ConfigCtrl = GenericConfigCtrl; +exports.QueryOptionsCtrl = GenericQueryOptionsCtrl; +exports.AnnotationsQueryCtrl = GenericAnnotationsQueryCtrl; +//# sourceMappingURL=module.js.map diff --git a/src/connector/grafana/tdengine/dist/module.js.map b/src/connector/grafana/tdengine/dist/module.js.map new file mode 100644 index 0000000000000000000000000000000000000000..c6d4dfce1cdf0850807eefe3dafa059bf153d6a0 --- /dev/null +++ b/src/connector/grafana/tdengine/dist/module.js.map @@ -0,0 +1 @@ 
+{"version":3,"sources":["../src/module.js"],"names":["GenericConfigCtrl","templateUrl","GenericQueryOptionsCtrl","GenericAnnotationsQueryCtrl","Datasource","GenericDatasource","QueryCtrl","GenericDatasourceQueryCtrl","ConfigCtrl","QueryOptionsCtrl","AnnotationsQueryCtrl"],"mappings":";;;;;;;AAAA;;AACA;;;;IAEMA,iB;;;;AACNA,kBAAkBC,WAAlB,GAAgC,sBAAhC;;IAEMC,uB;;;;AACNA,wBAAwBD,WAAxB,GAAsC,6BAAtC;;IAEME,2B;;;;AACNA,4BAA4BF,WAA5B,GAA0C,kCAA1C;;QAGuBG,U,GAArBC,6B;QAC8BC,S,GAA9BC,sC;QACqBC,U,GAArBR,iB;QAC2BS,gB,GAA3BP,uB;QAC+BQ,oB,GAA/BP,2B","file":"module.js","sourcesContent":["import {GenericDatasource} from './datasource';\nimport {GenericDatasourceQueryCtrl} from './query_ctrl';\n\nclass GenericConfigCtrl {}\nGenericConfigCtrl.templateUrl = 'partials/config.html';\n\nclass GenericQueryOptionsCtrl {}\nGenericQueryOptionsCtrl.templateUrl = 'partials/query.options.html';\n\nclass GenericAnnotationsQueryCtrl {}\nGenericAnnotationsQueryCtrl.templateUrl = 'partials/annotations.editor.html'\n\nexport {\n GenericDatasource as Datasource,\n GenericDatasourceQueryCtrl as QueryCtrl,\n GenericConfigCtrl as ConfigCtrl,\n GenericQueryOptionsCtrl as QueryOptionsCtrl,\n GenericAnnotationsQueryCtrl as AnnotationsQueryCtrl\n};\n"]} \ No newline at end of file diff --git a/src/connector/grafana/tdengine/partials/config.html b/src/connector/grafana/tdengine/dist/partials/config.html similarity index 100% rename from src/connector/grafana/tdengine/partials/config.html rename to src/connector/grafana/tdengine/dist/partials/config.html diff --git a/src/connector/grafana/tdengine/partials/query.editor.html b/src/connector/grafana/tdengine/dist/partials/query.editor.html similarity index 91% rename from src/connector/grafana/tdengine/partials/query.editor.html rename to src/connector/grafana/tdengine/dist/partials/query.editor.html index 4f16dc2aa93d6709c2bb7820ec9f4adec6a8fc7d..4fd209d39459166482139472996d245cd99a0ccd 100644 --- a/src/connector/grafana/tdengine/partials/query.editor.html 
+++ b/src/connector/grafana/tdengine/dist/partials/query.editor.html @@ -3,7 +3,7 @@
    - +
    diff --git a/src/connector/grafana/tdengine/plugin.json b/src/connector/grafana/tdengine/dist/plugin.json similarity index 51% rename from src/connector/grafana/tdengine/plugin.json rename to src/connector/grafana/tdengine/dist/plugin.json index 6093703b700ed185dc8841c2c3e806ab9568e36f..e9954ce6ce16c7b943f3002896144891c9dbc629 100644 --- a/src/connector/grafana/tdengine/plugin.json +++ b/src/connector/grafana/tdengine/dist/plugin.json @@ -1,6 +1,6 @@ { "name": "TDengine", - "id": "tdengine", + "id": "taosdata-tdengine-datasource", "type": "datasource", "partials": { @@ -9,10 +9,9 @@ "metrics": true, "annotations": false, - "alerting": true, "info": { - "description": "TDengine datasource", + "description": "grafana datasource plugin for tdengine", "author": { "name": "Taosdata Inc.", "url": "https://www.taosdata.com" @@ -21,8 +20,12 @@ "small": "img/taosdata_logo.png", "large": "img/taosdata_logo.png" }, - "version": "1.6.0", - "updated": "2019-07-01" + "links": [ + {"name": "GitHub", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"}, + {"name": "AGPL 3.0", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine/LICENSE"} + ], + "version": "1.0.0", + "updated": "2020-01-13" }, "dependencies": { diff --git a/src/connector/grafana/tdengine/dist/query_ctrl.js b/src/connector/grafana/tdengine/dist/query_ctrl.js new file mode 100644 index 0000000000000000000000000000000000000000..0bc8f1cdfbc651eca4d374a9522a2c84c0a074c5 --- /dev/null +++ b/src/connector/grafana/tdengine/dist/query_ctrl.js @@ -0,0 +1,51 @@ +'use strict'; + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.GenericDatasourceQueryCtrl = undefined; + +var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) 
descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); + +var _sdk = require('app/plugins/sdk'); + +require('./css/query-editor.css!'); + +function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } + +function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; } + +function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } + +var GenericDatasourceQueryCtrl = exports.GenericDatasourceQueryCtrl = function (_QueryCtrl) { + _inherits(GenericDatasourceQueryCtrl, _QueryCtrl); + + function GenericDatasourceQueryCtrl($scope, $injector) { + _classCallCheck(this, GenericDatasourceQueryCtrl); + + var _this = _possibleConstructorReturn(this, (GenericDatasourceQueryCtrl.__proto__ || Object.getPrototypeOf(GenericDatasourceQueryCtrl)).call(this, $scope, $injector)); + + _this.scope = $scope; + _this.target.target = _this.target.target || 'select metric'; + _this.target.type = _this.target.type || 'timeserie'; + return _this; + } + + _createClass(GenericDatasourceQueryCtrl, [{ + key: 'onChangeInternal', + value: function onChangeInternal() { + this.panelCtrl.refresh(); // Asks the panel to refresh data. + } + }, { + key: 'generateSQL', + value: function generateSQL(query) { + this.lastGenerateSQL = this.datasource.generateSql(this.panelCtrl, this.target); + this.showGenerateSQL = !this.showGenerateSQL; + } + }]); + + return GenericDatasourceQueryCtrl; +}(_sdk.QueryCtrl); + +GenericDatasourceQueryCtrl.templateUrl = 'partials/query.editor.html'; +//# sourceMappingURL=query_ctrl.js.map diff --git a/src/connector/grafana/tdengine/dist/query_ctrl.js.map b/src/connector/grafana/tdengine/dist/query_ctrl.js.map new file mode 100644 index 0000000000000000000000000000000000000000..f9410918e07578761e3483983b7d9df13373c873 --- /dev/null +++ b/src/connector/grafana/tdengine/dist/query_ctrl.js.map @@ -0,0 +1 @@ 
+{"version":3,"sources":["../src/query_ctrl.js"],"names":["GenericDatasourceQueryCtrl","$scope","$injector","scope","target","type","panelCtrl","refresh","query","lastGenerateSQL","datasource","generateSql","showGenerateSQL","QueryCtrl","templateUrl"],"mappings":";;;;;;;;;AAAA;;AACA;;;;;;;;IAEaA,0B,WAAAA,0B;;;AAEX,sCAAYC,MAAZ,EAAoBC,SAApB,EAAgC;AAAA;;AAAA,wJACxBD,MADwB,EAChBC,SADgB;;AAG9B,UAAKC,KAAL,GAAaF,MAAb;AACA,UAAKG,MAAL,CAAYA,MAAZ,GAAqB,MAAKA,MAAL,CAAYA,MAAZ,IAAsB,eAA3C;AACA,UAAKA,MAAL,CAAYC,IAAZ,GAAmB,MAAKD,MAAL,CAAYC,IAAZ,IAAoB,WAAvC;AAL8B;AAM/B;;;;uCAEkB;AACjB,WAAKC,SAAL,CAAeC,OAAf,GADiB,CACS;AAC3B;;;gCAEWC,K,EAAO;AACjB,WAAKC,eAAL,GAAuB,KAAKC,UAAL,CAAgBC,WAAhB,CAA6B,KAAKL,SAAlC,EAA6C,KAAKF,MAAlD,CAAvB;AACA,WAAKQ,eAAL,GAAuB,CAAC,KAAKA,eAA7B;AACD;;;;EAjB6CC,c;;AAqBhDb,2BAA2Bc,WAA3B,GAAyC,4BAAzC","file":"query_ctrl.js","sourcesContent":["import {QueryCtrl} from 'app/plugins/sdk';\nimport './css/query-editor.css!'\n\nexport class GenericDatasourceQueryCtrl extends QueryCtrl {\n\n constructor($scope, $injector) {\n super($scope, $injector);\n\n this.scope = $scope;\n this.target.target = this.target.target || 'select metric';\n this.target.type = this.target.type || 'timeserie';\n }\n\n onChangeInternal() {\n this.panelCtrl.refresh(); // Asks the panel to refresh data.\n }\n\n generateSQL(query) {\n this.lastGenerateSQL = this.datasource.generateSql( this.panelCtrl, this.target);\n this.showGenerateSQL = !this.showGenerateSQL;\n }\n\n}\n\nGenericDatasourceQueryCtrl.templateUrl = 'partials/query.editor.html';"]} \ No newline at end of file diff --git a/src/connector/grafana/tdengine/module.js b/src/connector/grafana/tdengine/module.js deleted file mode 100644 index 8592cf2375564deba7c37fa11ab21f57dc85e843..0000000000000000000000000000000000000000 --- a/src/connector/grafana/tdengine/module.js +++ /dev/null @@ -1,51 +0,0 @@ -'use strict'; - -System.register(['./datasource', './query_ctrl'], function (_export, _context) { - "use strict"; - - var GenericDatasource, 
GenericDatasourceQueryCtrl, GenericConfigCtrl, GenericQueryOptionsCtrl, GenericAnnotationsQueryCtrl; - - function _classCallCheck(instance, Constructor) { - if (!(instance instanceof Constructor)) { - throw new TypeError("Cannot call a class as a function"); - } - } - - return { - setters: [function (_datasource) { - GenericDatasource = _datasource.GenericDatasource; - }, function (_query_ctrl) { - GenericDatasourceQueryCtrl = _query_ctrl.GenericDatasourceQueryCtrl; - }], - execute: function () { - _export('ConfigCtrl', GenericConfigCtrl = function GenericConfigCtrl() { - _classCallCheck(this, GenericConfigCtrl); - }); - - GenericConfigCtrl.templateUrl = 'partials/config.html'; - - _export('QueryOptionsCtrl', GenericQueryOptionsCtrl = function GenericQueryOptionsCtrl() { - _classCallCheck(this, GenericQueryOptionsCtrl); - }); - - GenericQueryOptionsCtrl.templateUrl = 'partials/query.options.html'; - - _export('AnnotationsQueryCtrl', GenericAnnotationsQueryCtrl = function GenericAnnotationsQueryCtrl() { - _classCallCheck(this, GenericAnnotationsQueryCtrl); - }); - - GenericAnnotationsQueryCtrl.templateUrl = 'partials/annotations.editor.html'; - - _export('Datasource', GenericDatasource); - - _export('QueryCtrl', GenericDatasourceQueryCtrl); - - _export('ConfigCtrl', GenericConfigCtrl); - - _export('QueryOptionsCtrl', GenericQueryOptionsCtrl); - - _export('AnnotationsQueryCtrl', GenericAnnotationsQueryCtrl); - } - }; -}); -//# sourceMappingURL=module.js.map diff --git a/src/connector/grafana/tdengine/package-lock.json b/src/connector/grafana/tdengine/package-lock.json new file mode 100644 index 0000000000000000000000000000000000000000..f8d2df1150d0c7c2581de4ceb61e5647d52ab871 --- /dev/null +++ b/src/connector/grafana/tdengine/package-lock.json @@ -0,0 +1,4000 @@ +{ + "name": "TDengine", + "version": "1.0.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "@types/estree": { + "version": "0.0.38", + "resolved": 
"https://registry.npmjs.org/@types/estree/-/estree-0.0.38.tgz", + "integrity": "sha512-F/v7t1LwS4vnXuPooJQGBRKRGIoxWUTmA4VHfqjOccFsNDThD5bfUNpITive6s352O7o384wcpEaDV8rHCehDA==", + "dev": true + }, + "@types/node": { + "version": "12.12.21", + "resolved": "https://registry.npmjs.org/@types/node/-/node-12.12.21.tgz", + "integrity": "sha512-8sRGhbpU+ck1n0PGAUgVrWrWdjSW2aqNeyC15W88GRsMpSwzv6RJGlLhE7s2RhVSOdyDmxbqlWSeThq4/7xqlA==", + "dev": true + }, + "abab": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/abab/-/abab-1.0.4.tgz", + "integrity": "sha1-X6rZwsB/YN12dw9xzwJbYqY8/U4=", + "dev": true + }, + "abbrev": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", + "dev": true + }, + "acorn": { + "version": "4.0.13", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-4.0.13.tgz", + "integrity": "sha1-EFSVrlNh1pe9GVyCUZLhrX8lN4c=", + "dev": true + }, + "acorn-globals": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-3.1.0.tgz", + "integrity": "sha1-/YJw9x+7SZawBPqIDuXUZXOnMb8=", + "dev": true, + "requires": { + "acorn": "^4.0.4" + } + }, + "ajv": { + "version": "6.10.2", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.10.2.tgz", + "integrity": "sha512-TXtUUEYHuaTEbLZWIKUr5pmBuhDLy+8KYtPYdcV8qC+pOZL+NKqYwvWSRrVXHn+ZmRRAu8vJTAznH7Oag6RVRw==", + "dev": true, + "requires": { + "fast-deep-equal": "^2.0.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "align-text": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/align-text/-/align-text-0.1.4.tgz", + "integrity": "sha1-DNkKVhCT810KmSVsIrcGlDP60Rc=", + "dev": true, + "requires": { + "kind-of": "^3.0.2", + "longest": "^1.0.1", + "repeat-string": "^1.5.2" + } + }, + "amdefine": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/amdefine/-/amdefine-1.0.1.tgz", + "integrity": "sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU=", + "dev": true + }, + "ansi-colors": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-3.2.3.tgz", + "integrity": "sha512-LEHHyuhlPY3TmuUYMh2oz89lTShfvgbmzaBcxve9t/9Wuy7Dwf4yoAKcND7KFT1HAQfqZ12qtc+DUrBMeKF9nw==", + "dev": true + }, + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true + }, + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", + "dev": true + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + }, + "dependencies": { + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + } + } + }, + "array-differ": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/array-differ/-/array-differ-1.0.0.tgz", + "integrity": "sha1-7/UuN1gknTO+QCuLuOVkuytdQDE=", + "dev": true + }, + "array-equal": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/array-equal/-/array-equal-1.0.0.tgz", + "integrity": "sha1-jCpe8kcv2ep0KwTHenUJO6J1fJM=", + "dev": true + }, + "array-find-index": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz", + "integrity": "sha1-3wEKoSh+Fku9pvlyOwqWoexBh6E=", + "dev": true + }, + "array-union": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz", + 
"integrity": "sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=", + "dev": true, + "requires": { + "array-uniq": "^1.0.1" + } + }, + "array-uniq": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz", + "integrity": "sha1-r2rId6Jcx/dOBYiUdThY39sk/bY=", + "dev": true + }, + "arrify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", + "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=", + "dev": true + }, + "asn1": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", + "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", + "dev": true, + "requires": { + "safer-buffer": "~2.1.0" + } + }, + "assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", + "dev": true + }, + "assertion-error": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", + "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", + "dev": true + }, + "async": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/async/-/async-1.5.2.tgz", + "integrity": "sha1-7GphrlZIDAw8skHJVhjiCJL5Zyo=", + "dev": true + }, + "asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=", + "dev": true + }, + "aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", + "dev": true + }, + "aws4": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.9.0.tgz", + "integrity": "sha512-Uvq6hVe90D0B2WEnUqtdgY1bATGz3mw33nH9Y+dmA+w5DHvUmBgkr5rM/KCHpCsiFNRUfokW/szpPPgMK2hm4A==", + "dev": true + }, + 
"babel": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel/-/babel-6.23.0.tgz", + "integrity": "sha1-0NHn2APpdHZb7qMjLU4VPA77kPQ=", + "dev": true + }, + "babel-code-frame": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-code-frame/-/babel-code-frame-6.26.0.tgz", + "integrity": "sha1-Y/1D99weO7fONZR9uP42mj9Yx0s=", + "dev": true, + "requires": { + "chalk": "^1.1.3", + "esutils": "^2.0.2", + "js-tokens": "^3.0.2" + } + }, + "babel-core": { + "version": "6.26.3", + "resolved": "https://registry.npmjs.org/babel-core/-/babel-core-6.26.3.tgz", + "integrity": "sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA==", + "dev": true, + "requires": { + "babel-code-frame": "^6.26.0", + "babel-generator": "^6.26.0", + "babel-helpers": "^6.24.1", + "babel-messages": "^6.23.0", + "babel-register": "^6.26.0", + "babel-runtime": "^6.26.0", + "babel-template": "^6.26.0", + "babel-traverse": "^6.26.0", + "babel-types": "^6.26.0", + "babylon": "^6.18.0", + "convert-source-map": "^1.5.1", + "debug": "^2.6.9", + "json5": "^0.5.1", + "lodash": "^4.17.4", + "minimatch": "^3.0.4", + "path-is-absolute": "^1.0.1", + "private": "^0.1.8", + "slash": "^1.0.0", + "source-map": "^0.5.7" + } + }, + "babel-generator": { + "version": "6.26.1", + "resolved": "https://registry.npmjs.org/babel-generator/-/babel-generator-6.26.1.tgz", + "integrity": "sha512-HyfwY6ApZj7BYTcJURpM5tznulaBvyio7/0d4zFOeMPUmfxkCjHocCuoLa2SAGzBI8AREcH3eP3758F672DppA==", + "dev": true, + "requires": { + "babel-messages": "^6.23.0", + "babel-runtime": "^6.26.0", + "babel-types": "^6.26.0", + "detect-indent": "^4.0.0", + "jsesc": "^1.3.0", + "lodash": "^4.17.4", + "source-map": "^0.5.7", + "trim-right": "^1.0.1" + }, + "dependencies": { + "jsesc": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-1.3.0.tgz", + "integrity": "sha1-RsP+yMGJKxKwgz25vHYiF226s0s=", + "dev": true + } + } + }, + 
"babel-helper-builder-binary-assignment-operator-visitor": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-builder-binary-assignment-operator-visitor/-/babel-helper-builder-binary-assignment-operator-visitor-6.24.1.tgz", + "integrity": "sha1-zORReto1b0IgvK6KAsKzRvmlZmQ=", + "dev": true, + "requires": { + "babel-helper-explode-assignable-expression": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-helper-call-delegate": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz", + "integrity": "sha1-7Oaqzdx25Bw0YfiL/Fdb0Nqi340=", + "dev": true, + "requires": { + "babel-helper-hoist-variables": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-helper-define-map": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-helper-define-map/-/babel-helper-define-map-6.26.0.tgz", + "integrity": "sha1-pfVtq0GiX5fstJjH66ypgZ+Vvl8=", + "dev": true, + "requires": { + "babel-helper-function-name": "^6.24.1", + "babel-runtime": "^6.26.0", + "babel-types": "^6.26.0", + "lodash": "^4.17.4" + } + }, + "babel-helper-explode-assignable-expression": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-explode-assignable-expression/-/babel-helper-explode-assignable-expression-6.24.1.tgz", + "integrity": "sha1-8luCz33BBDPFX3BZLVdGQArCLKo=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-helper-function-name": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz", + "integrity": "sha1-00dbjAPtmCQqJbSDUasYOZ01gKk=", + "dev": true, + "requires": { + "babel-helper-get-function-arity": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1", + 
"babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-helper-get-function-arity": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz", + "integrity": "sha1-j3eCqpNAfEHTqlCQj4mwMbG2hT0=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-helper-hoist-variables": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz", + "integrity": "sha1-HssnaJydJVE+rbyZFKc/VAi+enY=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-helper-optimise-call-expression": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz", + "integrity": "sha1-96E0J7qfc/j0+pk8VKl4gtEkQlc=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-helper-regex": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-helper-regex/-/babel-helper-regex-6.26.0.tgz", + "integrity": "sha1-MlxZ+QL4LyS3T6zu0DY5VPZJXnI=", + "dev": true, + "requires": { + "babel-runtime": "^6.26.0", + "babel-types": "^6.26.0", + "lodash": "^4.17.4" + } + }, + "babel-helper-remap-async-to-generator": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-remap-async-to-generator/-/babel-helper-remap-async-to-generator-6.24.1.tgz", + "integrity": "sha1-XsWBgnrXI/7N04HxySg5BnbkVRs=", + "dev": true, + "requires": { + "babel-helper-function-name": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-helper-replace-supers": { + "version": "6.24.1", + "resolved": 
"https://registry.npmjs.org/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz", + "integrity": "sha1-v22/5Dk40XNpohPKiov3S2qQqxo=", + "dev": true, + "requires": { + "babel-helper-optimise-call-expression": "^6.24.1", + "babel-messages": "^6.23.0", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-helpers": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helpers/-/babel-helpers-6.24.1.tgz", + "integrity": "sha1-NHHenK7DiOXIUOWX5Yom3fN2ArI=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1" + } + }, + "babel-messages": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-messages/-/babel-messages-6.23.0.tgz", + "integrity": "sha1-8830cDhYA1sqKVHG7F7fbGLyYw4=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-check-es2015-constants": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz", + "integrity": "sha1-NRV7EBQm/S/9PaP3XH0ekYNbv4o=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-syntax-async-functions": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-async-functions/-/babel-plugin-syntax-async-functions-6.13.0.tgz", + "integrity": "sha1-ytnK0RkbWtY0vzCuCHI5HgZHvpU=", + "dev": true + }, + "babel-plugin-syntax-dynamic-import": { + "version": "6.18.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-dynamic-import/-/babel-plugin-syntax-dynamic-import-6.18.0.tgz", + "integrity": "sha1-jWomIpyDdFqZgqRBBRVyyqF5sdo=", + "dev": true + }, + "babel-plugin-syntax-exponentiation-operator": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-exponentiation-operator/-/babel-plugin-syntax-exponentiation-operator-6.13.0.tgz", + 
"integrity": "sha1-nufoM3KQ2pUoggGmpX9BcDF4MN4=", + "dev": true + }, + "babel-plugin-syntax-object-rest-spread": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-object-rest-spread/-/babel-plugin-syntax-object-rest-spread-6.13.0.tgz", + "integrity": "sha1-/WU28rzhODb/o6VFjEkDpZe7O/U=", + "dev": true + }, + "babel-plugin-syntax-trailing-function-commas": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-6.22.0.tgz", + "integrity": "sha1-ugNgk3+NBuQBgKQ/4NVhb/9TLPM=", + "dev": true + }, + "babel-plugin-transform-amd-system-wrapper": { + "version": "0.3.7", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-amd-system-wrapper/-/babel-plugin-transform-amd-system-wrapper-0.3.7.tgz", + "integrity": "sha1-Uhx4LTVkRJHJeepoPopeHK/wukI=", + "dev": true, + "requires": { + "babel-template": "^6.9.0" + } + }, + "babel-plugin-transform-async-to-generator": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-async-to-generator/-/babel-plugin-transform-async-to-generator-6.24.1.tgz", + "integrity": "sha1-ZTbjeK/2yx1VF6wOQOs+n8jQh2E=", + "dev": true, + "requires": { + "babel-helper-remap-async-to-generator": "^6.24.1", + "babel-plugin-syntax-async-functions": "^6.8.0", + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-cjs-system-wrapper": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-cjs-system-wrapper/-/babel-plugin-transform-cjs-system-wrapper-0.6.2.tgz", + "integrity": "sha1-vXSUd1KJQk/0k7btRV3klb1xuh0=", + "dev": true, + "requires": { + "babel-template": "^6.9.0" + } + }, + "babel-plugin-transform-es2015-arrow-functions": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz", + "integrity": 
"sha1-RSaSy3EdX3ncf4XkQM5BufJE0iE=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-block-scoped-functions": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz", + "integrity": "sha1-u8UbSflk1wy42OC5ToICRs46YUE=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-block-scoping": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.26.0.tgz", + "integrity": "sha1-1w9SmcEwjQXBL0Y4E7CgnnOxiV8=", + "dev": true, + "requires": { + "babel-runtime": "^6.26.0", + "babel-template": "^6.26.0", + "babel-traverse": "^6.26.0", + "babel-types": "^6.26.0", + "lodash": "^4.17.4" + } + }, + "babel-plugin-transform-es2015-classes": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz", + "integrity": "sha1-WkxYpQyclGHlZLSyo7+ryXolhNs=", + "dev": true, + "requires": { + "babel-helper-define-map": "^6.24.1", + "babel-helper-function-name": "^6.24.1", + "babel-helper-optimise-call-expression": "^6.24.1", + "babel-helper-replace-supers": "^6.24.1", + "babel-messages": "^6.23.0", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-computed-properties": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz", + "integrity": "sha1-b+Ko0WiV1WNPTNmZttNICjCBWbM=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-destructuring": { + 
"version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz", + "integrity": "sha1-mXux8auWf2gtKwh2/jWNYOdlxW0=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-duplicate-keys": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz", + "integrity": "sha1-c+s9MQypaePvnskcU3QabxV2Qj4=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-for-of": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz", + "integrity": "sha1-9HyVsrYT3x0+zC/bdXNiPHUkhpE=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-function-name": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz", + "integrity": "sha1-g0yJhTvDaxrw86TF26qU/Y6sqos=", + "dev": true, + "requires": { + "babel-helper-function-name": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-literals": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz", + "integrity": "sha1-T1SgLWzWbPkVKAAZox0xklN3yi4=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-modules-amd": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz", + "integrity": "sha1-Oz5UAXI5hC1tGcMBHEvS8AoA0VQ=", + "dev": true, + 
"requires": { + "babel-plugin-transform-es2015-modules-commonjs": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-modules-commonjs": { + "version": "6.26.2", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.26.2.tgz", + "integrity": "sha512-CV9ROOHEdrjcwhIaJNBGMBCodN+1cfkwtM1SbUHmvyy35KGT7fohbpOxkE2uLz1o6odKK2Ck/tz47z+VqQfi9Q==", + "dev": true, + "requires": { + "babel-plugin-transform-strict-mode": "^6.24.1", + "babel-runtime": "^6.26.0", + "babel-template": "^6.26.0", + "babel-types": "^6.26.0" + } + }, + "babel-plugin-transform-es2015-modules-systemjs": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz", + "integrity": "sha1-/4mhQrkRmpBhlfXxBuzzBdlAfSM=", + "dev": true, + "requires": { + "babel-helper-hoist-variables": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-modules-umd": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz", + "integrity": "sha1-rJl+YoXNGO1hdq22B9YCNErThGg=", + "dev": true, + "requires": { + "babel-plugin-transform-es2015-modules-amd": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-object-super": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz", + "integrity": "sha1-JM72muIcuDp/hgPa0CH1cusnj40=", + "dev": true, + "requires": { + "babel-helper-replace-supers": "^6.24.1", + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-parameters": { + "version": "6.24.1", + "resolved": 
"https://registry.npmjs.org/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz", + "integrity": "sha1-V6w1GrScrxSpfNE7CfZv3wpiXys=", + "dev": true, + "requires": { + "babel-helper-call-delegate": "^6.24.1", + "babel-helper-get-function-arity": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-shorthand-properties": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz", + "integrity": "sha1-JPh11nIch2YbvZmkYi5R8U3jiqA=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-spread": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz", + "integrity": "sha1-1taKmfia7cRTbIGlQujdnxdG+NE=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-sticky-regex": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz", + "integrity": "sha1-AMHNsaynERLN8M9hJsLta0V8zbw=", + "dev": true, + "requires": { + "babel-helper-regex": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-template-literals": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz", + "integrity": "sha1-qEs0UPfp+PH2g51taH2oS7EjbY0=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-typeof-symbol": { + "version": "6.23.0", + "resolved": 
"https://registry.npmjs.org/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz", + "integrity": "sha1-3sCfHN3/lLUqxz1QXITfWdzOs3I=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-unicode-regex": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz", + "integrity": "sha1-04sS9C6nMj9yk4fxinxa4frrNek=", + "dev": true, + "requires": { + "babel-helper-regex": "^6.24.1", + "babel-runtime": "^6.22.0", + "regexpu-core": "^2.0.0" + } + }, + "babel-plugin-transform-exponentiation-operator": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-exponentiation-operator/-/babel-plugin-transform-exponentiation-operator-6.24.1.tgz", + "integrity": "sha1-KrDJx/MJj6SJB3cruBP+QejeOg4=", + "dev": true, + "requires": { + "babel-helper-builder-binary-assignment-operator-visitor": "^6.24.1", + "babel-plugin-syntax-exponentiation-operator": "^6.8.0", + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-global-system-wrapper": { + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-global-system-wrapper/-/babel-plugin-transform-global-system-wrapper-0.3.4.tgz", + "integrity": "sha1-lI3X0p/CFEfjm9NEfy3rx/L3Oqw=", + "dev": true, + "requires": { + "babel-template": "^6.9.0" + } + }, + "babel-plugin-transform-object-rest-spread": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-object-rest-spread/-/babel-plugin-transform-object-rest-spread-6.26.0.tgz", + "integrity": "sha1-DzZpLVD+9rfi1LOsFHgTepY7ewY=", + "dev": true, + "requires": { + "babel-plugin-syntax-object-rest-spread": "^6.8.0", + "babel-runtime": "^6.26.0" + } + }, + "babel-plugin-transform-regenerator": { + "version": "6.26.0", + "resolved": 
"https://registry.npmjs.org/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.26.0.tgz", + "integrity": "sha1-4HA2lvveJ/Cj78rPi03KL3s6jy8=", + "dev": true, + "requires": { + "regenerator-transform": "^0.10.0" + } + }, + "babel-plugin-transform-strict-mode": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz", + "integrity": "sha1-1fr3qleKZbvlkc9e2uBKDGcCB1g=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-system-register": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-system-register/-/babel-plugin-transform-system-register-0.0.1.tgz", + "integrity": "sha1-nf9AOQwnY6xRjwsq18XqT2WlviU=", + "dev": true + }, + "babel-preset-env": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/babel-preset-env/-/babel-preset-env-1.7.0.tgz", + "integrity": "sha512-9OR2afuKDneX2/q2EurSftUYM0xGu4O2D9adAhVfADDhrYDaxXV0rBbevVYoY9n6nyX1PmQW/0jtpJvUNr9CHg==", + "dev": true, + "requires": { + "babel-plugin-check-es2015-constants": "^6.22.0", + "babel-plugin-syntax-trailing-function-commas": "^6.22.0", + "babel-plugin-transform-async-to-generator": "^6.22.0", + "babel-plugin-transform-es2015-arrow-functions": "^6.22.0", + "babel-plugin-transform-es2015-block-scoped-functions": "^6.22.0", + "babel-plugin-transform-es2015-block-scoping": "^6.23.0", + "babel-plugin-transform-es2015-classes": "^6.23.0", + "babel-plugin-transform-es2015-computed-properties": "^6.22.0", + "babel-plugin-transform-es2015-destructuring": "^6.23.0", + "babel-plugin-transform-es2015-duplicate-keys": "^6.22.0", + "babel-plugin-transform-es2015-for-of": "^6.23.0", + "babel-plugin-transform-es2015-function-name": "^6.22.0", + "babel-plugin-transform-es2015-literals": "^6.22.0", + "babel-plugin-transform-es2015-modules-amd": "^6.22.0", + 
"babel-plugin-transform-es2015-modules-commonjs": "^6.23.0", + "babel-plugin-transform-es2015-modules-systemjs": "^6.23.0", + "babel-plugin-transform-es2015-modules-umd": "^6.23.0", + "babel-plugin-transform-es2015-object-super": "^6.22.0", + "babel-plugin-transform-es2015-parameters": "^6.23.0", + "babel-plugin-transform-es2015-shorthand-properties": "^6.22.0", + "babel-plugin-transform-es2015-spread": "^6.22.0", + "babel-plugin-transform-es2015-sticky-regex": "^6.22.0", + "babel-plugin-transform-es2015-template-literals": "^6.22.0", + "babel-plugin-transform-es2015-typeof-symbol": "^6.23.0", + "babel-plugin-transform-es2015-unicode-regex": "^6.22.0", + "babel-plugin-transform-exponentiation-operator": "^6.22.0", + "babel-plugin-transform-regenerator": "^6.22.0", + "browserslist": "^3.2.6", + "invariant": "^2.2.2", + "semver": "^5.3.0" + } + }, + "babel-register": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-register/-/babel-register-6.26.0.tgz", + "integrity": "sha1-btAhFz4vy0htestFxgCahW9kcHE=", + "dev": true, + "requires": { + "babel-core": "^6.26.0", + "babel-runtime": "^6.26.0", + "core-js": "^2.5.0", + "home-or-tmp": "^2.0.0", + "lodash": "^4.17.4", + "mkdirp": "^0.5.1", + "source-map-support": "^0.4.15" + } + }, + "babel-runtime": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", + "integrity": "sha1-llxwWGaOgrVde/4E/yM3vItWR/4=", + "dev": true, + "requires": { + "core-js": "^2.4.0", + "regenerator-runtime": "^0.11.0" + } + }, + "babel-template": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-template/-/babel-template-6.26.0.tgz", + "integrity": "sha1-3gPi0WOWsGn0bdn/+FIfsaDjXgI=", + "dev": true, + "requires": { + "babel-runtime": "^6.26.0", + "babel-traverse": "^6.26.0", + "babel-types": "^6.26.0", + "babylon": "^6.18.0", + "lodash": "^4.17.4" + } + }, + "babel-traverse": { + "version": "6.26.0", + "resolved": 
"https://registry.npmjs.org/babel-traverse/-/babel-traverse-6.26.0.tgz", + "integrity": "sha1-RqnL1+3MYsjlwGTi0tjQ9ANXZu4=", + "dev": true, + "requires": { + "babel-code-frame": "^6.26.0", + "babel-messages": "^6.23.0", + "babel-runtime": "^6.26.0", + "babel-types": "^6.26.0", + "babylon": "^6.18.0", + "debug": "^2.6.8", + "globals": "^9.18.0", + "invariant": "^2.2.2", + "lodash": "^4.17.4" + } + }, + "babel-types": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-types/-/babel-types-6.26.0.tgz", + "integrity": "sha1-o7Bz+Uq0nrb6Vc1lInozQ4BjJJc=", + "dev": true, + "requires": { + "babel-runtime": "^6.26.0", + "esutils": "^2.0.2", + "lodash": "^4.17.4", + "to-fast-properties": "^1.0.3" + } + }, + "babylon": { + "version": "6.18.0", + "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", + "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==", + "dev": true + }, + "balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", + "dev": true, + "requires": { + "tweetnacl": "^0.14.3" + } + }, + "bluebird": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", + "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", + "dev": true + }, + "body": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/body/-/body-5.1.0.tgz", + "integrity": "sha1-5LoM5BCkaTYyM2dgnstOZVMSUGk=", + "dev": true, + "requires": { + "continuable-cache": "^0.3.1", + "error": "^7.0.0", + "raw-body": "~1.1.0", + "safe-json-parse": "~1.0.1" + } + }, + "brace-expansion": { + "version": 
"1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, + "browserify-zlib": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.1.4.tgz", + "integrity": "sha1-uzX4pRn2AOD6a4SFJByXnQFB+y0=", + "dev": true, + "requires": { + "pako": "~0.2.0" + } + }, + "browserslist": { + "version": "3.2.8", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-3.2.8.tgz", + "integrity": "sha512-WHVocJYavUwVgVViC0ORikPHQquXwVh939TaelZ4WDqpWgTX/FsGhl/+P4qBUAGcRvtOgDgC+xftNWWp2RUTAQ==", + "dev": true, + "requires": { + "caniuse-lite": "^1.0.30000844", + "electron-to-chromium": "^1.3.47" + } + }, + "buffer-from": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", + "dev": true + }, + "bytes": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-1.0.0.tgz", + "integrity": "sha1-NWnt6Lo0MV+rmcPpLLBMciDeH6g=", + "dev": true + }, + "camelcase": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", + "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=", + "dev": true + }, + "camelcase-keys": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-2.1.0.tgz", + "integrity": "sha1-MIvur/3ygRkFHvodkyITyRuPkuc=", + "dev": true, + "requires": { + 
"camelcase": "^2.0.0", + "map-obj": "^1.0.0" + } + }, + "caniuse-lite": { + "version": "1.0.30001016", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001016.tgz", + "integrity": "sha512-yYQ2QfotceRiH4U+h1Us86WJXtVHDmy3nEKIdYPsZCYnOV5/tMgGbmoIlrMzmh2VXlproqYtVaKeGDBkMZifFA==", + "dev": true + }, + "caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=", + "dev": true + }, + "center-align": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/center-align/-/center-align-0.1.3.tgz", + "integrity": "sha1-qg0yYptu6XIgBBHL1EYckHvCt60=", + "dev": true, + "requires": { + "align-text": "^0.1.3", + "lazy-cache": "^1.0.3" + } + }, + "chai": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/chai/-/chai-3.5.0.tgz", + "integrity": "sha1-TQJjewZ/6Vi9v906QOxW/vc3Mkc=", + "dev": true, + "requires": { + "assertion-error": "^1.0.1", + "deep-eql": "^0.1.3", + "type-detect": "^1.0.0" + } + }, + "chalk": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "dev": true, + "requires": { + "ansi-styles": "^2.2.1", + "escape-string-regexp": "^1.0.2", + "has-ansi": "^2.0.0", + "strip-ansi": "^3.0.0", + "supports-color": "^2.0.0" + } + }, + "cliui": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-2.1.0.tgz", + "integrity": "sha1-S0dXYP+AJkx2LDoXGQMukcf+oNE=", + "dev": true, + "requires": { + "center-align": "^0.1.1", + "right-align": "^0.1.1", + "wordwrap": "0.0.2" + } + }, + "coffeescript": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/coffeescript/-/coffeescript-1.10.0.tgz", + "integrity": "sha1-56qDAZF+9iGzXYo580jc3R234z4=", + "dev": true + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": 
"sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", + "dev": true + }, + "colors": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.1.2.tgz", + "integrity": "sha1-FopHAXVran9RoSzgyXv6KMCE7WM=", + "dev": true + }, + "combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dev": true, + "requires": { + "delayed-stream": "~1.0.0" + } + }, + "commander": { + "version": "2.16.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.16.0.tgz", + "integrity": "sha512-sVXqklSaotK9at437sFlFpyOcJonxe0yST/AG9DkQKUdIE6IqGIMv4SfAQSKaJbSdVEJYItASCrBiVQHq1HQew==", + "dev": true + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "concat-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", + "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "content-type-parser": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/content-type-parser/-/content-type-parser-1.0.2.tgz", + "integrity": "sha512-lM4l4CnMEwOLHAHr/P6MEZwZFPJFtAAKgL6pogbXmVZggIqXhdB6RbBtPOTsw2FcXwYhehRGERJmRrjOiIB8pQ==", + "dev": true + }, + "continuable-cache": { + "version": "0.3.1", + 
"resolved": "https://registry.npmjs.org/continuable-cache/-/continuable-cache-0.3.1.tgz", + "integrity": "sha1-vXJ6f67XfnH/OYWskzUakSczrQ8=", + "dev": true + }, + "convert-source-map": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", + "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.1" + } + }, + "core-js": { + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.11.tgz", + "integrity": "sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg==", + "dev": true + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true + }, + "cssom": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.3.8.tgz", + "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==", + "dev": true + }, + "cssstyle": { + "version": "0.2.37", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-0.2.37.tgz", + "integrity": "sha1-VBCXI0yyUTyDzu06zdwn/yeYfVQ=", + "dev": true, + "requires": { + "cssom": "0.3.x" + } + }, + "currently-unhandled": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", + "integrity": "sha1-mI3zP+qxke95mmE2nddsF635V+o=", + "dev": true, + "requires": { + "array-find-index": "^1.0.1" + } + }, + "d": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", + "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", + "dev": true, + "requires": { + "es5-ext": "^0.10.50", + "type": "^1.0.1" + } + }, + "dashdash": { + "version": "1.14.1", + "resolved": 
"https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", + "dev": true, + "requires": { + "assert-plus": "^1.0.0" + } + }, + "data-uri-to-buffer": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-0.0.4.tgz", + "integrity": "sha1-RuE6udqOMJdFyNAc5UchPr2y/j8=", + "dev": true + }, + "dateformat": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-1.0.12.tgz", + "integrity": "sha1-nxJLZ1lMk3/3BpMuSmQsyo27/uk=", + "dev": true, + "requires": { + "get-stdin": "^4.0.1", + "meow": "^3.3.0" + } + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "dev": true + }, + "deep-eql": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-0.1.3.tgz", + "integrity": "sha1-71WKyrjeJSBs1xOQbXTlaTDrafI=", + "dev": true, + "requires": { + "type-detect": "0.1.1" + }, + "dependencies": { + "type-detect": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-0.1.1.tgz", + "integrity": "sha1-C6XsKohWQORw6k6FBZcZANrFiCI=", + "dev": true + } + } + }, + "deep-is": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", + "dev": true + }, + "define-properties": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", + "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", + "dev": 
true, + "requires": { + "object-keys": "^1.0.12" + } + }, + "delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", + "dev": true + }, + "detect-indent": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-4.0.0.tgz", + "integrity": "sha1-920GQ1LN9Docts5hnE7jqUdd4gg=", + "dev": true, + "requires": { + "repeating": "^2.0.0" + } + }, + "diff": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", + "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==", + "dev": true + }, + "ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", + "dev": true, + "requires": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, + "electron-to-chromium": { + "version": "1.3.322", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.322.tgz", + "integrity": "sha512-Tc8JQEfGQ1MzfSzI/bTlSr7btJv/FFO7Yh6tanqVmIWOuNCu6/D1MilIEgLtmWqIrsv+o4IjpLAhgMBr/ncNAA==", + "dev": true + }, + "emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", + "dev": true + }, + "error": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/error/-/error-7.2.1.tgz", + "integrity": "sha512-fo9HBvWnx3NGUKMvMwB/CBCMMrfEJgbDTVDEkPygA3Bdd3lM1OyCd+rbQ8BwnpF6GdVeOLDNmyL4N5Bg80ZvdA==", + "dev": true, + "requires": { + "string-template": "~0.2.1" + } + }, + "error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": 
"sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "requires": { + "is-arrayish": "^0.2.1" + } + }, + "es-abstract": { + "version": "1.17.0-next.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.0-next.1.tgz", + "integrity": "sha512-7MmGr03N7Rnuid6+wyhD9sHNE2n4tFSwExnU2lQl3lIo2ShXWGePY80zYaoMOmILWv57H0amMjZGHNzzGG70Rw==", + "dev": true, + "requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.1.4", + "is-regex": "^1.0.4", + "object-inspect": "^1.7.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.0", + "string.prototype.trimleft": "^2.1.0", + "string.prototype.trimright": "^2.1.0" + } + }, + "es-to-primitive": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", + "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", + "dev": true, + "requires": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + } + }, + "es5-ext": { + "version": "0.10.53", + "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", + "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", + "dev": true, + "requires": { + "es6-iterator": "~2.0.3", + "es6-symbol": "~3.1.3", + "next-tick": "~1.0.0" + } + }, + "es6-iterator": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", + "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", + "dev": true, + "requires": { + "d": "1", + "es5-ext": "^0.10.35", + "es6-symbol": "^3.1.1" + } + }, + "es6-symbol": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", + "integrity": 
"sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", + "dev": true, + "requires": { + "d": "^1.0.1", + "ext": "^1.1.2" + } + }, + "es6-template-strings": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/es6-template-strings/-/es6-template-strings-2.0.1.tgz", + "integrity": "sha1-sWbGpiVi9Hi7d3X2ypYQOlmbSyw=", + "dev": true, + "requires": { + "es5-ext": "^0.10.12", + "esniff": "^1.1" + } + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "escodegen": { + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.12.0.tgz", + "integrity": "sha512-TuA+EhsanGcme5T3R0L80u4t8CpbXQjegRmf7+FPTJrtCTErXFeelblRgHQa1FofEzqYYJmJ/OqjTwREp9qgmg==", + "dev": true, + "requires": { + "esprima": "^3.1.3", + "estraverse": "^4.2.0", + "esutils": "^2.0.2", + "optionator": "^0.8.1", + "source-map": "~0.6.1" + }, + "dependencies": { + "esprima": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-3.1.3.tgz", + "integrity": "sha1-/cpRzuYTOJXjyI1TXOSdv/YqRjM=", + "dev": true + }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "optional": true + } + } + }, + "esniff": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/esniff/-/esniff-1.1.0.tgz", + "integrity": "sha1-xmhJIp+RRk3t4uDUAgHtar9l8qw=", + "dev": true, + "requires": { + "d": "1", + "es5-ext": "^0.10.12" + } + }, + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": 
"sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true + }, + "estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true + }, + "esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true + }, + "eventemitter2": { + "version": "0.4.14", + "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz", + "integrity": "sha1-j2G3XN4BKy6esoTUVFWDtWQ7Yas=", + "dev": true + }, + "exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha1-BjJjj42HfMghB9MKD/8aF8uhzQw=", + "dev": true + }, + "ext": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", + "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==", + "dev": true, + "requires": { + "type": "^2.0.0" + }, + "dependencies": { + "type": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/type/-/type-2.0.0.tgz", + "integrity": "sha512-KBt58xCHry4Cejnc2ISQAF7QY+ORngsWfxezO68+12hKV6lQY8P/psIkcbjeHWn7MqcgciWJyCCevFMJdIXpow==", + "dev": true + } + } + }, + "extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "dev": true + }, + "extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", + "dev": true + }, + "fast-deep-equal": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz", + "integrity": "sha1-ewUhjd+WZ79/Nwv3/bLLFf3Qqkk=", + "dev": true + }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", + "dev": true + }, + "faye-websocket": { + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.10.0.tgz", + "integrity": "sha1-TkkvjQTftviQA1B/btvy1QHnxvQ=", + "dev": true, + "requires": { + "websocket-driver": ">=0.5.1" + } + }, + "figures": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-1.7.0.tgz", + "integrity": "sha1-y+Hjr/zxzUS4DK3+0o3Hk6lwHS4=", + "dev": true, + "requires": { + "escape-string-regexp": "^1.0.5", + "object-assign": "^4.1.0" + } + }, + "file-sync-cmp": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/file-sync-cmp/-/file-sync-cmp-0.1.1.tgz", + "integrity": "sha1-peeo/7+kk7Q7kju9TKiaU7Y7YSs=", + "dev": true + }, + "find-up": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-1.1.2.tgz", + "integrity": "sha1-ay6YIrGizgpgq2TWEOzK1TyyTQ8=", + "dev": true, + "requires": { + "path-exists": "^2.0.0", + "pinkie-promise": "^2.0.0" + } + }, + "findup-sync": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-0.3.0.tgz", + "integrity": "sha1-N5MKpdgWt3fANEXhlmzGeQpMCxY=", + "dev": true, + "requires": { + "glob": "~5.0.0" + }, + "dependencies": { + "glob": { + "version": "5.0.15", + "resolved": "https://registry.npmjs.org/glob/-/glob-5.0.15.tgz", + "integrity": 
"sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E=", + "dev": true, + "requires": { + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "2 || 3", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + } + } + }, + "flat": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/flat/-/flat-4.1.0.tgz", + "integrity": "sha512-Px/TiLIznH7gEDlPXcUD4KnBusa6kR6ayRUVcnEAbreRIuhkqow/mun59BuRXwoYk7ZQOLW1ZM05ilIvK38hFw==", + "dev": true, + "requires": { + "is-buffer": "~2.0.3" + }, + "dependencies": { + "is-buffer": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.4.tgz", + "integrity": "sha512-Kq1rokWXOPXWuaMAqZiJW4XxsmD9zGx9q4aePabbn3qCRGedtH7Cm+zV8WETitMfu1wdh+Rvd6w5egwSngUX2A==", + "dev": true + } + } + }, + "forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", + "dev": true + }, + "form-data": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", + "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", + "dev": true, + "requires": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true + }, + "gaze": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/gaze/-/gaze-1.1.3.tgz", + "integrity": "sha512-BRdNm8hbWzFzWHERTrejLqwHDfS4GibPoq5wjTPIoJHoBtKGPg3xAFfxmM+9ztbXelxcf2hwQcaz1PtmFeue8g==", + "dev": true, + "requires": { 
+ "globule": "^1.0.0" + } + }, + "get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true + }, + "get-stdin": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz", + "integrity": "sha1-uWjGsKBDhDJJAui/Gl3zJXmkUP4=", + "dev": true + }, + "getobject": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/getobject/-/getobject-0.1.0.tgz", + "integrity": "sha1-BHpEl4n6Fg0Bj1SG7ZEyC27HiFw=", + "dev": true + }, + "getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "dev": true, + "requires": { + "assert-plus": "^1.0.0" + } + }, + "glob": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.0.6.tgz", + "integrity": "sha1-IRuvr0nlJbjNkyYNFKsTYVKz9Xo=", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.2", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "globals": { + "version": "9.18.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-9.18.0.tgz", + "integrity": "sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ==", + "dev": true + }, + "globule": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/globule/-/globule-1.2.1.tgz", + "integrity": "sha512-g7QtgWF4uYSL5/dn71WxubOrS7JVGCnFPEnoeChJmBnyR9Mw8nGoEwOgJL/RC2Te0WhbsEUCejfH8SZNJ+adYQ==", + "dev": true, + "requires": { + "glob": "~7.1.1", + "lodash": "~4.17.10", + "minimatch": "~3.0.2" + }, + "dependencies": { + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": 
"sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + } + } + }, + "graceful-fs": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", + "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==", + "dev": true + }, + "graceful-readlink": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/graceful-readlink/-/graceful-readlink-1.0.1.tgz", + "integrity": "sha1-TK+tdrxi8C+gObL5Tpo906ORpyU=", + "dev": true + }, + "growl": { + "version": "1.10.5", + "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", + "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", + "dev": true + }, + "grunt": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/grunt/-/grunt-1.0.4.tgz", + "integrity": "sha512-PYsMOrOC+MsdGEkFVwMaMyc6Ob7pKmq+deg1Sjr+vvMWp35sztfwKE7qoN51V+UEtHsyNuMcGdgMLFkBHvMxHQ==", + "dev": true, + "requires": { + "coffeescript": "~1.10.0", + "dateformat": "~1.0.12", + "eventemitter2": "~0.4.13", + "exit": "~0.1.1", + "findup-sync": "~0.3.0", + "glob": "~7.0.0", + "grunt-cli": "~1.2.0", + "grunt-known-options": "~1.1.0", + "grunt-legacy-log": "~2.0.0", + "grunt-legacy-util": "~1.1.1", + "iconv-lite": "~0.4.13", + "js-yaml": "~3.13.0", + "minimatch": "~3.0.2", + "mkdirp": "~0.5.1", + "nopt": "~3.0.6", + "path-is-absolute": "~1.0.0", + "rimraf": "~2.6.2" + } + }, + "grunt-babel": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/grunt-babel/-/grunt-babel-6.0.0.tgz", + "integrity": "sha1-N4GJtIfeEWjExKn8iN1gBbNd+WA=", + "dev": true, + "requires": { + "babel-core": "^6.0.12" + } + }, + "grunt-cli": { + "version": "1.2.0", + "resolved": 
"https://registry.npmjs.org/grunt-cli/-/grunt-cli-1.2.0.tgz", + "integrity": "sha1-VisRnrsGndtGSs4oRVAb6Xs1tqg=", + "dev": true, + "requires": { + "findup-sync": "~0.3.0", + "grunt-known-options": "~1.1.0", + "nopt": "~3.0.6", + "resolve": "~1.1.0" + }, + "dependencies": { + "resolve": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.1.7.tgz", + "integrity": "sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=", + "dev": true + } + } + }, + "grunt-contrib-clean": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/grunt-contrib-clean/-/grunt-contrib-clean-1.1.0.tgz", + "integrity": "sha1-Vkq/LQN4qYOhW54/MO51tzjEBjg=", + "dev": true, + "requires": { + "async": "^1.5.2", + "rimraf": "^2.5.1" + } + }, + "grunt-contrib-copy": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/grunt-contrib-copy/-/grunt-contrib-copy-1.0.0.tgz", + "integrity": "sha1-cGDGWB6QS4qw0A8HbgqPbj58NXM=", + "dev": true, + "requires": { + "chalk": "^1.1.1", + "file-sync-cmp": "^0.1.0" + } + }, + "grunt-contrib-uglify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/grunt-contrib-uglify/-/grunt-contrib-uglify-2.3.0.tgz", + "integrity": "sha1-s9AmDr3WzvoS/y+Onh4ln33kIW8=", + "dev": true, + "requires": { + "chalk": "^1.0.0", + "maxmin": "^1.1.0", + "object.assign": "^4.0.4", + "uglify-js": "~2.8.21", + "uri-path": "^1.0.0" + } + }, + "grunt-contrib-watch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/grunt-contrib-watch/-/grunt-contrib-watch-1.1.0.tgz", + "integrity": "sha512-yGweN+0DW5yM+oo58fRu/XIRrPcn3r4tQx+nL7eMRwjpvk+rQY6R8o94BPK0i2UhTg9FN21hS+m8vR8v9vXfeg==", + "dev": true, + "requires": { + "async": "^2.6.0", + "gaze": "^1.1.0", + "lodash": "^4.17.10", + "tiny-lr": "^1.1.1" + }, + "dependencies": { + "async": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", + "integrity": 
"sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", + "dev": true, + "requires": { + "lodash": "^4.17.14" + }, + "dependencies": { + "lodash": { + "version": "4.17.15", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz", + "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==", + "dev": true + } + } + } + } + }, + "grunt-known-options": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/grunt-known-options/-/grunt-known-options-1.1.1.tgz", + "integrity": "sha512-cHwsLqoighpu7TuYj5RonnEuxGVFnztcUqTqp5rXFGYL4OuPFofwC4Ycg7n9fYwvK6F5WbYgeVOwph9Crs2fsQ==", + "dev": true + }, + "grunt-legacy-log": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/grunt-legacy-log/-/grunt-legacy-log-2.0.0.tgz", + "integrity": "sha512-1m3+5QvDYfR1ltr8hjiaiNjddxGdQWcH0rw1iKKiQnF0+xtgTazirSTGu68RchPyh1OBng1bBUjLmX8q9NpoCw==", + "dev": true, + "requires": { + "colors": "~1.1.2", + "grunt-legacy-log-utils": "~2.0.0", + "hooker": "~0.2.3", + "lodash": "~4.17.5" + } + }, + "grunt-legacy-log-utils": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/grunt-legacy-log-utils/-/grunt-legacy-log-utils-2.0.1.tgz", + "integrity": "sha512-o7uHyO/J+i2tXG8r2bZNlVk20vlIFJ9IEYyHMCQGfWYru8Jv3wTqKZzvV30YW9rWEjq0eP3cflQ1qWojIe9VFA==", + "dev": true, + "requires": { + "chalk": "~2.4.1", + "lodash": "~4.17.10" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + 
"dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "grunt-legacy-util": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/grunt-legacy-util/-/grunt-legacy-util-1.1.1.tgz", + "integrity": "sha512-9zyA29w/fBe6BIfjGENndwoe1Uy31BIXxTH3s8mga0Z5Bz2Sp4UCjkeyv2tI449ymkx3x26B+46FV4fXEddl5A==", + "dev": true, + "requires": { + "async": "~1.5.2", + "exit": "~0.1.1", + "getobject": "~0.1.0", + "hooker": "~0.2.3", + "lodash": "~4.17.10", + "underscore.string": "~3.3.4", + "which": "~1.3.0" + } + }, + "grunt-mocha-test": { + "version": "0.13.3", + "resolved": "https://registry.npmjs.org/grunt-mocha-test/-/grunt-mocha-test-0.13.3.tgz", + "integrity": "sha512-zQGEsi3d+ViPPi7/4jcj78afKKAKiAA5n61pknQYi25Ugik+aNOuRmiOkmb8mN2CeG8YxT+YdT1H1Q7B/eNkoQ==", + "dev": true, + "requires": { + "hooker": "^0.2.3", + "mkdirp": "^0.5.0" + } + }, + "grunt-systemjs-builder": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/grunt-systemjs-builder/-/grunt-systemjs-builder-1.0.0.tgz", + "integrity": "sha1-XY58vspbNeK3tr0ALpqdfgPX3s0=", + "dev": true, + "requires": { + "systemjs-builder": "0.14.11 - 0.16.x" + } + }, + "gzip-size": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-1.0.0.tgz", + "integrity": "sha1-Zs+LEBBHInuVus5uodoMF37Vwi8=", + "dev": true, + "requires": { + "browserify-zlib": "^0.1.4", + "concat-stream": "^1.4.1" + } + }, + "har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", + "dev": true + }, + "har-validator": 
{ + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.3.tgz", + "integrity": "sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g==", + "dev": true, + "requires": { + "ajv": "^6.5.5", + "har-schema": "^2.0.0" + } + }, + "has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dev": true, + "requires": { + "function-bind": "^1.1.1" + } + }, + "has-ansi": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", + "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", + "dev": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "dev": true + }, + "has-symbols": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.1.tgz", + "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==", + "dev": true + }, + "he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true + }, + "home-or-tmp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/home-or-tmp/-/home-or-tmp-2.0.0.tgz", + "integrity": "sha1-42w/LSyufXRqhX440Y1fMqeILbg=", + "dev": true, + "requires": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.1" + } + }, + "hooker": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/hooker/-/hooker-0.2.3.tgz", + "integrity": "sha1-uDT3I8xKJCqmWWNFnfbZhMXT2Vk=", + "dev": true + }, + "hosted-git-info": { + "version": "2.8.5", + "resolved": 
"https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.5.tgz", + "integrity": "sha512-kssjab8CvdXfcXMXVcvsXum4Hwdq9XGtRD3TteMEvEbq0LXyiNQr6AprqKqfeaDXze7SxWvRxdpwE6ku7ikLkg==", + "dev": true + }, + "html-encoding-sniffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-1.0.2.tgz", + "integrity": "sha512-71lZziiDnsuabfdYiUeWdCVyKuqwWi23L8YeIgV9jSSZHCtb6wB1BKWooH7L3tn4/FuZJMVWyNaIDr4RGmaSYw==", + "dev": true, + "requires": { + "whatwg-encoding": "^1.0.1" + } + }, + "http-parser-js": { + "version": "0.4.10", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.4.10.tgz", + "integrity": "sha1-ksnBN0w1CF912zWexWzCV8u5P6Q=", + "dev": true + }, + "http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "dev": true, + "requires": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + } + }, + "iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dev": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "indent-string": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-2.1.0.tgz", + "integrity": "sha1-ji1INIdCEhtKghi3oTfppSBJ3IA=", + "dev": true, + "requires": { + "repeating": "^2.0.0" + } + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": 
"sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dev": true, + "requires": { + "loose-envify": "^1.0.0" + } + }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", + "dev": true + }, + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "is-callable": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.5.tgz", + "integrity": "sha512-ESKv5sMCJB2jnHTWZ3O5itG+O128Hsus4K4Qh1h2/cgn2vbgnLSVqfV46AeJA9D5EeeLa9w81KUXMtn34zhX+Q==", + "dev": true + }, + "is-date-object": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", + "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==", + "dev": true + }, + "is-finite": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.0.2.tgz", + "integrity": "sha1-zGZ3aVYCvlUO8R6LSqYwU0K20Ko=", + "dev": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true + }, + "is-regex": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.5.tgz", + "integrity": 
"sha512-vlKW17SNq44owv5AQR3Cq0bQPEb8+kF3UKZ2fiZNOWtztYE5i0CzCZxFDwO58qAOWtxdBRVO/V5Qin1wjCqFYQ==", + "dev": true, + "requires": { + "has": "^1.0.3" + } + }, + "is-symbol": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", + "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", + "dev": true, + "requires": { + "has-symbols": "^1.0.1" + } + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", + "dev": true + }, + "is-utf8": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", + "integrity": "sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI=", + "dev": true + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=", + "dev": true + }, + "js-tokens": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz", + "integrity": "sha1-mGbfOVECEw449/mWvOtlRDIJwls=", + "dev": true + }, + "js-yaml": { + "version": "3.13.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", + "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", + "dev": true, + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": 
"sha1-peZUwuWi3rXyAdls77yoDA7y9RM=", + "dev": true + }, + "jsdom": { + "version": "9.12.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-9.12.0.tgz", + "integrity": "sha1-6MVG//ywbADUgzyoRBD+1/igl9Q=", + "dev": true, + "requires": { + "abab": "^1.0.3", + "acorn": "^4.0.4", + "acorn-globals": "^3.1.0", + "array-equal": "^1.0.0", + "content-type-parser": "^1.0.1", + "cssom": ">= 0.3.2 < 0.4.0", + "cssstyle": ">= 0.2.37 < 0.3.0", + "escodegen": "^1.6.1", + "html-encoding-sniffer": "^1.0.1", + "nwmatcher": ">= 1.3.9 < 2.0.0", + "parse5": "^1.5.1", + "request": "^2.79.0", + "sax": "^1.2.1", + "symbol-tree": "^3.2.1", + "tough-cookie": "^2.3.2", + "webidl-conversions": "^4.0.0", + "whatwg-encoding": "^1.0.1", + "whatwg-url": "^4.3.0", + "xml-name-validator": "^2.0.1" + } + }, + "jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", + "dev": true + }, + "json-schema": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", + "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=", + "dev": true + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=", + "dev": true + }, + "json5": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", + "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=", + "dev": true + }, + "jsprim": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", + "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "dev": 
true, + "requires": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + }, + "lazy-cache": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/lazy-cache/-/lazy-cache-1.0.4.tgz", + "integrity": "sha1-odePw6UEdMuAhF07O24dpJpEbo4=", + "dev": true + }, + "levn": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "dev": true, + "requires": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + } + }, + "livereload-js": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/livereload-js/-/livereload-js-2.4.0.tgz", + "integrity": "sha512-XPQH8Z2GDP/Hwz2PCDrh2mth4yFejwA1OZ/81Ti3LgKyhDcEjsSsqFWZojHG0va/duGd+WyosY7eXLDoOyqcPw==", + "dev": true + }, + "load-grunt-tasks": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/load-grunt-tasks/-/load-grunt-tasks-3.5.2.tgz", + "integrity": "sha1-ByhWEYD9IP+KaSdQWFL8WKrqDIg=", + "dev": true, + "requires": { + "arrify": "^1.0.0", + "multimatch": "^2.0.0", + "pkg-up": "^1.0.0", + "resolve-pkg": "^0.1.0" + } + }, + "load-json-file": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-1.1.0.tgz", + "integrity": "sha1-lWkFcI1YtLq0wiYbBPWfMcmTdMA=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "parse-json": "^2.2.0", + "pify": "^2.0.0", + "pinkie-promise": "^2.0.0", + "strip-bom": "^2.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "requires": { + "p-locate": 
"^3.0.0", + "path-exists": "^3.0.0" + }, + "dependencies": { + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + } + } + }, + "lodash": { + "version": "4.17.13", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.13.tgz", + "integrity": "sha512-vm3/XWXfWtRua0FkUyEHBZy8kCPjErNBT9fJx8Zvs+U6zjqPbTUOpkaoum3O5uiA8sm+yNMHXfYkTUHFoMxFNA==" + }, + "log-symbols": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-2.2.0.tgz", + "integrity": "sha512-VeIAFslyIerEJLXHziedo2basKbMKtTw3vfn5IzG0XTjhAVEJyNHnL2p7vc+wBDSdQuUpNw3M2u6xb9QsAY5Eg==", + "dev": true, + "requires": { + "chalk": "^2.0.1" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "longest": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", + "integrity": "sha1-MKCy2jj3N3DoKUoNIuZiXtd9AJc=", + "dev": true + }, + "loose-envify": { + "version": "1.4.0", + 
"resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dev": true, + "requires": { + "js-tokens": "^3.0.0 || ^4.0.0" + } + }, + "loud-rejection": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/loud-rejection/-/loud-rejection-1.6.0.tgz", + "integrity": "sha1-W0b4AUft7leIcPCG0Eghz5mOVR8=", + "dev": true, + "requires": { + "currently-unhandled": "^0.4.1", + "signal-exit": "^3.0.0" + } + }, + "map-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", + "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=", + "dev": true + }, + "maxmin": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/maxmin/-/maxmin-1.1.0.tgz", + "integrity": "sha1-cTZehKmd2Piz99X94vANHn9zvmE=", + "dev": true, + "requires": { + "chalk": "^1.0.0", + "figures": "^1.0.1", + "gzip-size": "^1.0.0", + "pretty-bytes": "^1.0.0" + } + }, + "meow": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/meow/-/meow-3.7.0.tgz", + "integrity": "sha1-cstmi0JSKCkKu/qFaJJYcwioAfs=", + "dev": true, + "requires": { + "camelcase-keys": "^2.0.0", + "decamelize": "^1.1.2", + "loud-rejection": "^1.0.0", + "map-obj": "^1.0.1", + "minimist": "^1.1.3", + "normalize-package-data": "^2.3.4", + "object-assign": "^4.0.1", + "read-pkg-up": "^1.0.1", + "redent": "^1.0.0", + "trim-newlines": "^1.0.0" + } + }, + "mime-db": { + "version": "1.42.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.42.0.tgz", + "integrity": "sha512-UbfJCR4UAVRNgMpfImz05smAXK7+c+ZntjaA26ANtkXLlOe947Aag5zdIcKQULAiF9Cq4WxBi9jUs5zkA84bYQ==", + "dev": true + }, + "mime-types": { + "version": "2.1.25", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.25.tgz", + "integrity": "sha512-5KhStqB5xpTAeGqKBAMgwaYMnQik7teQN4IAzC7npDv6kzeU6prfkR67bc87J1kWMPGkoaZSq1npmexMgkmEVg==", + "dev": true, + 
"requires": { + "mime-db": "1.42.0" + } + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", + "dev": true + }, + "mkdirp": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", + "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "dev": true, + "requires": { + "minimist": "0.0.8" + }, + "dependencies": { + "minimist": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=", + "dev": true + } + } + }, + "mocha": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-6.2.2.tgz", + "integrity": "sha512-FgDS9Re79yU1xz5d+C4rv1G7QagNGHZ+iXF81hO8zY35YZZcLEsJVfFolfsqKFWunATEvNzMK0r/CwWd/szO9A==", + "dev": true, + "requires": { + "ansi-colors": "3.2.3", + "browser-stdout": "1.3.1", + "debug": "3.2.6", + "diff": "3.5.0", + "escape-string-regexp": "1.0.5", + "find-up": "3.0.0", + "glob": "7.1.3", + "growl": "1.10.5", + "he": "1.2.0", + "js-yaml": "3.13.1", + "log-symbols": "2.2.0", + "minimatch": "3.0.4", + "mkdirp": "0.5.1", + "ms": "2.1.1", + "node-environment-flags": "1.0.5", + "object.assign": "4.1.0", + "strip-json-comments": "2.0.1", + "supports-color": "6.0.0", + "which": "1.3.1", + "wide-align": "1.1.3", + "yargs": "13.3.0", + "yargs-parser": "13.1.1", + "yargs-unparser": "1.6.0" + }, + "dependencies": { + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": 
"sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "dev": true + }, + "cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + "dev": true, + "requires": { + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + } + }, + "debug": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", + "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, + "glob": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.3.tgz", + "integrity": "sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "ms": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", + "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", + "dev": true + }, + "string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "requires": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": 
"^5.1.0" + } + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dev": true, + "requires": { + "ansi-regex": "^4.1.0" + } + }, + "supports-color": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.0.0.tgz", + "integrity": "sha512-on9Kwidc1IUQo+bQdhi8+Tijpo0e1SS6RoGo2guUwn5vdaxw8RXOF9Vb2ws+ihWOmh4JnCJOvaziZWP1VABaLg==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + }, + "yargs": { + "version": "13.3.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.0.tgz", + "integrity": "sha512-2eehun/8ALW8TLoIl7MVaRUrg+yCnenu8B4kBlRxj3GJGDKU1Og7sMXPNm1BYyM1DOJmTZ4YeN/Nwxv+8XJsUA==", + "dev": true, + "requires": { + "cliui": "^5.0.0", + "find-up": "^3.0.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^3.0.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^13.1.1" + } + } + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, + "multimatch": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/multimatch/-/multimatch-2.1.0.tgz", + "integrity": "sha1-nHkGoi+0wCkZ4vX3UWG0zb1LKis=", + "dev": true, + "requires": { + "array-differ": "^1.0.0", + "array-union": "^1.0.1", + "arrify": "^1.0.0", + "minimatch": "^3.0.0" + } + }, + "next-tick": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", + "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=", + "dev": true + }, + "node-environment-flags": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/node-environment-flags/-/node-environment-flags-1.0.5.tgz", + "integrity": 
"sha512-VNYPRfGfmZLx0Ye20jWzHUjyTW/c+6Wq+iLhDzUI4XmhrDd9l/FozXV3F2xOaXjvp0co0+v1YSR3CMP6g+VvLQ==", + "dev": true, + "requires": { + "object.getownpropertydescriptors": "^2.0.3", + "semver": "^5.7.0" + } + }, + "nopt": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", + "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", + "dev": true, + "requires": { + "abbrev": "1" + } + }, + "normalize-package-data": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "requires": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "dev": true + }, + "nwmatcher": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/nwmatcher/-/nwmatcher-1.4.4.tgz", + "integrity": "sha512-3iuY4N5dhgMpCUrOVnuAdGrgxVqV2cJpM+XNccjR2DKOB1RUP0aA+wGXEiNziG/UKboFyGBIoKOaNlJxx8bciQ==", + "dev": true + }, + "oauth-sign": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", + "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", + "dev": true + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "dev": true + }, + "object-inspect": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.7.0.tgz", + "integrity": "sha512-a7pEHdh1xKIAgTySUGgLMx/xwDZskN1Ud6egYYN3EdRW4ZMPNEDUTF+hwy2LUC+Bl+SyLXANnwz/jyh/qutKUw==", + 
"dev": true + }, + "object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true + }, + "object.assign": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", + "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", + "dev": true, + "requires": { + "define-properties": "^1.1.2", + "function-bind": "^1.1.1", + "has-symbols": "^1.0.0", + "object-keys": "^1.0.11" + } + }, + "object.getownpropertydescriptors": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.0.tgz", + "integrity": "sha512-Z53Oah9A3TdLoblT7VKJaTDdXdT+lQO+cNpKVnya5JDe9uLvzu1YyY1yFDFrcxrlRgWrEFH0jJtD/IbuwjcEVg==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.0-next.1" + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "requires": { + "wrappy": "1" + } + }, + "optionator": { + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", + "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "dev": true, + "requires": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + } + }, + "os-homedir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", + "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=", + "dev": true + }, + "os-tmpdir": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", + "dev": true + }, + "p-limit": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.1.tgz", + "integrity": "sha512-85Tk+90UCVWvbDavCLKPOLC9vvY8OwEX/RtKF+/1OADJMVlFfEHOiMTPVyxg7mk/dKa+ipdHm0OUkTvCpMTuwg==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "pako": { + "version": "0.2.9", + "resolved": "https://registry.npmjs.org/pako/-/pako-0.2.9.tgz", + "integrity": "sha1-8/dSL073gjSNqBYbrZ7P1Rv4OnU=", + "dev": true + }, + "parse-json": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", + "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "dev": true, + "requires": { + "error-ex": "^1.2.0" + } + }, + "parse5": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-1.5.1.tgz", + "integrity": "sha1-m387DeMr543CQBsXVzzK8Pb1nZQ=", + "dev": true + }, + "path-exists": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-2.1.0.tgz", + "integrity": "sha1-D+tsZPD8UY2adU3V77YscCJ2H0s=", + "dev": true, + "requires": { + "pinkie-promise": "^2.0.0" + } + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "path-parse": { + 
"version": "1.0.6", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", + "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==", + "dev": true + }, + "path-type": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz", + "integrity": "sha1-WcRPfuSR2nBNpBXaWkBwuk+P5EE=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "pify": "^2.0.0", + "pinkie-promise": "^2.0.0" + } + }, + "performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=", + "dev": true + }, + "pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", + "dev": true + }, + "pinkie": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", + "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=", + "dev": true + }, + "pinkie-promise": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", + "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", + "dev": true, + "requires": { + "pinkie": "^2.0.0" + } + }, + "pkg-up": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-1.0.0.tgz", + "integrity": "sha1-Pgj7RhUlxEIWJKM7n35tCvWwWiY=", + "dev": true, + "requires": { + "find-up": "^1.0.0" + } + }, + "prelude-ls": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "dev": true + }, + "pretty-bytes": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-1.0.4.tgz", + "integrity": "sha1-CiLoIQYJrTVUL4yNXSFZr/B1HIQ=", + "dev": true, + "requires": { + "get-stdin": "^4.0.1", + "meow": "^3.1.0" + } + }, + "private": { + 
"version": "0.1.8", + "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", + "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==", + "dev": true + }, + "process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true + }, + "prunk": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/prunk/-/prunk-1.3.1.tgz", + "integrity": "sha512-nLpTxQCqaKzdpzA344aK8u1wpVcnT0OD9oruH281TeqkqnxCWW4dD8Vn65mQWlfwPO8yWFyeK2h2qcw2N7hveA==", + "dev": true + }, + "psl": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.6.0.tgz", + "integrity": "sha512-SYKKmVel98NCOYXpkwUqZqh0ahZeeKfmisiLIcEZdsb+WbLv02g/dI5BUmZnIyOe7RzZtLax81nnb2HbvC2tzA==", + "dev": true + }, + "punycode": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "dev": true + }, + "q": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", + "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=", + "dev": true + }, + "qs": { + "version": "6.9.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.9.1.tgz", + "integrity": "sha512-Cxm7/SS/y/Z3MHWSxXb8lIFqgqBowP5JMlTUFyJN88y0SGQhVmZnqFK/PeuMX9LzUyWsqqhNxIyg0jlzq946yA==", + "dev": true + }, + "raw-body": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-1.1.7.tgz", + "integrity": "sha1-HQJ8K/oRasxmI7yo8AAWVyqH1CU=", + "dev": true, + "requires": { + "bytes": "1", + "string_decoder": "0.10" + }, + "dependencies": { + "string_decoder": { + "version": "0.10.31", + "resolved": 
"https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true + } + } + }, + "read-pkg": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-1.1.0.tgz", + "integrity": "sha1-9f+qXs0pyzHAR0vKfXVra7KePyg=", + "dev": true, + "requires": { + "load-json-file": "^1.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^1.0.0" + } + }, + "read-pkg-up": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-1.0.1.tgz", + "integrity": "sha1-nWPBMnbAZZGNV/ACpX9AobZD+wI=", + "dev": true, + "requires": { + "find-up": "^1.0.0", + "read-pkg": "^1.0.0" + } + }, + "readable-stream": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", + "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "redent": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-1.0.0.tgz", + "integrity": "sha1-z5Fqsf1fHxbfsggi3W7H9zDCr94=", + "dev": true, + "requires": { + "indent-string": "^2.1.0", + "strip-indent": "^1.0.1" + } + }, + "regenerate": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.0.tgz", + "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==", + "dev": true + }, + "regenerator-runtime": { + "version": "0.11.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz", + "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==", + "dev": true + }, + 
"regenerator-transform": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.10.1.tgz", + "integrity": "sha512-PJepbvDbuK1xgIgnau7Y90cwaAmO/LCLMI2mPvaXq2heGMR3aWW5/BQvYrhJ8jgmQjXewXvBjzfqKcVOmhjZ6Q==", + "dev": true, + "requires": { + "babel-runtime": "^6.18.0", + "babel-types": "^6.19.0", + "private": "^0.1.6" + } + }, + "regexpu-core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-2.0.0.tgz", + "integrity": "sha1-SdA4g3uNz4v6W5pCE5k45uoq4kA=", + "dev": true, + "requires": { + "regenerate": "^1.2.1", + "regjsgen": "^0.2.0", + "regjsparser": "^0.1.4" + } + }, + "regjsgen": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.2.0.tgz", + "integrity": "sha1-bAFq3qxVT3WCP+N6wFuS1aTtsfc=", + "dev": true + }, + "regjsparser": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.1.5.tgz", + "integrity": "sha1-fuj4Tcb6eS0/0K4ijSS9lJ6tIFw=", + "dev": true, + "requires": { + "jsesc": "~0.5.0" + } + }, + "repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", + "dev": true + }, + "repeating": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz", + "integrity": "sha1-UhTFOpJtNVJwdSf7q0FdvAjQbdo=", + "dev": true, + "requires": { + "is-finite": "^1.0.0" + } + }, + "request": { + "version": "2.88.0", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.0.tgz", + "integrity": "sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg==", + "dev": true, + "requires": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.0", + 
"http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.4.3", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + }, + "dependencies": { + "punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=", + "dev": true + }, + "qs": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", + "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", + "dev": true + }, + "tough-cookie": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.4.3.tgz", + "integrity": "sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ==", + "dev": true, + "requires": { + "psl": "^1.1.24", + "punycode": "^1.4.1" + } + } + } + }, + "require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", + "dev": true + }, + "require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", + "dev": true + }, + "resolve": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.14.1.tgz", + "integrity": "sha512-fn5Wobh4cxbLzuHaE+nphztHy43/b++4M6SsGFC2gB8uYwf0C8LcarfCz1un7UTW8OFQg9iNjZ4xpcFVGebDPg==", + "dev": true, + "requires": { + "path-parse": "^1.0.6" + } + }, + "resolve-from": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-2.0.0.tgz", + 
"integrity": "sha1-lICrIOlP+h2egKgEx+oUdhGWa1c=", + "dev": true + }, + "resolve-pkg": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/resolve-pkg/-/resolve-pkg-0.1.0.tgz", + "integrity": "sha1-AsyZNBDik2livZcWahsHfalyVTE=", + "dev": true, + "requires": { + "resolve-from": "^2.0.0" + } + }, + "right-align": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/right-align/-/right-align-0.1.3.tgz", + "integrity": "sha1-YTObci/mo1FWiSENJOFMlhSGE+8=", + "dev": true, + "requires": { + "align-text": "^0.1.1" + } + }, + "rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "dev": true, + "requires": { + "glob": "^7.1.3" + }, + "dependencies": { + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + } + } + }, + "rollup": { + "version": "0.58.2", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-0.58.2.tgz", + "integrity": "sha512-RZVvCWm9BHOYloaE6LLiE/ibpjv1CmI8F8k0B0Cp+q1eezo3cswszJH1DN0djgzSlo0hjuuCmyeI+1XOYLl4wg==", + "dev": true, + "requires": { + "@types/estree": "0.0.38", + "@types/node": "*" + } + }, + "rsvp": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/rsvp/-/rsvp-3.6.2.tgz", + "integrity": "sha512-OfWGQTb9vnwRjwtA2QwpG2ICclHC3pgXZO5xt8H2EfgDquO0qVdSb5T88L4qJVAEugbS56pAuV4XZM58UX8ulw==", + "dev": true + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": 
"sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "safe-json-parse": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/safe-json-parse/-/safe-json-parse-1.0.1.tgz", + "integrity": "sha1-PnZyPjjf3aE8mx0poeB//uSzC1c=", + "dev": true + }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true + }, + "sax": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", + "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==", + "dev": true + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true + }, + "set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, + "signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "dev": true + }, + "slash": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", + "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=", + "dev": true + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + }, + "source-map-support": { + "version": "0.4.18", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.4.18.tgz", + "integrity": 
"sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA==", + "dev": true, + "requires": { + "source-map": "^0.5.6" + } + }, + "spdx-correct": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.0.tgz", + "integrity": "sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q==", + "dev": true, + "requires": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-exceptions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz", + "integrity": "sha512-2XQACfElKi9SlVb1CYadKDXvoajPgBVPn/gOQLrTvHdElaVhr7ZEbqJaRnJLVNeaI4cMEAgVCeBMKF6MWRDCRA==", + "dev": true + }, + "spdx-expression-parse": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz", + "integrity": "sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg==", + "dev": true, + "requires": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-license-ids": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz", + "integrity": "sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==", + "dev": true + }, + "sprintf-js": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz", + "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==", + "dev": true + }, + "sshpk": { + "version": "1.16.1", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", + "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", + "dev": true, + "requires": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + 
"dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + } + }, + "string-template": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/string-template/-/string-template-0.2.1.tgz", + "integrity": "sha1-QpMuWYo1LQH8IuwzZ9nYTuxsmt0=", + "dev": true + }, + "string-width": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dev": true, + "requires": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true + }, + "strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dev": true, + "requires": { + "ansi-regex": "^3.0.0" + } + } + } + }, + "string.prototype.trimleft": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string.prototype.trimleft/-/string.prototype.trimleft-2.1.1.tgz", + "integrity": "sha512-iu2AGd3PuP5Rp7x2kEZCrB2Nf41ehzh+goo8TV7z8/XDBbsvc6HQIlUl9RjkZ4oyrW1XM5UwlGl1oVEaDjg6Ag==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "function-bind": "^1.1.1" + } + }, + "string.prototype.trimright": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string.prototype.trimright/-/string.prototype.trimright-2.1.1.tgz", + "integrity": "sha512-qFvWL3/+QIgZXVmJBfpHmxLB7xsUXz6HsUmP8+5dRaC3Q7oKUv9Vo6aMCRZC1smrtyECFsIT30PqBJ1gTjAs+g==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "function-bind": "^1.1.1" + } + }, + "string_decoder": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "strip-bom": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz", + "integrity": "sha1-YhmoVhZSBJHzV4i9vxRHqZx+aw4=", + "dev": true, + "requires": { + "is-utf8": "^0.2.0" + } + }, + "strip-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-1.0.1.tgz", + "integrity": "sha1-DHlipq3vp7vUrDZkYKY4VSrhoKI=", + "dev": true, + "requires": { + "get-stdin": "^4.0.1" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "dev": true + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", + "dev": true + }, + "symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true + }, + "systemjs": { + "version": "0.19.47", + "resolved": "https://registry.npmjs.org/systemjs/-/systemjs-0.19.47.tgz", + "integrity": "sha1-yMk5NxgPP1SBx2nNJyB2P7SjHG8=", + "dev": true, + "requires": { + "when": "^3.7.5" + } + }, + "systemjs-builder": { + "version": "0.16.15", + "resolved": "https://registry.npmjs.org/systemjs-builder/-/systemjs-builder-0.16.15.tgz", + 
"integrity": "sha512-C18G//KWWwQpstAVBUDt0YbbqvSFVVtr0MFqtf2zB4U/cePOA00Btcja++mzlFLMnepVpDv0GdtfE/6A8lrxeA==", + "dev": true, + "requires": { + "babel-core": "^6.24.1", + "babel-plugin-syntax-dynamic-import": "^6.18.0", + "babel-plugin-transform-amd-system-wrapper": "^0.3.7", + "babel-plugin-transform-cjs-system-wrapper": "^0.6.2", + "babel-plugin-transform-es2015-modules-systemjs": "^6.6.5", + "babel-plugin-transform-global-system-wrapper": "^0.3.4", + "babel-plugin-transform-system-register": "^0.0.1", + "bluebird": "^3.3.4", + "data-uri-to-buffer": "0.0.4", + "es6-template-strings": "^2.0.0", + "glob": "^7.0.3", + "mkdirp": "^0.5.1", + "rollup": "^0.58.2", + "source-map": "^0.5.3", + "systemjs": "^0.19.46", + "terser": "3.8.1", + "traceur": "0.0.105" + } + }, + "terser": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-3.8.1.tgz", + "integrity": "sha512-FRin3gKQ0vm0xPPLuxw1FqpVgv1b2pBpYCaFb5qe6A7sD749Fnq1VbDiX3CEFM0BV0fqDzFtBfgmxhxCdzKQIg==", + "dev": true, + "requires": { + "commander": "~2.16.0", + "source-map": "~0.6.1", + "source-map-support": "~0.5.6" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + }, + "source-map-support": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.16.tgz", + "integrity": "sha512-efyLRJDr68D9hBBNIPWFjhpFzURh+KJykQwvMyW5UiZzYwoF6l4YMMDIJJEyFWxWCqfyxLzz6tSfUFR+kXXsVQ==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + } + } + }, + "tiny-lr": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tiny-lr/-/tiny-lr-1.1.1.tgz", + "integrity": "sha512-44yhA3tsaRoMOjQQ+5v5mVdqef+kH6Qze9jTpqtVufgYjYt08zyZAwNwwVBj3i1rJMnR52IxOW0LK0vBzgAkuA==", + "dev": true, + "requires": { + 
"body": "^5.1.0", + "debug": "^3.1.0", + "faye-websocket": "~0.10.0", + "livereload-js": "^2.3.0", + "object-assign": "^4.1.0", + "qs": "^6.4.0" + }, + "dependencies": { + "debug": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", + "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + } + } + }, + "to-fast-properties": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-1.0.3.tgz", + "integrity": "sha1-uDVx+k2MJbguIxsG46MFXeTKGkc=", + "dev": true + }, + "tough-cookie": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", + "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", + "dev": true, + "requires": { + "psl": "^1.1.28", + "punycode": "^2.1.1" + } + }, + "tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=", + "dev": true + }, + "traceur": { + "version": "0.0.105", + "resolved": "https://registry.npmjs.org/traceur/-/traceur-0.0.105.tgz", + "integrity": "sha1-XPne6D1rd4YcPWxE1ThZrterBHk=", + "dev": true, + "requires": { + "commander": "2.9.x", + "glob": "5.0.x", + "rsvp": "^3.0.13", + "semver": "^4.3.3", + "source-map-support": "~0.2.8" + }, + "dependencies": { + "commander": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.9.0.tgz", + "integrity": "sha1-nJkJQXbhIkDLItbFFGCYQA/g99Q=", + "dev": true, + "requires": { + "graceful-readlink": ">= 1.0.0" + } + }, + "glob": { + "version": "5.0.15", 
+ "resolved": "https://registry.npmjs.org/glob/-/glob-5.0.15.tgz", + "integrity": "sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E=", + "dev": true, + "requires": { + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "2 || 3", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "semver": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/semver/-/semver-4.3.6.tgz", + "integrity": "sha1-MAvG4OhjdPe6YQaLWx7NV/xlMto=", + "dev": true + }, + "source-map": { + "version": "0.1.32", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.1.32.tgz", + "integrity": "sha1-yLbBZ3l7pHQKjqMyUhYv8IWRsmY=", + "dev": true, + "requires": { + "amdefine": ">=0.0.4" + } + }, + "source-map-support": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.2.10.tgz", + "integrity": "sha1-6lo5AKHByyUJagrozFwrSxDe09w=", + "dev": true, + "requires": { + "source-map": "0.1.32" + } + } + } + }, + "trim-newlines": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-1.0.0.tgz", + "integrity": "sha1-WIeWa7WCpFA6QetST301ARgVphM=", + "dev": true + }, + "trim-right": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/trim-right/-/trim-right-1.0.1.tgz", + "integrity": "sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM=", + "dev": true + }, + "tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", + "dev": true, + "requires": { + "safe-buffer": "^5.0.1" + } + }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", + "dev": true + }, + "type": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", + "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==", + "dev": 
true + }, + "type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "dev": true, + "requires": { + "prelude-ls": "~1.1.2" + } + }, + "type-detect": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-1.0.0.tgz", + "integrity": "sha1-diIXzAbbJY7EiQihKY6LlRIejqI=", + "dev": true + }, + "typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", + "dev": true + }, + "uglify-js": { + "version": "2.8.29", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-2.8.29.tgz", + "integrity": "sha1-KcVzMUgFe7Th913zW3qcty5qWd0=", + "dev": true, + "requires": { + "source-map": "~0.5.1", + "uglify-to-browserify": "~1.0.0", + "yargs": "~3.10.0" + } + }, + "uglify-to-browserify": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz", + "integrity": "sha1-bgkk1r2mta/jSeOabWMoUKD4grc=", + "dev": true, + "optional": true + }, + "underscore.string": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/underscore.string/-/underscore.string-3.3.5.tgz", + "integrity": "sha512-g+dpmgn+XBneLmXXo+sGlW5xQEt4ErkS3mgeN2GFbremYeMBSJKr9Wf2KJplQVaiPY/f7FN6atosWYNm9ovrYg==", + "dev": true, + "requires": { + "sprintf-js": "^1.0.3", + "util-deprecate": "^1.0.2" + } + }, + "uri-js": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", + "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", + "dev": true, + "requires": { + "punycode": "^2.1.0" + } + }, + "uri-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/uri-path/-/uri-path-1.0.0.tgz", + "integrity": "sha1-l0fwGDWJM8Md4PzP2C0TjmcmLjI=", + "dev": true + }, + "util-deprecate": { + 
"version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "uuid": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.3.tgz", + "integrity": "sha512-pW0No1RGHgzlpHJO1nsVrHKpOEIxkGg1xB+v0ZmdNH5OAeAwzAVrCnI2/6Mtx+Uys6iaylxa+D3g4j63IKKjSQ==", + "dev": true + }, + "validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dev": true, + "requires": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", + "dev": true, + "requires": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "webidl-conversions": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", + "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==", + "dev": true + }, + "websocket-driver": { + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.3.tgz", + "integrity": "sha512-bpxWlvbbB459Mlipc5GBzzZwhoZgGEZLuqPaR0INBGnPAY1vdBX6hPnoFXiw+3yWxDuHyQjO2oXTMyS8A5haFg==", + "dev": true, + "requires": { + "http-parser-js": ">=0.4.0 <0.4.11", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + } + }, + "websocket-extensions": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.3.tgz", + "integrity": 
"sha512-nqHUnMXmBzT0w570r2JpJxfiSD1IzoI+HGVdd3aZ0yNi3ngvQ4jv1dtHt5VGxfI2yj5yqImPhOK4vmIh2xMbGg==", + "dev": true + }, + "whatwg-encoding": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz", + "integrity": "sha512-b5lim54JOPN9HtzvK9HFXvBma/rnfFeqsic0hSpjtDbVxR3dJKLc+KB4V6GgiGOvl7CY/KNh8rxSo9DKQrnUEw==", + "dev": true, + "requires": { + "iconv-lite": "0.4.24" + } + }, + "whatwg-url": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-4.8.0.tgz", + "integrity": "sha1-0pgaqRSMHgCkHFphMRZqtGg7vMA=", + "dev": true, + "requires": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + }, + "dependencies": { + "webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE=", + "dev": true + } + } + }, + "when": { + "version": "3.7.8", + "resolved": "https://registry.npmjs.org/when/-/when-3.7.8.tgz", + "integrity": "sha1-xxMLan6gRpPoQs3J56Hyqjmjn4I=", + "dev": true + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "which-module": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", + "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", + "dev": true + }, + "wide-align": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", + "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", + "dev": true, + "requires": { + "string-width": "^1.0.2 || 2" + } + }, + "window-size": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.0.tgz", + 
"integrity": "sha1-VDjNLqk7IC76Ohn+iIeu58lPnJ0=", + "dev": true + }, + "word-wrap": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", + "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "dev": true + }, + "wordwrap": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", + "integrity": "sha1-t5Zpu0LstAn4PVg8rVLKF+qhZD8=", + "dev": true + }, + "wrap-ansi": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "dev": true + }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "requires": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + } + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": 
"sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dev": true, + "requires": { + "ansi-regex": "^4.1.0" + } + } + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "xml-name-validator": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-2.0.1.tgz", + "integrity": "sha1-TYuPHszTQZqjYgYb7O9RXh5VljU=", + "dev": true + }, + "y18n": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz", + "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==", + "dev": true + }, + "yargs": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.10.0.tgz", + "integrity": "sha1-9+572FfdfB0tOMDnTvvWgdFDH9E=", + "dev": true, + "requires": { + "camelcase": "^1.0.2", + "cliui": "^2.1.0", + "decamelize": "^1.0.0", + "window-size": "0.1.0" + }, + "dependencies": { + "camelcase": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-1.2.1.tgz", + "integrity": "sha1-m7UwTS4LVmmLLHWLCKPqqdqlijk=", + "dev": true + } + } + }, + "yargs-parser": { + "version": "13.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.1.tgz", + "integrity": "sha512-oVAVsHz6uFrg3XQheFII8ESO2ssAf9luWuAd6Wexsu4F3OtIW0o8IribPXYrD4WC24LWtPrJlGy87y5udK+dxQ==", + "dev": true, + "requires": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + }, + "dependencies": { + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + } + } + }, + "yargs-unparser": { + "version": "1.6.0", + "resolved": 
"https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-1.6.0.tgz", + "integrity": "sha512-W9tKgmSn0DpSatfri0nx52Joq5hVXgeLiqR/5G0sZNDoLZFOr/xjBUDcShCOGNsBnEMNo1KAMBkTej1Hm62HTw==", + "dev": true, + "requires": { + "flat": "^4.1.0", + "lodash": "^4.17.15", + "yargs": "^13.3.0" + }, + "dependencies": { + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "dev": true + }, + "cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + "dev": true, + "requires": { + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + } + }, + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, + "lodash": { + "version": "4.17.15", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz", + "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==", + "dev": true + }, + "string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "requires": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + } + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": 
"sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dev": true, + "requires": { + "ansi-regex": "^4.1.0" + } + }, + "yargs": { + "version": "13.3.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.0.tgz", + "integrity": "sha512-2eehun/8ALW8TLoIl7MVaRUrg+yCnenu8B4kBlRxj3GJGDKU1Og7sMXPNm1BYyM1DOJmTZ4YeN/Nwxv+8XJsUA==", + "dev": true, + "requires": { + "cliui": "^5.0.0", + "find-up": "^3.0.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^3.0.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^13.1.1" + } + } + } + }, + "yarn": { + "version": "1.22.0", + "resolved": "https://registry.npmjs.org/yarn/-/yarn-1.22.0.tgz", + "integrity": "sha512-KMHP/Jq53jZKTY9iTUt3dIVl/be6UPs2INo96+BnZHLKxYNTfwMmlgHTaMWyGZoO74RI4AIFvnWhYrXq2USJkg==" + } + } +} diff --git a/src/connector/grafana/tdengine/package.json b/src/connector/grafana/tdengine/package.json new file mode 100644 index 0000000000000000000000000000000000000000..0eb7a76be6cfccd81f680f179c8e59499690201b --- /dev/null +++ b/src/connector/grafana/tdengine/package.json @@ -0,0 +1,45 @@ +{ + "name": "TDengine", + "private": false, + "version": "1.0.0", + "description": "grafana datasource plugin for tdengine", + "scripts": { + "build": "./node_modules/grunt-cli/bin/grunt", + "test": "./node_modules/grunt-cli/bin/grunt mochaTest" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/taosdata/TDengine.git" + }, + "author": "https://www.taosdata.com", + "license": "AGPL 3.0", + "bugs": { + "url": "https://github.com/taosdata/TDengine/issues" + }, + "engineStrict": true, + "devDependencies": { + "babel": "^6.23.0", + "babel-plugin-transform-object-rest-spread": "^6.26.0", + "babel-preset-env": "^1.7.0", + "chai": "~3.5.0", + "grunt": "^1.0.4", + "grunt-babel": "~6.0.0", + "grunt-cli": "^1.2.0", + "grunt-contrib-clean": "^1.1.0", 
+ "grunt-contrib-copy": "^1.0.0", + "grunt-contrib-uglify": "^2.3.0", + "grunt-contrib-watch": "^1.0.0", + "grunt-mocha-test": "^0.13.2", + "grunt-systemjs-builder": "^1.0.0", + "jsdom": "~9.12.0", + "load-grunt-tasks": "^3.5.2", + "mocha": "^6.2.2", + "prunk": "^1.3.0", + "q": "^1.5.0" + }, + "dependencies": { + "lodash": "^4.17.13", + "yarn": "^1.22.0" + }, + "homepage": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine" +} diff --git a/src/connector/grafana/tdengine/query_ctrl.js b/src/connector/grafana/tdengine/query_ctrl.js deleted file mode 100644 index fc9737238f1b637c0605d733e7bc04f770f5beef..0000000000000000000000000000000000000000 --- a/src/connector/grafana/tdengine/query_ctrl.js +++ /dev/null @@ -1,91 +0,0 @@ -'use strict'; - -System.register(['app/plugins/sdk'], function (_export, _context) { - "use strict"; - - var QueryCtrl, _createClass, GenericDatasourceQueryCtrl; - - function _classCallCheck(instance, Constructor) { - if (!(instance instanceof Constructor)) { - throw new TypeError("Cannot call a class as a function"); - } - } - - function _possibleConstructorReturn(self, call) { - if (!self) { - throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); - } - - return call && (typeof call === "object" || typeof call === "function") ? call : self; - } - - function _inherits(subClass, superClass) { - if (typeof superClass !== "function" && superClass !== null) { - throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); - } - - subClass.prototype = Object.create(superClass && superClass.prototype, { - constructor: { - value: subClass, - enumerable: false, - writable: true, - configurable: true - } - }); - if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; - } - - return { - setters: [function (_appPluginsSdk) { - QueryCtrl = _appPluginsSdk.QueryCtrl; - }, function (_cssQueryEditorCss) {}], - execute: function () { - _createClass = function () { - function defineProperties(target, props) { - for (var i = 0; i < props.length; i++) { - var descriptor = props[i]; - descriptor.enumerable = descriptor.enumerable || false; - descriptor.configurable = true; - if ("value" in descriptor) descriptor.writable = true; - Object.defineProperty(target, descriptor.key, descriptor); - } - } - - return function (Constructor, protoProps, staticProps) { - if (protoProps) defineProperties(Constructor.prototype, protoProps); - if (staticProps) defineProperties(Constructor, staticProps); - return Constructor; - }; - }(); - - _export('GenericDatasourceQueryCtrl', GenericDatasourceQueryCtrl = function (_QueryCtrl) { - _inherits(GenericDatasourceQueryCtrl, _QueryCtrl); - - function GenericDatasourceQueryCtrl($scope, $injector) { - _classCallCheck(this, GenericDatasourceQueryCtrl); - - var _this = _possibleConstructorReturn(this, (GenericDatasourceQueryCtrl.__proto__ || Object.getPrototypeOf(GenericDatasourceQueryCtrl)).call(this, $scope, $injector)); - - _this.scope = $scope; - return _this; - } - - _createClass(GenericDatasourceQueryCtrl, [{ - key: 'generateSQL', - value: function generateSQL(query) { - //this.lastGenerateSQL = this.datasource.generateSql(this.target.sql, this.panelCtrl.range.raw.from, this.panelCtrl.range.raw.to, this.panelCtrl.intervalMs); - this.lastGenerateSQL = this.datasource.generateSql(this.target.sql, this.panelCtrl.range.from.toISOString(), this.panelCtrl.range.to.toISOString(), this.panelCtrl.intervalMs); - this.showGenerateSQL = !this.showGenerateSQL; - } - }]); - - return GenericDatasourceQueryCtrl; - }(QueryCtrl)); - - _export('GenericDatasourceQueryCtrl', GenericDatasourceQueryCtrl); - - GenericDatasourceQueryCtrl.templateUrl = 
'partials/query.editor.html'; - } - }; -}); -//# sourceMappingURL=query_ctrl.js.map diff --git a/src/connector/grafana/tdengine/spec/datasource_spec.js b/src/connector/grafana/tdengine/spec/datasource_spec.js new file mode 100755 index 0000000000000000000000000000000000000000..201b6fecb4bddaca2219f5e9a1858a53222cae9a --- /dev/null +++ b/src/connector/grafana/tdengine/spec/datasource_spec.js @@ -0,0 +1,22 @@ +import {Datasource} from "../module"; +import Q from "q"; + +describe('GenericDatasource', function() { + var ctx = {}; + + beforeEach(function() { + ctx.$q = Q; + ctx.backendSrv = {}; + ctx.templateSrv = {}; + ctx.ds = new Datasource({}, ctx.$q, ctx.backendSrv, ctx.templateSrv); + }); + + it('should return an empty array when no targets are set', function(done) { + ctx.ds.query({targets: []}).then(function(result) { + expect(result.data).to.have.length(0); + done(); + }); + }); + + +}); diff --git a/src/connector/grafana/tdengine/spec/test-main.js b/src/connector/grafana/tdengine/spec/test-main.js new file mode 100755 index 0000000000000000000000000000000000000000..3935b6fd4536c2c8cb8db06dd2c9af909adec0b9 --- /dev/null +++ b/src/connector/grafana/tdengine/spec/test-main.js @@ -0,0 +1,20 @@ +import prunk from 'prunk'; +import {jsdom} from 'jsdom'; +import chai from 'chai'; + +// Mock Grafana modules that are not available outside of the core project +// Required for loading module.js +prunk.mock('./css/query-editor.css!', 'no css, dude.'); +prunk.mock('app/plugins/sdk', { + QueryCtrl: null +}); + +// Setup jsdom +// Required for loading angularjs +global.document = jsdom(''); +global.window = global.document.parentWindow; + +// Setup Chai +chai.should(); +global.assert = chai.assert; +global.expect = chai.expect; diff --git a/src/connector/grafana/tdengine/src/css/query-editor.css b/src/connector/grafana/tdengine/src/css/query-editor.css new file mode 100644 index 0000000000000000000000000000000000000000..3b678b9f3689d2131d2224826872b8a75cc1c9fe --- /dev/null 
+++ b/src/connector/grafana/tdengine/src/css/query-editor.css @@ -0,0 +1,3 @@ +.generic-datasource-query-row .query-keyword { + width: 75px; +} \ No newline at end of file diff --git a/src/connector/grafana/tdengine/src/datasource.js b/src/connector/grafana/tdengine/src/datasource.js new file mode 100644 index 0000000000000000000000000000000000000000..08426bb3df6a8a8d78f6289284f56ac83c4713c8 --- /dev/null +++ b/src/connector/grafana/tdengine/src/datasource.js @@ -0,0 +1,127 @@ +import _ from "lodash"; + +export class GenericDatasource { + + constructor(instanceSettings, $q, backendSrv, templateSrv) { + this.type = instanceSettings.type; + this.url = instanceSettings.url; + this.name = instanceSettings.name; + this.q = $q; + this.backendSrv = backendSrv; + this.templateSrv = templateSrv; + this.headers = {'Content-Type': 'application/json'}; + this.headers.Authorization = this.getAuthorization(instanceSettings.jsonData); + } + + query(options) { + var targets = this.buildQueryParameters(options); + + if (targets.length <= 0) { + return this.q.when({data: []}); + } + + return this.doRequest({ + url: this.url + '/grafana/query', + data: targets, + method: 'POST' + }); + } + + testDatasource() { + return this.doRequest({ + url: this.url + '/grafana/heartbeat', + method: 'GET', + }).then(response => { + if (response.status === 200) { + return { status: "success", message: "TDengine Data source is working", title: "Success" }; + } + }); + } + + doRequest(options) { + options.headers = this.headers; + + return this.backendSrv.datasourceRequest(options); + } + + buildQueryParameters(options) { + + var targets = _.map(options.targets, target => { + return { + refId: target.refId, + alias: this.generateAlias(options, target), + sql: this.generateSql(options, target) + }; + }); + + return targets; + } + + encode(input) { + var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; + var output = ""; + var chr1, chr2, chr3, enc1, enc2, enc3, enc4; + var 
i = 0; + while (i < input.length) { + chr1 = input.charCodeAt(i++); + chr2 = input.charCodeAt(i++); + chr3 = input.charCodeAt(i++); + enc1 = chr1 >> 2; + enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); + enc3 = ((chr2 & 15) << 2) | (chr3 >> 6); + enc4 = chr3 & 63; + if (isNaN(chr2)) { + enc3 = enc4 = 64; + } else if (isNaN(chr3)) { + enc4 = 64; + } + output = output + _keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4); + } + + return output; + } + + getAuthorization(jsonData){ + jsonData = jsonData || {}; + var defaultUser = jsonData.user || "root"; + var defaultPassword = jsonData.password || "taosdata"; + + return "Basic " + this.encode(defaultUser + ":" + defaultPassword); + } + + generateAlias(options, target){ + var alias = target.alias || ""; + alias = this.templateSrv.replace(alias, options.scopedVars, 'csv'); + return alias; + } + + generateSql(options, target) { + var sql = target.sql; + if (sql == null || sql == ""){ + return sql; + } + + var queryStart = "now-1h"; + if (options != null && options.range != null && options.range.from != null){ + queryStart = options.range.from.toISOString(); + } + + var queryEnd = "now"; + if (options != null && options.range != null && options.range.to != null){ + queryEnd = options.range.to.toISOString(); + } + var intervalMs = options.intervalMs || "20000"; + + intervalMs += "a"; + sql = sql.replace(/^\s+|\s+$/gm, ''); + sql = sql.replace("$from", "'" + queryStart + "'"); + sql = sql.replace("$begin", "'" + queryStart + "'"); + sql = sql.replace("$to", "'" + queryEnd + "'"); + sql = sql.replace("$end", "'" + queryEnd + "'"); + sql = sql.replace("$interval", intervalMs); + + sql = this.templateSrv.replace(sql, options.scopedVars, 'csv'); + return sql; + } + +} \ No newline at end of file diff --git a/src/connector/grafana/tdengine/src/img/taosdata_logo.png b/src/connector/grafana/tdengine/src/img/taosdata_logo.png new file mode 100644 index 
0000000000000000000000000000000000000000..88d3bacd09593f2735f279714f4a534624042838 Binary files /dev/null and b/src/connector/grafana/tdengine/src/img/taosdata_logo.png differ diff --git a/src/connector/grafana/tdengine/src/module.js b/src/connector/grafana/tdengine/src/module.js new file mode 100644 index 0000000000000000000000000000000000000000..8ab46f4edec3d0d65184984e8e9470b5e2626121 --- /dev/null +++ b/src/connector/grafana/tdengine/src/module.js @@ -0,0 +1,19 @@ +import {GenericDatasource} from './datasource'; +import {GenericDatasourceQueryCtrl} from './query_ctrl'; + +class GenericConfigCtrl {} +GenericConfigCtrl.templateUrl = 'partials/config.html'; + +class GenericQueryOptionsCtrl {} +GenericQueryOptionsCtrl.templateUrl = 'partials/query.options.html'; + +class GenericAnnotationsQueryCtrl {} +GenericAnnotationsQueryCtrl.templateUrl = 'partials/annotations.editor.html' + +export { + GenericDatasource as Datasource, + GenericDatasourceQueryCtrl as QueryCtrl, + GenericConfigCtrl as ConfigCtrl, + GenericQueryOptionsCtrl as QueryOptionsCtrl, + GenericAnnotationsQueryCtrl as AnnotationsQueryCtrl +}; diff --git a/src/connector/grafana/tdengine/src/partials/config.html b/src/connector/grafana/tdengine/src/partials/config.html new file mode 100644 index 0000000000000000000000000000000000000000..801a75327188e95fa437ee1aabe06e6d04101f0a --- /dev/null +++ b/src/connector/grafana/tdengine/src/partials/config.html @@ -0,0 +1,19 @@ +

    TDengine Connection

    + +
    +
    + Host + +
    + +
    +
    + User + +
    +
    + Password + +
    +
    +
    \ No newline at end of file diff --git a/src/connector/grafana/tdengine/src/partials/query.editor.html b/src/connector/grafana/tdengine/src/partials/query.editor.html new file mode 100644 index 0000000000000000000000000000000000000000..4fd209d39459166482139472996d245cd99a0ccd --- /dev/null +++ b/src/connector/grafana/tdengine/src/partials/query.editor.html @@ -0,0 +1,58 @@ + + +
    +
    + + +
    +
    + +
    +
    +
    + + +
    +
    +
    + +
    +
    + +
    +
    + +
    +
    {{ctrl.lastGenerateSQL}}
    +
    + +
    +
    Use any SQL that can return Resultset such as:
    +- [[timestamp1, value1], [timestamp2, value2], ... ]
    +
    +Macros:
    +- $from -> start timestamp of panel
    +- $to -> stop timestamp of panel
    +- $interval -> interval of panel
    +
    +Example of SQL:
    +  SELECT count(*)
    +  FROM db.table
    +  WHERE ts > $from and ts < $to
    +  INTERVAL ($interval)
    +    
    +
    + +
    +
    {{ctrl.lastQueryError}}
    +
    + +
    diff --git a/src/connector/grafana/tdengine/src/plugin.json b/src/connector/grafana/tdengine/src/plugin.json new file mode 100644 index 0000000000000000000000000000000000000000..e9954ce6ce16c7b943f3002896144891c9dbc629 --- /dev/null +++ b/src/connector/grafana/tdengine/src/plugin.json @@ -0,0 +1,35 @@ +{ + "name": "TDengine", + "id": "taosdata-tdengine-datasource", + "type": "datasource", + + "partials": { + "config": "partials/config.html" + }, + + "metrics": true, + "annotations": false, + + "info": { + "description": "grafana datasource plugin for tdengine", + "author": { + "name": "Taosdata Inc.", + "url": "https://www.taosdata.com" + }, + "logos": { + "small": "img/taosdata_logo.png", + "large": "img/taosdata_logo.png" + }, + "links": [ + {"name": "GitHub", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"}, + {"name": "AGPL 3.0", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine/LICENSE"} + ], + "version": "1.0.0", + "updated": "2020-01-13" + }, + + "dependencies": { + "grafanaVersion": "5.2.4", + "plugins": [ ] + } +} diff --git a/src/connector/grafana/tdengine/src/query_ctrl.js b/src/connector/grafana/tdengine/src/query_ctrl.js new file mode 100644 index 0000000000000000000000000000000000000000..2b3998f2ab30bb571ffd66275ec49e632e7236f5 --- /dev/null +++ b/src/connector/grafana/tdengine/src/query_ctrl.js @@ -0,0 +1,25 @@ +import {QueryCtrl} from 'app/plugins/sdk'; +import './css/query-editor.css!' + +export class GenericDatasourceQueryCtrl extends QueryCtrl { + + constructor($scope, $injector) { + super($scope, $injector); + + this.scope = $scope; + this.target.target = this.target.target || 'select metric'; + this.target.type = this.target.type || 'timeserie'; + } + + onChangeInternal() { + this.panelCtrl.refresh(); // Asks the panel to refresh data. 
+ } + + generateSQL(query) { + this.lastGenerateSQL = this.datasource.generateSql( this.panelCtrl, this.target); + this.showGenerateSQL = !this.showGenerateSQL; + } + +} + +GenericDatasourceQueryCtrl.templateUrl = 'partials/query.editor.html'; \ No newline at end of file diff --git a/src/connector/grafana/tdengine/yarn.lock b/src/connector/grafana/tdengine/yarn.lock new file mode 100644 index 0000000000000000000000000000000000000000..fe7e8122ec371f66811c235be81f2e1276ccd5b1 --- /dev/null +++ b/src/connector/grafana/tdengine/yarn.lock @@ -0,0 +1,2963 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +"@types/estree@0.0.38": + version "0.0.38" + resolved "https://registry.yarnpkg.com/@types/estree/-/estree-0.0.38.tgz#c1be40aa933723c608820a99a373a16d215a1ca2" + +"@types/node@*": + version "10.3.6" + resolved "https://registry.yarnpkg.com/@types/node/-/node-10.3.6.tgz#ea8aab9439b59f40d19ec5f13b44642344872b11" + +abab@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/abab/-/abab-1.0.4.tgz#5faad9c2c07f60dd76770f71cf025b62a63cfd4e" + +abbrev@1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f" + +acorn-globals@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-3.1.0.tgz#fd8270f71fbb4996b004fa880ee5d46573a731bf" + dependencies: + acorn "^4.0.4" + +acorn@^4.0.4: + version "4.0.13" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.13.tgz#105495ae5361d697bd195c825192e1ad7f253787" + +ajv@^5.1.0: + version "5.5.2" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.5.2.tgz#73b5eeca3fab653e3d3f9422b341ad42205dc965" + dependencies: + co "^4.6.0" + fast-deep-equal "^1.0.0" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.3.0" + +align-text@^0.1.1, align-text@^0.1.3: + version "0.1.4" + resolved 
"https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117" + dependencies: + kind-of "^3.0.2" + longest "^1.0.1" + repeat-string "^1.5.2" + +amdefine@>=0.0.4: + version "1.0.1" + resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5" + +ansi-colors@3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.3.tgz#57d35b8686e851e2cc04c403f1c00203976a1813" + integrity sha512-LEHHyuhlPY3TmuUYMh2oz89lTShfvgbmzaBcxve9t/9Wuy7Dwf4yoAKcND7KFT1HAQfqZ12qtc+DUrBMeKF9nw== + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + +ansi-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" + integrity sha1-7QMXwyIGT3lGbAKWa922Bas32Zg= + +ansi-regex@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997" + integrity sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg== + +ansi-styles@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + +ansi-styles@^3.2.0, ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + dependencies: + color-convert "^1.9.0" + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== + dependencies: + sprintf-js "~1.0.2" + +array-differ@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/array-differ/-/array-differ-1.0.0.tgz#eff52e3758249d33be402b8bb8e564bb2b5d4031" + +array-equal@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93" + +array-find-index@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1" + +array-union@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + dependencies: + array-uniq "^1.0.1" + +array-uniq@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + +arrify@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" + +asn1@~0.2.3: + version "0.2.4" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136" + dependencies: + safer-buffer "~2.1.0" + +assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + +assertion-error@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/assertion-error/-/assertion-error-1.0.2.tgz#13ca515d86206da0bac66e834dd397d87581094c" + +async@^1.5.2, async@~1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" + +async@^2.6.0: + version "2.6.1" + resolved "https://registry.yarnpkg.com/async/-/async-2.6.1.tgz#b245a23ca71930044ec53fa46aa00a3e87c6a610" + dependencies: + lodash "^4.17.10" + +async@~0.2.6: + version "0.2.10" + resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1" + +asynckit@^0.4.0: + version "0.4.0" + resolved 
"https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" + +aws4@^1.6.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.7.0.tgz#d4d0e9b9dbfca77bf08eeb0a8a471550fe39e289" + +babel-code-frame@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.22.0.tgz#027620bee567a88c32561574e7fd0801d33118e4" + dependencies: + chalk "^1.1.0" + esutils "^2.0.2" + js-tokens "^3.0.0" + +babel-code-frame@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" + dependencies: + chalk "^1.1.3" + esutils "^2.0.2" + js-tokens "^3.0.2" + +babel-core@^6.0.12, babel-core@^6.23.0: + version "6.23.1" + resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.23.1.tgz#c143cb621bb2f621710c220c5d579d15b8a442df" + dependencies: + babel-code-frame "^6.22.0" + babel-generator "^6.23.0" + babel-helpers "^6.23.0" + babel-messages "^6.23.0" + babel-register "^6.23.0" + babel-runtime "^6.22.0" + babel-template "^6.23.0" + babel-traverse "^6.23.1" + babel-types "^6.23.0" + babylon "^6.11.0" + convert-source-map "^1.1.0" + debug "^2.1.1" + json5 "^0.5.0" + lodash "^4.2.0" + minimatch "^3.0.2" + path-is-absolute "^1.0.0" + private "^0.1.6" + slash "^1.0.0" + source-map "^0.5.0" + +babel-core@^6.24.1, babel-core@^6.26.0: + version "6.26.3" + resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.26.3.tgz#b2e2f09e342d0f0c88e2f02e067794125e75c207" + dependencies: + babel-code-frame "^6.26.0" + babel-generator "^6.26.0" + babel-helpers "^6.24.1" + babel-messages "^6.23.0" + babel-register "^6.26.0" + babel-runtime "^6.26.0" + babel-template "^6.26.0" + babel-traverse "^6.26.0" + babel-types "^6.26.0" + babylon "^6.18.0" 
+ convert-source-map "^1.5.1" + debug "^2.6.9" + json5 "^0.5.1" + lodash "^4.17.4" + minimatch "^3.0.4" + path-is-absolute "^1.0.1" + private "^0.1.8" + slash "^1.0.0" + source-map "^0.5.7" + +babel-generator@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.23.0.tgz#6b8edab956ef3116f79d8c84c5a3c05f32a74bc5" + dependencies: + babel-messages "^6.23.0" + babel-runtime "^6.22.0" + babel-types "^6.23.0" + detect-indent "^4.0.0" + jsesc "^1.3.0" + lodash "^4.2.0" + source-map "^0.5.0" + trim-right "^1.0.1" + +babel-generator@^6.26.0: + version "6.26.1" + resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.26.1.tgz#1844408d3b8f0d35a404ea7ac180f087a601bd90" + dependencies: + babel-messages "^6.23.0" + babel-runtime "^6.26.0" + babel-types "^6.26.0" + detect-indent "^4.0.0" + jsesc "^1.3.0" + lodash "^4.17.4" + source-map "^0.5.7" + trim-right "^1.0.1" + +babel-helper-builder-binary-assignment-operator-visitor@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-builder-binary-assignment-operator-visitor/-/babel-helper-builder-binary-assignment-operator-visitor-6.24.1.tgz#cce4517ada356f4220bcae8a02c2b346f9a56664" + dependencies: + babel-helper-explode-assignable-expression "^6.24.1" + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-helper-call-delegate@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz#ece6aacddc76e41c3461f88bfc575bd0daa2df8d" + dependencies: + babel-helper-hoist-variables "^6.24.1" + babel-runtime "^6.22.0" + babel-traverse "^6.24.1" + babel-types "^6.24.1" + +babel-helper-define-map@^6.24.1: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-helper-define-map/-/babel-helper-define-map-6.26.0.tgz#a5f56dab41a25f97ecb498c7ebaca9819f95be5f" + dependencies: + babel-helper-function-name "^6.24.1" + babel-runtime "^6.26.0" + babel-types "^6.26.0" + 
lodash "^4.17.4" + +babel-helper-explode-assignable-expression@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-explode-assignable-expression/-/babel-helper-explode-assignable-expression-6.24.1.tgz#f25b82cf7dc10433c55f70592d5746400ac22caa" + dependencies: + babel-runtime "^6.22.0" + babel-traverse "^6.24.1" + babel-types "^6.24.1" + +babel-helper-function-name@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz#d3475b8c03ed98242a25b48351ab18399d3580a9" + dependencies: + babel-helper-get-function-arity "^6.24.1" + babel-runtime "^6.22.0" + babel-template "^6.24.1" + babel-traverse "^6.24.1" + babel-types "^6.24.1" + +babel-helper-get-function-arity@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz#8f7782aa93407c41d3aa50908f89b031b1b6853d" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-helper-hoist-variables@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.22.0.tgz#3eacbf731d80705845dd2e9718f600cfb9b4ba72" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.22.0" + +babel-helper-hoist-variables@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz#1ecb27689c9d25513eadbc9914a73f5408be7a76" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-helper-optimise-call-expression@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz#f7a13427ba9f73f8f4fa993c54a97882d1244257" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-helper-regex@^6.24.1: + version "6.26.0" + resolved 
"https://registry.yarnpkg.com/babel-helper-regex/-/babel-helper-regex-6.26.0.tgz#325c59f902f82f24b74faceed0363954f6495e72" + dependencies: + babel-runtime "^6.26.0" + babel-types "^6.26.0" + lodash "^4.17.4" + +babel-helper-remap-async-to-generator@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-remap-async-to-generator/-/babel-helper-remap-async-to-generator-6.24.1.tgz#5ec581827ad723fecdd381f1c928390676e4551b" + dependencies: + babel-helper-function-name "^6.24.1" + babel-runtime "^6.22.0" + babel-template "^6.24.1" + babel-traverse "^6.24.1" + babel-types "^6.24.1" + +babel-helper-replace-supers@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz#bf6dbfe43938d17369a213ca8a8bf74b6a90ab1a" + dependencies: + babel-helper-optimise-call-expression "^6.24.1" + babel-messages "^6.23.0" + babel-runtime "^6.22.0" + babel-template "^6.24.1" + babel-traverse "^6.24.1" + babel-types "^6.24.1" + +babel-helpers@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.23.0.tgz#4f8f2e092d0b6a8808a4bde79c27f1e2ecf0d992" + dependencies: + babel-runtime "^6.22.0" + babel-template "^6.23.0" + +babel-helpers@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.24.1.tgz#3471de9caec388e5c850e597e58a26ddf37602b2" + dependencies: + babel-runtime "^6.22.0" + babel-template "^6.24.1" + +babel-messages@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.23.0.tgz#f3cdf4703858035b2a2951c6ec5edf6c62f2630e" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-check-es2015-constants@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz#35157b101426fd2ffd3da3f75c7d1e91835bbf8a" + dependencies: + babel-runtime "^6.22.0" + 
+babel-plugin-syntax-async-functions@^6.8.0: + version "6.13.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-async-functions/-/babel-plugin-syntax-async-functions-6.13.0.tgz#cad9cad1191b5ad634bf30ae0872391e0647be95" + +babel-plugin-syntax-dynamic-import@^6.18.0: + version "6.18.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-dynamic-import/-/babel-plugin-syntax-dynamic-import-6.18.0.tgz#8d6a26229c83745a9982a441051572caa179b1da" + +babel-plugin-syntax-exponentiation-operator@^6.8.0: + version "6.13.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-exponentiation-operator/-/babel-plugin-syntax-exponentiation-operator-6.13.0.tgz#9ee7e8337290da95288201a6a57f4170317830de" + +babel-plugin-syntax-object-rest-spread@^6.8.0: + version "6.13.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-object-rest-spread/-/babel-plugin-syntax-object-rest-spread-6.13.0.tgz#fd6536f2bce13836ffa3a5458c4903a597bb3bf5" + +babel-plugin-syntax-trailing-function-commas@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-6.22.0.tgz#ba0360937f8d06e40180a43fe0d5616fff532cf3" + +babel-plugin-transform-amd-system-wrapper@^0.3.7: + version "0.3.7" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-amd-system-wrapper/-/babel-plugin-transform-amd-system-wrapper-0.3.7.tgz#521c782d35644491c979ea683e8a5e1caff0ba42" + dependencies: + babel-template "^6.9.0" + +babel-plugin-transform-async-to-generator@^6.22.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-async-to-generator/-/babel-plugin-transform-async-to-generator-6.24.1.tgz#6536e378aff6cb1d5517ac0e40eb3e9fc8d08761" + dependencies: + babel-helper-remap-async-to-generator "^6.24.1" + babel-plugin-syntax-async-functions "^6.8.0" + babel-runtime "^6.22.0" + +babel-plugin-transform-cjs-system-wrapper@^0.6.2: + version "0.6.2" + resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-cjs-system-wrapper/-/babel-plugin-transform-cjs-system-wrapper-0.6.2.tgz#bd7494775289424ff493b6ed455de495bd71ba1d" + dependencies: + babel-template "^6.9.0" + +babel-plugin-transform-es2015-arrow-functions@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz#452692cb711d5f79dc7f85e440ce41b9f244d221" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-block-scoped-functions@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz#bbc51b49f964d70cb8d8e0b94e820246ce3a6141" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-block-scoping@^6.23.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.26.0.tgz#d70f5299c1308d05c12f463813b0a09e73b1895f" + dependencies: + babel-runtime "^6.26.0" + babel-template "^6.26.0" + babel-traverse "^6.26.0" + babel-types "^6.26.0" + lodash "^4.17.4" + +babel-plugin-transform-es2015-classes@^6.23.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz#5a4c58a50c9c9461e564b4b2a3bfabc97a2584db" + dependencies: + babel-helper-define-map "^6.24.1" + babel-helper-function-name "^6.24.1" + babel-helper-optimise-call-expression "^6.24.1" + babel-helper-replace-supers "^6.24.1" + babel-messages "^6.23.0" + babel-runtime "^6.22.0" + babel-template "^6.24.1" + babel-traverse "^6.24.1" + babel-types "^6.24.1" + +babel-plugin-transform-es2015-computed-properties@^6.22.0: + version "6.24.1" + resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz#6fe2a8d16895d5634f4cd999b6d3480a308159b3" + dependencies: + babel-runtime "^6.22.0" + babel-template "^6.24.1" + +babel-plugin-transform-es2015-destructuring@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz#997bb1f1ab967f682d2b0876fe358d60e765c56d" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-duplicate-keys@^6.22.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz#73eb3d310ca969e3ef9ec91c53741a6f1576423e" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-plugin-transform-es2015-for-of@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz#f47c95b2b613df1d3ecc2fdb7573623c75248691" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-function-name@^6.22.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz#834c89853bc36b1af0f3a4c5dbaa94fd8eacaa8b" + dependencies: + babel-helper-function-name "^6.24.1" + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-plugin-transform-es2015-literals@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz#4f54a02d6cd66cf915280019a31d31925377ca2e" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-modules-amd@^6.22.0, babel-plugin-transform-es2015-modules-amd@^6.24.1: + version "6.24.1" + resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz#3b3e54017239842d6d19c3011c4bd2f00a00d154" + dependencies: + babel-plugin-transform-es2015-modules-commonjs "^6.24.1" + babel-runtime "^6.22.0" + babel-template "^6.24.1" + +babel-plugin-transform-es2015-modules-commonjs@^6.23.0, babel-plugin-transform-es2015-modules-commonjs@^6.24.1: + version "6.26.2" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.26.2.tgz#58a793863a9e7ca870bdc5a881117ffac27db6f3" + dependencies: + babel-plugin-transform-strict-mode "^6.24.1" + babel-runtime "^6.26.0" + babel-template "^6.26.0" + babel-types "^6.26.0" + +babel-plugin-transform-es2015-modules-systemjs@^6.23.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz#ff89a142b9119a906195f5f106ecf305d9407d23" + dependencies: + babel-helper-hoist-variables "^6.24.1" + babel-runtime "^6.22.0" + babel-template "^6.24.1" + +babel-plugin-transform-es2015-modules-systemjs@^6.6.5: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.23.0.tgz#ae3469227ffac39b0310d90fec73bfdc4f6317b0" + dependencies: + babel-helper-hoist-variables "^6.22.0" + babel-runtime "^6.22.0" + babel-template "^6.23.0" + +babel-plugin-transform-es2015-modules-umd@^6.23.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz#ac997e6285cd18ed6176adb607d602344ad38468" + dependencies: + babel-plugin-transform-es2015-modules-amd "^6.24.1" + babel-runtime "^6.22.0" + babel-template "^6.24.1" + +babel-plugin-transform-es2015-object-super@^6.22.0: + version "6.24.1" + resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz#24cef69ae21cb83a7f8603dad021f572eb278f8d" + dependencies: + babel-helper-replace-supers "^6.24.1" + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-parameters@^6.23.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz#57ac351ab49caf14a97cd13b09f66fdf0a625f2b" + dependencies: + babel-helper-call-delegate "^6.24.1" + babel-helper-get-function-arity "^6.24.1" + babel-runtime "^6.22.0" + babel-template "^6.24.1" + babel-traverse "^6.24.1" + babel-types "^6.24.1" + +babel-plugin-transform-es2015-shorthand-properties@^6.22.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz#24f875d6721c87661bbd99a4622e51f14de38aa0" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-plugin-transform-es2015-spread@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz#d6d68a99f89aedc4536c81a542e8dd9f1746f8d1" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-sticky-regex@^6.22.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz#00c1cdb1aca71112cdf0cf6126c2ed6b457ccdbc" + dependencies: + babel-helper-regex "^6.24.1" + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-plugin-transform-es2015-template-literals@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz#a84b3450f7e9f8f1f6839d6d687da84bb1236d8d" + dependencies: + babel-runtime "^6.22.0" + 
+babel-plugin-transform-es2015-typeof-symbol@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz#dec09f1cddff94b52ac73d505c84df59dcceb372" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-unicode-regex@^6.22.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz#d38b12f42ea7323f729387f18a7c5ae1faeb35e9" + dependencies: + babel-helper-regex "^6.24.1" + babel-runtime "^6.22.0" + regexpu-core "^2.0.0" + +babel-plugin-transform-exponentiation-operator@^6.22.0: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-exponentiation-operator/-/babel-plugin-transform-exponentiation-operator-6.24.1.tgz#2ab0c9c7f3098fa48907772bb813fe41e8de3a0e" + dependencies: + babel-helper-builder-binary-assignment-operator-visitor "^6.24.1" + babel-plugin-syntax-exponentiation-operator "^6.8.0" + babel-runtime "^6.22.0" + +babel-plugin-transform-global-system-wrapper@^0.3.4: + version "0.3.4" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-global-system-wrapper/-/babel-plugin-transform-global-system-wrapper-0.3.4.tgz#948dd7d29fc21447e39bd3447f2debc7f2f73aac" + dependencies: + babel-template "^6.9.0" + +babel-plugin-transform-object-rest-spread@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-object-rest-spread/-/babel-plugin-transform-object-rest-spread-6.26.0.tgz#0f36692d50fef6b7e2d4b3ac1478137a963b7b06" + dependencies: + babel-plugin-syntax-object-rest-spread "^6.8.0" + babel-runtime "^6.26.0" + +babel-plugin-transform-regenerator@^6.22.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.26.0.tgz#e0703696fbde27f0a3efcacf8b4dca2f7b3a8f2f" + dependencies: + 
regenerator-transform "^0.10.0" + +babel-plugin-transform-strict-mode@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz#d5faf7aa578a65bbe591cf5edae04a0c67020758" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-plugin-transform-system-register@^0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-system-register/-/babel-plugin-transform-system-register-0.0.1.tgz#9dff40390c2763ac518f0b2ad7c5ea4f65a5be25" + +babel-preset-env@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/babel-preset-env/-/babel-preset-env-1.7.0.tgz#dea79fa4ebeb883cd35dab07e260c1c9c04df77a" + dependencies: + babel-plugin-check-es2015-constants "^6.22.0" + babel-plugin-syntax-trailing-function-commas "^6.22.0" + babel-plugin-transform-async-to-generator "^6.22.0" + babel-plugin-transform-es2015-arrow-functions "^6.22.0" + babel-plugin-transform-es2015-block-scoped-functions "^6.22.0" + babel-plugin-transform-es2015-block-scoping "^6.23.0" + babel-plugin-transform-es2015-classes "^6.23.0" + babel-plugin-transform-es2015-computed-properties "^6.22.0" + babel-plugin-transform-es2015-destructuring "^6.23.0" + babel-plugin-transform-es2015-duplicate-keys "^6.22.0" + babel-plugin-transform-es2015-for-of "^6.23.0" + babel-plugin-transform-es2015-function-name "^6.22.0" + babel-plugin-transform-es2015-literals "^6.22.0" + babel-plugin-transform-es2015-modules-amd "^6.22.0" + babel-plugin-transform-es2015-modules-commonjs "^6.23.0" + babel-plugin-transform-es2015-modules-systemjs "^6.23.0" + babel-plugin-transform-es2015-modules-umd "^6.23.0" + babel-plugin-transform-es2015-object-super "^6.22.0" + babel-plugin-transform-es2015-parameters "^6.23.0" + babel-plugin-transform-es2015-shorthand-properties "^6.22.0" + babel-plugin-transform-es2015-spread "^6.22.0" + babel-plugin-transform-es2015-sticky-regex "^6.22.0" + 
babel-plugin-transform-es2015-template-literals "^6.22.0" + babel-plugin-transform-es2015-typeof-symbol "^6.23.0" + babel-plugin-transform-es2015-unicode-regex "^6.22.0" + babel-plugin-transform-exponentiation-operator "^6.22.0" + babel-plugin-transform-regenerator "^6.22.0" + browserslist "^3.2.6" + invariant "^2.2.2" + semver "^5.3.0" + +babel-register@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.23.0.tgz#c9aa3d4cca94b51da34826c4a0f9e08145d74ff3" + dependencies: + babel-core "^6.23.0" + babel-runtime "^6.22.0" + core-js "^2.4.0" + home-or-tmp "^2.0.0" + lodash "^4.2.0" + mkdirp "^0.5.1" + source-map-support "^0.4.2" + +babel-register@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.26.0.tgz#6ed021173e2fcb486d7acb45c6009a856f647071" + dependencies: + babel-core "^6.26.0" + babel-runtime "^6.26.0" + core-js "^2.5.0" + home-or-tmp "^2.0.0" + lodash "^4.17.4" + mkdirp "^0.5.1" + source-map-support "^0.4.15" + +babel-runtime@^6.18.0, babel-runtime@^6.22.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.23.0.tgz#0a9489f144de70efb3ce4300accdb329e2fc543b" + dependencies: + core-js "^2.4.0" + regenerator-runtime "^0.10.0" + +babel-runtime@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.26.0.tgz#965c7058668e82b55d7bfe04ff2337bc8b5647fe" + dependencies: + core-js "^2.4.0" + regenerator-runtime "^0.11.0" + +babel-template@^6.23.0, babel-template@^6.9.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.23.0.tgz#04d4f270adbb3aa704a8143ae26faa529238e638" + dependencies: + babel-runtime "^6.22.0" + babel-traverse "^6.23.0" + babel-types "^6.23.0" + babylon "^6.11.0" + lodash "^4.2.0" + +babel-template@^6.24.1, babel-template@^6.26.0: + version "6.26.0" + resolved 
"https://registry.yarnpkg.com/babel-template/-/babel-template-6.26.0.tgz#de03e2d16396b069f46dd9fff8521fb1a0e35e02" + dependencies: + babel-runtime "^6.26.0" + babel-traverse "^6.26.0" + babel-types "^6.26.0" + babylon "^6.18.0" + lodash "^4.17.4" + +babel-traverse@^6.23.0, babel-traverse@^6.23.1: + version "6.23.1" + resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.23.1.tgz#d3cb59010ecd06a97d81310065f966b699e14f48" + dependencies: + babel-code-frame "^6.22.0" + babel-messages "^6.23.0" + babel-runtime "^6.22.0" + babel-types "^6.23.0" + babylon "^6.15.0" + debug "^2.2.0" + globals "^9.0.0" + invariant "^2.2.0" + lodash "^4.2.0" + +babel-traverse@^6.24.1, babel-traverse@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.26.0.tgz#46a9cbd7edcc62c8e5c064e2d2d8d0f4035766ee" + dependencies: + babel-code-frame "^6.26.0" + babel-messages "^6.23.0" + babel-runtime "^6.26.0" + babel-types "^6.26.0" + babylon "^6.18.0" + debug "^2.6.8" + globals "^9.18.0" + invariant "^2.2.2" + lodash "^4.17.4" + +babel-types@^6.19.0, babel-types@^6.22.0, babel-types@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.23.0.tgz#bb17179d7538bad38cd0c9e115d340f77e7e9acf" + dependencies: + babel-runtime "^6.22.0" + esutils "^2.0.2" + lodash "^4.2.0" + to-fast-properties "^1.0.1" + +babel-types@^6.24.1, babel-types@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.26.0.tgz#a3b073f94ab49eb6fa55cd65227a334380632497" + dependencies: + babel-runtime "^6.26.0" + esutils "^2.0.2" + lodash "^4.17.4" + to-fast-properties "^1.0.3" + +babel@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel/-/babel-6.23.0.tgz#d0d1e7d803e974765beea3232d4e153c0efb90f4" + +babylon@^6.11.0, babylon@^6.15.0: + version "6.15.0" + resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.15.0.tgz#ba65cfa1a80e1759b0e89fb562e27dccae70348e" + 
+babylon@^6.18.0: + version "6.18.0" + resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.18.0.tgz#af2f3b88fa6f5c1e4c634d1a0f8eac4f55b395e3" + +balanced-match@^0.4.1: + version "0.4.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838" + +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + +bcrypt-pbkdf@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" + dependencies: + tweetnacl "^0.14.3" + +bluebird@^3.3.4: + version "3.4.7" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.4.7.tgz#f72d760be09b7f76d08ed8fae98b289a8d05fab3" + +body@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/body/-/body-5.1.0.tgz#e4ba0ce410a46936323367609ecb4e6553125069" + dependencies: + continuable-cache "^0.3.1" + error "^7.0.0" + raw-body "~1.1.0" + safe-json-parse "~1.0.1" + +brace-expansion@^1.0.0: + version "1.1.6" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.6.tgz#7197d7eaa9b87e648390ea61fc66c84427420df9" + dependencies: + balanced-match "^0.4.1" + concat-map "0.0.1" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +browser-stdout@1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" + integrity sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== + +browserify-zlib@^0.1.4: + version "0.1.4" + resolved 
"https://registry.yarnpkg.com/browserify-zlib/-/browserify-zlib-0.1.4.tgz#bb35f8a519f600e0fa6b8485241c979d0141fb2d" + dependencies: + pako "~0.2.0" + +browserslist@^3.2.6: + version "3.2.8" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-3.2.8.tgz#b0005361d6471f0f5952797a76fc985f1f978fc6" + dependencies: + caniuse-lite "^1.0.30000844" + electron-to-chromium "^1.3.47" + +buffer-from@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.0.tgz#87fcaa3a298358e0ade6e442cfce840740d1ad04" + +builtin-modules@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f" + +bytes@1: + version "1.0.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-1.0.0.tgz#3569ede8ba34315fab99c3e92cb04c7220de1fa8" + +camelcase-keys@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/camelcase-keys/-/camelcase-keys-2.1.0.tgz#308beeaffdf28119051efa1d932213c91b8f92e7" + dependencies: + camelcase "^2.0.0" + map-obj "^1.0.0" + +camelcase@^1.0.2: + version "1.2.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39" + +camelcase@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f" + +camelcase@^5.0.0: + version "5.3.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" + integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== + +caniuse-lite@^1.0.30000844: + version "1.0.30000859" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30000859.tgz#da974adc5348fffe94724877a7ef8cb5d6d3d777" + +caseless@~0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" + 
+center-align@^0.1.1: + version "0.1.3" + resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad" + dependencies: + align-text "^0.1.3" + lazy-cache "^1.0.3" + +chai@~3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/chai/-/chai-3.5.0.tgz#4d02637b067fe958bdbfdd3a40ec56fef7373247" + dependencies: + assertion-error "^1.0.1" + deep-eql "^0.1.3" + type-detect "^1.0.0" + +chalk@^1.0.0, chalk@^1.1.0, chalk@^1.1.1, chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chalk@^2.0.1: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chalk@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.1.tgz#18c49ab16a037b6eb0152cc83e3471338215b66e" + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +cliui@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1" + dependencies: + center-align "^0.1.1" + right-align "^0.1.1" + wordwrap "0.0.2" + +cliui@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-5.0.0.tgz#deefcfdb2e800784aa34f46fa08e06851c7bbbc5" + integrity sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA== + dependencies: + string-width "^3.1.0" + strip-ansi "^5.2.0" + wrap-ansi "^5.1.0" + +co@^4.6.0: + version "4.6.0" + resolved 
"https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + +coffeescript@~1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/coffeescript/-/coffeescript-1.10.0.tgz#e7aa8301917ef621b35d8a39f348dcdd1db7e33e" + +color-convert@^1.9.0: + version "1.9.2" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.2.tgz#49881b8fba67df12a96bdf3f56c0aab9e7913147" + dependencies: + color-name "1.1.1" + +color-name@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.1.tgz#4b1415304cf50028ea81643643bd82ea05803689" + +colors@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/colors/-/colors-1.1.2.tgz#168a4701756b6a7f51a12ce0c97bfa28c084ed63" + +combined-stream@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.6.tgz#723e7df6e801ac5613113a7e445a9b69cb632818" + dependencies: + delayed-stream "~1.0.0" + +combined-stream@~1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009" + dependencies: + delayed-stream "~1.0.0" + +commander@2.9.x: + version "2.9.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4" + dependencies: + graceful-readlink ">= 1.0.0" + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + +concat-stream@^1.4.1: + version "1.6.2" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" + dependencies: + buffer-from "^1.0.0" + inherits "^2.0.3" + readable-stream "^2.2.2" + typedarray "^0.0.6" + +content-type-parser@^1.0.1: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/content-type-parser/-/content-type-parser-1.0.2.tgz#caabe80623e63638b2502fd4c7f12ff4ce2352e7" + +continuable-cache@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/continuable-cache/-/continuable-cache-0.3.1.tgz#bd727a7faed77e71ff3985ac93351a912733ad0f" + +convert-source-map@^1.1.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.4.0.tgz#e3dad195bf61bfe13a7a3c73e9876ec14a0268f3" + +convert-source-map@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.1.tgz#b8278097b9bc229365de5c62cf5fcaed8b5599e5" + +core-js@^2.4.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.4.1.tgz#4de911e667b0eae9124e34254b53aea6fc618d3e" + +core-js@^2.5.0: + version "2.5.7" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.5.7.tgz#f972608ff0cead68b841a16a932d0b183791814e" + +core-util-is@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + +cssom@0.3.x, "cssom@>= 0.3.2 < 0.4.0": + version "0.3.2" + resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.2.tgz#b8036170c79f07a90ff2f16e22284027a243848b" + +"cssstyle@>= 0.2.37 < 0.3.0": + version "0.2.37" + resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-0.2.37.tgz#541097234cb2513c83ceed3acddc27ff27987d54" + dependencies: + cssom "0.3.x" + +currently-unhandled@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea" + dependencies: + array-find-index "^1.0.1" + +d@1: + version "1.0.0" + resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f" + dependencies: + es5-ext "^0.10.9" + +d@^0.1.1, d@~0.1.1: + version "0.1.1" + resolved 
"https://registry.yarnpkg.com/d/-/d-0.1.1.tgz#da184c535d18d8ee7ba2aa229b914009fae11309" + dependencies: + es5-ext "~0.10.2" + +dashdash@^1.12.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" + dependencies: + assert-plus "^1.0.0" + +data-uri-to-buffer@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/data-uri-to-buffer/-/data-uri-to-buffer-0.0.4.tgz#46e13ab9da8e309745c8d01ce547213ebdb2fe3f" + +dateformat@~1.0.12: + version "1.0.12" + resolved "https://registry.yarnpkg.com/dateformat/-/dateformat-1.0.12.tgz#9f124b67594c937ff706932e4a642cca8dbbfee9" + dependencies: + get-stdin "^4.0.1" + meow "^3.3.0" + +debug@3.2.6: + version "3.2.6" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.6.tgz#e83d17de16d8a7efb7717edbe5fb10135eee629b" + integrity sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ== + dependencies: + ms "^2.1.1" + +debug@^2.1.1, debug@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da" + dependencies: + ms "0.7.1" + +debug@^2.6.8, debug@^2.6.9: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + dependencies: + ms "2.0.0" + +debug@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261" + dependencies: + ms "2.0.0" + +decamelize@^1.0.0, decamelize@^1.1.2, decamelize@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + +deep-eql@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-0.1.3.tgz#ef558acab8de25206cd713906d74e56930eb69f2" + dependencies: + type-detect "0.1.1" + +deep-is@~0.1.3: + version "0.1.3" + resolved 
"https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" + +define-properties@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.2.tgz#83a73f2fea569898fb737193c8f873caf6d45c94" + dependencies: + foreach "^2.0.5" + object-keys "^1.0.8" + +define-properties@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.3.tgz#cf88da6cbee26fe6db7094f61d870cbd84cee9f1" + integrity sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ== + dependencies: + object-keys "^1.0.12" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + +detect-indent@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-4.0.0.tgz#f76d064352cdf43a1cb6ce619c4ee3a9475de208" + dependencies: + repeating "^2.0.0" + +diff@3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/diff/-/diff-3.5.0.tgz#800c0dd1e0a8bfbc95835c202ad220fe317e5a12" + integrity sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA== + +ecc-jsbn@~0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" + dependencies: + jsbn "~0.1.0" + safer-buffer "^2.1.0" + +electron-to-chromium@^1.3.47: + version "1.3.50" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.50.tgz#7438b76f92b41b919f3fbdd350fbd0757dacddf7" + +emoji-regex@^7.0.1: + version "7.0.3" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156" + integrity sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA== + +error-ex@^1.2.0: + version "1.3.2" + resolved 
"https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + dependencies: + is-arrayish "^0.2.1" + +error@^7.0.0: + version "7.0.2" + resolved "https://registry.yarnpkg.com/error/-/error-7.0.2.tgz#a5f75fff4d9926126ddac0ea5dc38e689153cb02" + dependencies: + string-template "~0.2.1" + xtend "~4.0.0" + +es-abstract@^1.17.0-next.1: + version "1.17.0-next.1" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.17.0-next.1.tgz#94acc93e20b05a6e96dacb5ab2f1cb3a81fc2172" + integrity sha512-7MmGr03N7Rnuid6+wyhD9sHNE2n4tFSwExnU2lQl3lIo2ShXWGePY80zYaoMOmILWv57H0amMjZGHNzzGG70Rw== + dependencies: + es-to-primitive "^1.2.1" + function-bind "^1.1.1" + has "^1.0.3" + has-symbols "^1.0.1" + is-callable "^1.1.4" + is-regex "^1.0.4" + object-inspect "^1.7.0" + object-keys "^1.1.1" + object.assign "^4.1.0" + string.prototype.trimleft "^2.1.0" + string.prototype.trimright "^2.1.0" + +es-to-primitive@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a" + integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA== + dependencies: + is-callable "^1.1.4" + is-date-object "^1.0.1" + is-symbol "^1.0.2" + +es5-ext@^0.10.12, es5-ext@^0.10.7, es5-ext@^0.10.9, es5-ext@~0.10.11, es5-ext@~0.10.2: + version "0.10.12" + resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.12.tgz#aa84641d4db76b62abba5e45fd805ecbab140047" + dependencies: + es6-iterator "2" + es6-symbol "~3.1" + +es6-iterator@2: + version "2.0.0" + resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.0.tgz#bd968567d61635e33c0b80727613c9cb4b096bac" + dependencies: + d "^0.1.1" + es5-ext "^0.10.7" + es6-symbol "3" + +es6-symbol@3, es6-symbol@~3.1: + version "3.1.0" + resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.0.tgz#94481c655e7a7cad82eba832d97d5433496d7ffa" + dependencies: + d "~0.1.1" 
+ es5-ext "~0.10.11" + +es6-template-strings@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/es6-template-strings/-/es6-template-strings-2.0.1.tgz#b166c6a62562f478bb7775f6ca96103a599b4b2c" + dependencies: + es5-ext "^0.10.12" + esniff "^1.1" + +escape-string-regexp@1.0.5, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + +escodegen@^1.6.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.8.1.tgz#5a5b53af4693110bebb0867aa3430dd3b70a1018" + dependencies: + esprima "^2.7.1" + estraverse "^1.9.1" + esutils "^2.0.2" + optionator "^0.8.1" + optionalDependencies: + source-map "~0.2.0" + +esniff@^1.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/esniff/-/esniff-1.1.0.tgz#c66849229f91464dede2e0d40201ed6abf65f2ac" + dependencies: + d "1" + es5-ext "^0.10.12" + +esprima@^2.7.1: + version "2.7.3" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581" + +esprima@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" + integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== + +estraverse@^1.9.1: + version "1.9.3" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-1.9.3.tgz#af67f2dc922582415950926091a4005d29c9bb44" + +esutils@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b" + +eventemitter2@~0.4.13: + version "0.4.14" + resolved "https://registry.yarnpkg.com/eventemitter2/-/eventemitter2-0.4.14.tgz#8f61b75cde012b2e9eb284d4545583b5643b61ab" + +exit@~0.1.1: + version "0.1.2" + resolved 
"https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" + +extend@~3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" + +extsprintf@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550" + +fast-deep-equal@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz#c053477817c86b51daa853c81e059b733d023614" + +fast-json-stable-stringify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" + +fast-levenshtein@~2.0.4: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + +faye-websocket@~0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4" + dependencies: + websocket-driver ">=0.5.1" + +figures@^1.0.1: + version "1.7.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-1.7.0.tgz#cbe1e3affcf1cd44b80cadfed28dc793a9701d2e" + dependencies: + escape-string-regexp "^1.0.5" + object-assign "^4.1.0" + +file-sync-cmp@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/file-sync-cmp/-/file-sync-cmp-0.1.1.tgz#a5e7a8ffbfa493b43b923bbd4ca89a53b63b612b" + +find-up@3.0.0, find-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" + integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg== + dependencies: + locate-path "^3.0.0" + +find-up@^1.0.0: + version "1.1.2" + resolved 
"https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f" + dependencies: + path-exists "^2.0.0" + pinkie-promise "^2.0.0" + +findup-sync@~0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.3.0.tgz#37930aa5d816b777c03445e1966cc6790a4c0b16" + dependencies: + glob "~5.0.0" + +flat@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/flat/-/flat-4.1.0.tgz#090bec8b05e39cba309747f1d588f04dbaf98db2" + integrity sha512-Px/TiLIznH7gEDlPXcUD4KnBusa6kR6ayRUVcnEAbreRIuhkqow/mun59BuRXwoYk7ZQOLW1ZM05ilIvK38hFw== + dependencies: + is-buffer "~2.0.3" + +foreach@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.5.tgz#0bee005018aeb260d0a3af3ae658dd0136ec1b99" + +forever-agent@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" + +form-data@~2.3.1: + version "2.3.2" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.2.tgz#4970498be604c20c005d4f5c23aecd21d6b49099" + dependencies: + asynckit "^0.4.0" + combined-stream "1.0.6" + mime-types "^2.1.12" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + +gaze@^1.1.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/gaze/-/gaze-1.1.3.tgz#c441733e13b927ac8c0ff0b4c3b033f28812924a" + dependencies: + globule "^1.0.0" + +get-caller-file@^2.0.1: + version "2.0.5" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== + +get-stdin@^4.0.1: + 
version "4.0.1" + resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe" + +getobject@~0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/getobject/-/getobject-0.1.0.tgz#047a449789fa160d018f5486ed91320b6ec7885c" + +getpass@^0.1.1: + version "0.1.7" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" + dependencies: + assert-plus "^1.0.0" + +glob@5.0.x, glob@~5.0.0: + version "5.0.15" + resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" + dependencies: + inflight "^1.0.4" + inherits "2" + minimatch "2 || 3" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@7.1.3: + version "7.1.3" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.3.tgz#3960832d3f1574108342dafd3a67b332c0969df1" + integrity sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^7.0.3: + version "7.1.1" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.2" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^7.0.5, glob@~7.1.1: + version "7.1.2" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@~7.0.0: + version "7.0.6" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.0.6.tgz#211bafaf49e525b8cd93260d14ab136152b3f57a" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.2" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globals@^9.0.0: + 
version "9.16.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-9.16.0.tgz#63e903658171ec2d9f51b1d31de5e2b8dc01fb80" + +globals@^9.18.0: + version "9.18.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-9.18.0.tgz#aa3896b3e69b487f17e31ed2143d69a8e30c2d8a" + +globule@^1.0.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/globule/-/globule-1.2.1.tgz#5dffb1b191f22d20797a9369b49eab4e9839696d" + dependencies: + glob "~7.1.1" + lodash "~4.17.10" + minimatch "~3.0.2" + +graceful-fs@^4.1.2: + version "4.1.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658" + +"graceful-readlink@>= 1.0.0": + version "1.0.1" + resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725" + +growl@1.10.5: + version "1.10.5" + resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" + integrity sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA== + +grunt-babel@~6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/grunt-babel/-/grunt-babel-6.0.0.tgz#378189b487de1168c4c4a9fc88dd6005b35df960" + dependencies: + babel-core "^6.0.12" + +grunt-cli@^1.2.0, grunt-cli@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/grunt-cli/-/grunt-cli-1.2.0.tgz#562b119ebb069ddb464ace2845501be97b35b6a8" + dependencies: + findup-sync "~0.3.0" + grunt-known-options "~1.1.0" + nopt "~3.0.6" + resolve "~1.1.0" + +grunt-contrib-clean@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/grunt-contrib-clean/-/grunt-contrib-clean-1.1.0.tgz#564abf2d0378a983a15b9e3f30ee75b738c40638" + dependencies: + async "^1.5.2" + rimraf "^2.5.1" + +grunt-contrib-copy@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/grunt-contrib-copy/-/grunt-contrib-copy-1.0.0.tgz#7060c6581e904b8ab0d00f076e0a8f6e3e7c3573" + 
dependencies: + chalk "^1.1.1" + file-sync-cmp "^0.1.0" + +grunt-contrib-uglify@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/grunt-contrib-uglify/-/grunt-contrib-uglify-2.3.0.tgz#b3d0260ebdd6cefa12ff2f8e9e1e259f7de4216f" + dependencies: + chalk "^1.0.0" + maxmin "^1.1.0" + object.assign "^4.0.4" + uglify-js "~2.8.21" + uri-path "^1.0.0" + +grunt-contrib-watch@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/grunt-contrib-watch/-/grunt-contrib-watch-1.1.0.tgz#c143ca5b824b288a024b856639a5345aedb78ed4" + dependencies: + async "^2.6.0" + gaze "^1.1.0" + lodash "^4.17.10" + tiny-lr "^1.1.1" + +grunt-known-options@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/grunt-known-options/-/grunt-known-options-1.1.0.tgz#a4274eeb32fa765da5a7a3b1712617ce3b144149" + +grunt-legacy-log-utils@~2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/grunt-legacy-log-utils/-/grunt-legacy-log-utils-2.0.1.tgz#d2f442c7c0150065d9004b08fd7410d37519194e" + dependencies: + chalk "~2.4.1" + lodash "~4.17.10" + +grunt-legacy-log@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/grunt-legacy-log/-/grunt-legacy-log-2.0.0.tgz#c8cd2c6c81a4465b9bbf2d874d963fef7a59ffb9" + dependencies: + colors "~1.1.2" + grunt-legacy-log-utils "~2.0.0" + hooker "~0.2.3" + lodash "~4.17.5" + +grunt-legacy-util@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/grunt-legacy-util/-/grunt-legacy-util-1.1.1.tgz#e10624e7c86034e5b870c8a8616743f0a0845e42" + dependencies: + async "~1.5.2" + exit "~0.1.1" + getobject "~0.1.0" + hooker "~0.2.3" + lodash "~4.17.10" + underscore.string "~3.3.4" + which "~1.3.0" + +grunt-mocha-test@^0.13.2: + version "0.13.3" + resolved "https://registry.yarnpkg.com/grunt-mocha-test/-/grunt-mocha-test-0.13.3.tgz#9028472b615bda6ddeaa7b30a5a164e9805de005" + dependencies: + hooker "^0.2.3" + mkdirp "^0.5.0" + +grunt-systemjs-builder@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/grunt-systemjs-builder/-/grunt-systemjs-builder-1.0.0.tgz#5d8e7cbeca5b35e2b7b6bd002e9a9d7e03d7decd" + dependencies: + systemjs-builder "0.14.11 - 0.16.x" + +grunt@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/grunt/-/grunt-1.0.4.tgz#c799883945a53a3d07622e0737c8f70bfe19eb38" + integrity sha512-PYsMOrOC+MsdGEkFVwMaMyc6Ob7pKmq+deg1Sjr+vvMWp35sztfwKE7qoN51V+UEtHsyNuMcGdgMLFkBHvMxHQ== + dependencies: + coffeescript "~1.10.0" + dateformat "~1.0.12" + eventemitter2 "~0.4.13" + exit "~0.1.1" + findup-sync "~0.3.0" + glob "~7.0.0" + grunt-cli "~1.2.0" + grunt-known-options "~1.1.0" + grunt-legacy-log "~2.0.0" + grunt-legacy-util "~1.1.1" + iconv-lite "~0.4.13" + js-yaml "~3.13.0" + minimatch "~3.0.2" + mkdirp "~0.5.1" + nopt "~3.0.6" + path-is-absolute "~1.0.0" + rimraf "~2.6.2" + +gzip-size@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/gzip-size/-/gzip-size-1.0.0.tgz#66cf8b101047227b95bace6ea1da0c177ed5c22f" + dependencies: + browserify-zlib "^0.1.4" + concat-stream "^1.4.1" + +har-schema@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" + +har-validator@~5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.0.3.tgz#ba402c266194f15956ef15e0fcf242993f6a7dfd" + dependencies: + ajv "^5.1.0" + har-schema "^2.0.0" + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + dependencies: + ansi-regex "^2.0.0" + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + +has-symbols@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.0.tgz#ba1a8f1af2a0fc39650f5c850367704122063b44" + +has-symbols@^1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.1.tgz#9f5214758a44196c406d9bd76cebf81ec2dd31e8" + integrity sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg== + +has@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== + dependencies: + function-bind "^1.1.1" + +he@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" + integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== + +home-or-tmp@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-2.0.0.tgz#e36c3f2d2cae7d746a857e38d18d5f32a7882db8" + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.1" + +hooker@^0.2.3, hooker@~0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/hooker/-/hooker-0.2.3.tgz#b834f723cc4a242aa65963459df6d984c5d3d959" + +hosted-git-info@^2.1.4: + version "2.6.1" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.6.1.tgz#6e4cee78b01bb849dcf93527708c69fdbee410df" + +html-encoding-sniffer@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.2.tgz#e70d84b94da53aa375e11fe3a351be6642ca46f8" + dependencies: + whatwg-encoding "^1.0.1" + +http-parser-js@>=0.4.0: + version "0.4.13" + resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.4.13.tgz#3bd6d6fde6e3172c9334c3b33b6c193d80fe1137" + +http-signature@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" + dependencies: + assert-plus "^1.0.0" + jsprim "^1.2.2" + sshpk "^1.7.0" + +iconv-lite@0.4.19: + version "0.4.19" + resolved 
"https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.19.tgz#f7468f60135f5e5dad3399c0a81be9a1603a082b" + +iconv-lite@~0.4.13: + version "0.4.23" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.23.tgz#297871f63be507adcfbfca715d0cd0eed84e9a63" + dependencies: + safer-buffer ">= 2.1.2 < 3" + +indent-string@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-2.1.0.tgz#8e2d48348742121b4a8218b7a137e9a52049dc80" + dependencies: + repeating "^2.0.0" + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@^2.0.3, inherits@~2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + +invariant@^2.2.0: + version "2.2.2" + resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.2.tgz#9e1f56ac0acdb6bf303306f338be3b204ae60360" + dependencies: + loose-envify "^1.0.0" + +invariant@^2.2.2: + version "2.2.4" + resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" + dependencies: + loose-envify "^1.0.0" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + +is-buffer@^1.0.2: + version "1.1.4" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.4.tgz#cfc86ccd5dc5a52fa80489111c6920c457e2d98b" + +is-buffer@~2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.4.tgz#3e572f23c8411a5cfd9557c849e3665e0b290623" + integrity sha512-Kq1rokWXOPXWuaMAqZiJW4XxsmD9zGx9q4aePabbn3qCRGedtH7Cm+zV8WETitMfu1wdh+Rvd6w5egwSngUX2A== + +is-builtin-module@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe" + dependencies: + builtin-modules "^1.0.0" + +is-callable@^1.1.4: + version "1.1.5" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.1.5.tgz#f7e46b596890456db74e7f6e976cb3273d06faab" + integrity sha512-ESKv5sMCJB2jnHTWZ3O5itG+O128Hsus4K4Qh1h2/cgn2vbgnLSVqfV46AeJA9D5EeeLa9w81KUXMtn34zhX+Q== + +is-date-object@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.2.tgz#bda736f2cd8fd06d32844e7743bfa7494c3bfd7e" + integrity sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g== + +is-finite@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa" + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + integrity sha1-o7MKXE8ZkYMWeqq5O+764937ZU8= + +is-regex@^1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.0.5.tgz#39d589a358bf18967f726967120b8fc1aed74eae" + integrity sha512-vlKW17SNq44owv5AQR3Cq0bQPEb8+kF3UKZ2fiZNOWtztYE5i0CzCZxFDwO58qAOWtxdBRVO/V5Qin1wjCqFYQ== + dependencies: + has "^1.0.3" + +is-symbol@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.3.tgz#38e1014b9e6329be0de9d24a414fd7441ec61937" + integrity sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ== + dependencies: + has-symbols "^1.0.1" + +is-typedarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + +is-utf8@^0.2.0: + version "0.2.1" + resolved 
"https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72" + +isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + +isstream@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" + +js-tokens@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.1.tgz#08e9f132484a2c45a30907e9dc4d5567b7f114d7" + +js-tokens@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" + +js-yaml@3.13.1, js-yaml@~3.13.0: + version "3.13.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847" + integrity sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +jsbn@~0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" + +jsdom@~9.12.0: + version "9.12.0" + resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-9.12.0.tgz#e8c546fffcb06c00d4833ca84410fed7f8a097d4" + dependencies: + abab "^1.0.3" + acorn "^4.0.4" + acorn-globals "^3.1.0" + array-equal "^1.0.0" + content-type-parser "^1.0.1" + cssom ">= 0.3.2 < 0.4.0" + cssstyle ">= 0.2.37 < 0.3.0" + escodegen "^1.6.1" + html-encoding-sniffer "^1.0.1" + nwmatcher ">= 1.3.9 < 2.0.0" + parse5 "^1.5.1" + request "^2.79.0" + sax "^1.2.1" + symbol-tree "^3.2.1" + tough-cookie "^2.3.2" + webidl-conversions "^4.0.0" + whatwg-encoding "^1.0.1" + whatwg-url "^4.3.0" + xml-name-validator "^2.0.1" + +jsesc@^1.3.0: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/jsesc/-/jsesc-1.3.0.tgz#46c3fec8c1892b12b0833db9bc7622176dbab34b" + +jsesc@~0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" + +json-schema-traverse@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz#349a6d44c53a51de89b40805c5d5e59b417d3340" + +json-schema@0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" + +json-stringify-safe@~5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + +json5@^0.5.0, json5@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" + +jsprim@^1.2.2: + version "1.3.1" + resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.3.1.tgz#2a7256f70412a29ee3670aaca625994c4dcff252" + dependencies: + extsprintf "1.0.2" + json-schema "0.2.3" + verror "1.3.6" + +kind-of@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.1.0.tgz#475d698a5e49ff5e53d14e3e732429dc8bf4cf47" + dependencies: + is-buffer "^1.0.2" + +lazy-cache@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e" + +levn@~0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" + dependencies: + prelude-ls "~1.1.2" + type-check "~0.3.2" + +livereload-js@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/livereload-js/-/livereload-js-2.3.0.tgz#c3ab22e8aaf5bf3505d80d098cbad67726548c9a" + +load-grunt-tasks@^3.5.2: + version "3.5.2" + resolved 
"https://registry.yarnpkg.com/load-grunt-tasks/-/load-grunt-tasks-3.5.2.tgz#0728561180fd20ff8a6927505852fc58aaea0c88" + dependencies: + arrify "^1.0.0" + multimatch "^2.0.0" + pkg-up "^1.0.0" + resolve-pkg "^0.1.0" + +load-json-file@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0" + dependencies: + graceful-fs "^4.1.2" + parse-json "^2.2.0" + pify "^2.0.0" + pinkie-promise "^2.0.0" + strip-bom "^2.0.0" + +locate-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" + integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A== + dependencies: + p-locate "^3.0.0" + path-exists "^3.0.0" + +lodash@^4.17.10, lodash@^4.17.13, lodash@^4.17.4, lodash@^4.2.0, lodash@~4.17.10, lodash@~4.17.5: + version "4.17.13" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.13.tgz#0bdc3a6adc873d2f4e0c4bac285df91b64fc7b93" + +lodash@^4.17.15: + version "4.17.15" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.15.tgz#b447f6670a0455bbfeedd11392eff330ea097548" + integrity sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A== + +log-symbols@2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-2.2.0.tgz#5740e1c5d6f0dfda4ad9323b5332107ef6b4c40a" + integrity sha512-VeIAFslyIerEJLXHziedo2basKbMKtTw3vfn5IzG0XTjhAVEJyNHnL2p7vc+wBDSdQuUpNw3M2u6xb9QsAY5Eg== + dependencies: + chalk "^2.0.1" + +longest@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097" + +loose-envify@^1.0.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.3.1.tgz#d1a8ad33fa9ce0e713d65fdd0ac8b748d478c848" + dependencies: + js-tokens "^3.0.0" + +loud-rejection@^1.0.0: 
+ version "1.6.0" + resolved "https://registry.yarnpkg.com/loud-rejection/-/loud-rejection-1.6.0.tgz#5b46f80147edee578870f086d04821cf998e551f" + dependencies: + currently-unhandled "^0.4.1" + signal-exit "^3.0.0" + +map-obj@^1.0.0, map-obj@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-1.0.1.tgz#d933ceb9205d82bdcf4886f6742bdc2b4dea146d" + +maxmin@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/maxmin/-/maxmin-1.1.0.tgz#71365e84a99dd8f8b3f7d5fde2f00d1e7f73be61" + dependencies: + chalk "^1.0.0" + figures "^1.0.1" + gzip-size "^1.0.0" + pretty-bytes "^1.0.0" + +meow@^3.1.0, meow@^3.3.0: + version "3.7.0" + resolved "https://registry.yarnpkg.com/meow/-/meow-3.7.0.tgz#72cb668b425228290abbfa856892587308a801fb" + dependencies: + camelcase-keys "^2.0.0" + decamelize "^1.1.2" + loud-rejection "^1.0.0" + map-obj "^1.0.1" + minimist "^1.1.3" + normalize-package-data "^2.3.4" + object-assign "^4.0.1" + read-pkg-up "^1.0.1" + redent "^1.0.0" + trim-newlines "^1.0.0" + +mime-db@~1.26.0: + version "1.26.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.26.0.tgz#eaffcd0e4fc6935cf8134da246e2e6c35305adff" + +mime-db@~1.33.0: + version "1.33.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.33.0.tgz#a3492050a5cb9b63450541e39d9788d2272783db" + +mime-types@^2.1.12: + version "2.1.14" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.14.tgz#f7ef7d97583fcaf3b7d282b6f8b5679dab1e94ee" + dependencies: + mime-db "~1.26.0" + +mime-types@~2.1.17: + version "2.1.18" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.18.tgz#6f323f60a83d11146f831ff11fd66e2fe5503bb8" + dependencies: + mime-db "~1.33.0" + +"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774" + dependencies: + brace-expansion "^1.0.0" + +minimatch@3.0.4, minimatch@^3.0.4, 
minimatch@~3.0.2: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + dependencies: + brace-expansion "^1.1.7" + +minimist@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + +minimist@^1.1.3: + version "1.2.0" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" + +mkdirp@0.5.1, mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + dependencies: + minimist "0.0.8" + +mocha@^6.2.2: + version "6.2.2" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-6.2.2.tgz#5d8987e28940caf8957a7d7664b910dc5b2fea20" + integrity sha512-FgDS9Re79yU1xz5d+C4rv1G7QagNGHZ+iXF81hO8zY35YZZcLEsJVfFolfsqKFWunATEvNzMK0r/CwWd/szO9A== + dependencies: + ansi-colors "3.2.3" + browser-stdout "1.3.1" + debug "3.2.6" + diff "3.5.0" + escape-string-regexp "1.0.5" + find-up "3.0.0" + glob "7.1.3" + growl "1.10.5" + he "1.2.0" + js-yaml "3.13.1" + log-symbols "2.2.0" + minimatch "3.0.4" + mkdirp "0.5.1" + ms "2.1.1" + node-environment-flags "1.0.5" + object.assign "4.1.0" + strip-json-comments "2.0.1" + supports-color "6.0.0" + which "1.3.1" + wide-align "1.1.3" + yargs "13.3.0" + yargs-parser "13.1.1" + yargs-unparser "1.6.0" + +ms@0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + +ms@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" + integrity sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg== + +ms@^2.1.1: + version "2.1.2" 
+ resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" + integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== + +multimatch@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/multimatch/-/multimatch-2.1.0.tgz#9c7906a22fb4c02919e2f5f75161b4cdbd4b2a2b" + dependencies: + array-differ "^1.0.0" + array-union "^1.0.1" + arrify "^1.0.0" + minimatch "^3.0.0" + +node-environment-flags@1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/node-environment-flags/-/node-environment-flags-1.0.5.tgz#fa930275f5bf5dae188d6192b24b4c8bbac3d76a" + integrity sha512-VNYPRfGfmZLx0Ye20jWzHUjyTW/c+6Wq+iLhDzUI4XmhrDd9l/FozXV3F2xOaXjvp0co0+v1YSR3CMP6g+VvLQ== + dependencies: + object.getownpropertydescriptors "^2.0.3" + semver "^5.7.0" + +nopt@~3.0.6: + version "3.0.6" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9" + dependencies: + abbrev "1" + +normalize-package-data@^2.3.2, normalize-package-data@^2.3.4: + version "2.4.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.4.0.tgz#12f95a307d58352075a04907b84ac8be98ac012f" + dependencies: + hosted-git-info "^2.1.4" + is-builtin-module "^1.0.0" + semver "2 || 3 || 4 || 5" + validate-npm-package-license "^3.0.1" + +number-is-nan@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + +"nwmatcher@>= 1.3.9 < 2.0.0": + version "1.4.4" + resolved "https://registry.yarnpkg.com/nwmatcher/-/nwmatcher-1.4.4.tgz#2285631f34a95f0d0395cd900c96ed39b58f346e" + +oauth-sign@~0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43" + +object-assign@^4.0.1, object-assign@^4.1.0: + version "4.1.1" + resolved 
"https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + +object-inspect@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.7.0.tgz#f4f6bd181ad77f006b5ece60bd0b6f398ff74a67" + integrity sha512-a7pEHdh1xKIAgTySUGgLMx/xwDZskN1Ud6egYYN3EdRW4ZMPNEDUTF+hwy2LUC+Bl+SyLXANnwz/jyh/qutKUw== + +object-keys@^1.0.11, object-keys@^1.0.8: + version "1.0.12" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.12.tgz#09c53855377575310cca62f55bb334abff7b3ed2" + +object-keys@^1.0.12, object-keys@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" + integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== + +object.assign@4.1.0, object.assign@^4.0.4, object.assign@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" + dependencies: + define-properties "^1.1.2" + function-bind "^1.1.1" + has-symbols "^1.0.0" + object-keys "^1.0.11" + +object.getownpropertydescriptors@^2.0.3: + version "2.1.0" + resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.0.tgz#369bf1f9592d8ab89d712dced5cb81c7c5352649" + integrity sha512-Z53Oah9A3TdLoblT7VKJaTDdXdT+lQO+cNpKVnya5JDe9uLvzu1YyY1yFDFrcxrlRgWrEFH0jJtD/IbuwjcEVg== + dependencies: + define-properties "^1.1.3" + es-abstract "^1.17.0-next.1" + +once@^1.3.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + dependencies: + wrappy "1" + +optionator@^0.8.1: + version "0.8.2" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64" + dependencies: + deep-is "~0.1.3" + fast-levenshtein "~2.0.4" + levn "~0.3.0" + 
prelude-ls "~1.1.2" + type-check "~0.3.2" + wordwrap "~1.0.0" + +os-homedir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" + +os-tmpdir@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + +p-limit@^2.0.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.2.1.tgz#aa07a788cc3151c939b5131f63570f0dd2009537" + integrity sha512-85Tk+90UCVWvbDavCLKPOLC9vvY8OwEX/RtKF+/1OADJMVlFfEHOiMTPVyxg7mk/dKa+ipdHm0OUkTvCpMTuwg== + dependencies: + p-try "^2.0.0" + +p-locate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" + integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ== + dependencies: + p-limit "^2.0.0" + +p-try@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== + +pako@~0.2.0: + version "0.2.9" + resolved "https://registry.yarnpkg.com/pako/-/pako-0.2.9.tgz#f3f7522f4ef782348da8161bad9ecfd51bf83a75" + +parse-json@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9" + dependencies: + error-ex "^1.2.0" + +parse5@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-1.5.1.tgz#9b7f3b0de32be78dc2401b17573ccaf0f6f59d94" + +path-exists@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b" + dependencies: + pinkie-promise "^2.0.0" + +path-exists@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + integrity sha1-zg6+ql94yxiSXqfYENe1mwEP1RU= + +path-is-absolute@^1.0.0, path-is-absolute@^1.0.1, path-is-absolute@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + +path-type@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441" + dependencies: + graceful-fs "^4.1.2" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +performance-now@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + +pify@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + +pkg-up@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/pkg-up/-/pkg-up-1.0.0.tgz#3e08fb461525c4421624a33b9f7e6d0af5b05a26" + dependencies: + find-up "^1.0.0" + +prelude-ls@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" + +pretty-bytes@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-1.0.4.tgz#0a22e8210609ad35542f8c8d5d2159aff0751c84" + dependencies: + get-stdin "^4.0.1" + meow "^3.1.0" + +private@^0.1.6: + version "0.1.7" + resolved "https://registry.yarnpkg.com/private/-/private-0.1.7.tgz#68ce5e8a1ef0a23bb570cc28537b5332aba63ef1" + +private@^0.1.8: + 
version "0.1.8" + resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff" + +process-nextick-args@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa" + +prunk@^1.3.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/prunk/-/prunk-1.3.1.tgz#5c1f5615c218ac76621b4d8a97ec13717807756a" + +psl@^1.1.24: + version "1.1.28" + resolved "https://registry.yarnpkg.com/psl/-/psl-1.1.28.tgz#4fb6ceb08a1e2214d4fd4de0ca22dae13740bc7b" + +punycode@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" + +q@^1.5.0: + version "1.5.1" + resolved "https://registry.yarnpkg.com/q/-/q-1.5.1.tgz#7e32f75b41381291d04611f1bf14109ac00651d7" + +qs@^6.4.0, qs@~6.5.1: + version "6.5.2" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36" + +raw-body@~1.1.0: + version "1.1.7" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-1.1.7.tgz#1d027c2bfa116acc6623bca8f00016572a87d425" + dependencies: + bytes "1" + string_decoder "0.10" + +read-pkg-up@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-1.0.1.tgz#9d63c13276c065918d57f002a57f40a1b643fb02" + dependencies: + find-up "^1.0.0" + read-pkg "^1.0.0" + +read-pkg@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-1.1.0.tgz#f5ffaa5ecd29cb31c0474bca7d756b6bb29e3f28" + dependencies: + load-json-file "^1.0.0" + normalize-package-data "^2.3.2" + path-type "^1.0.0" + +readable-stream@^2.2.2: + version "2.3.6" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + 
string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +redent@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/redent/-/redent-1.0.0.tgz#cf916ab1fd5f1f16dfb20822dd6ec7f730c2afde" + dependencies: + indent-string "^2.1.0" + strip-indent "^1.0.1" + +regenerate@^1.2.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260" + +regenerator-runtime@^0.10.0: + version "0.10.3" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.3.tgz#8c4367a904b51ea62a908ac310bf99ff90a82a3e" + +regenerator-runtime@^0.11.0: + version "0.11.1" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz#be05ad7f9bf7d22e056f9726cee5017fbf19e2e9" + +regenerator-transform@^0.10.0: + version "0.10.1" + resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.10.1.tgz#1e4996837231da8b7f3cf4114d71b5691a0680dd" + dependencies: + babel-runtime "^6.18.0" + babel-types "^6.19.0" + private "^0.1.6" + +regexpu-core@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-2.0.0.tgz#49d038837b8dcf8bfa5b9a42139938e6ea2ae240" + dependencies: + regenerate "^1.2.1" + regjsgen "^0.2.0" + regjsparser "^0.1.4" + +regjsgen@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7" + +regjsparser@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c" + dependencies: + jsesc "~0.5.0" + +repeat-string@^1.5.2: + version "1.6.1" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" + +repeating@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda" + dependencies: + 
is-finite "^1.0.0" + +request@^2.79.0: + version "2.87.0" + resolved "https://registry.yarnpkg.com/request/-/request-2.87.0.tgz#32f00235cd08d482b4d0d68db93a829c0ed5756e" + dependencies: + aws-sign2 "~0.7.0" + aws4 "^1.6.0" + caseless "~0.12.0" + combined-stream "~1.0.5" + extend "~3.0.1" + forever-agent "~0.6.1" + form-data "~2.3.1" + har-validator "~5.0.3" + http-signature "~1.2.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.17" + oauth-sign "~0.8.2" + performance-now "^2.1.0" + qs "~6.5.1" + safe-buffer "^5.1.1" + tough-cookie "~2.3.3" + tunnel-agent "^0.6.0" + uuid "^3.1.0" + +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I= + +require-main-filename@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" + integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== + +resolve-from@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-2.0.0.tgz#9480ab20e94ffa1d9e80a804c7ea147611966b57" + +resolve-pkg@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/resolve-pkg/-/resolve-pkg-0.1.0.tgz#02cc993410e2936962bd97166a1b077da9725531" + dependencies: + resolve-from "^2.0.0" + +resolve@~1.1.0: + version "1.1.7" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.1.7.tgz#203114d82ad2c5ed9e8e0411b3932875e889e97b" + +right-align@^0.1.1: + version "0.1.3" + resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef" + dependencies: + align-text "^0.1.1" + +rimraf@^2.5.1, rimraf@~2.6.2: + version "2.6.2" + resolved 
"https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.2.tgz#2ed8150d24a16ea8651e6d6ef0f47c4158ce7a36" + dependencies: + glob "^7.0.5" + +rollup@^0.58.2: + version "0.58.2" + resolved "https://registry.yarnpkg.com/rollup/-/rollup-0.58.2.tgz#2feddea8c0c022f3e74b35c48e3c21b3433803ce" + dependencies: + "@types/estree" "0.0.38" + "@types/node" "*" + +rsvp@^3.0.13: + version "3.3.3" + resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.3.3.tgz#34633caaf8bc66ceff4be3c2e1dffd032538a813" + +safe-buffer@^5.0.1, safe-buffer@^5.1.1, safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + +safe-json-parse@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/safe-json-parse/-/safe-json-parse-1.0.1.tgz#3e76723e38dfdda13c9b1d29a1e07ffee4b30b57" + +"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + +sax@^1.2.1: + version "1.2.4" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + +"semver@2 || 3 || 4 || 5", semver@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.5.0.tgz#dc4bbc7a6ca9d916dee5d43516f0092b58f7b8ab" + +semver@^4.3.3: + version "4.3.6" + resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da" + +semver@^5.7.0: + version "5.7.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" + integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== + +set-blocking@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + integrity 
sha1-BF+XgtARrppoA93TgrJDkrPYkPc= + +signal-exit@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" + +slash@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55" + +source-map-support@^0.4.15: + version "0.4.18" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.18.tgz#0286a6de8be42641338594e97ccea75f0a2c585f" + dependencies: + source-map "^0.5.6" + +source-map-support@^0.4.2: + version "0.4.11" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.11.tgz#647f939978b38535909530885303daf23279f322" + dependencies: + source-map "^0.5.3" + +source-map-support@~0.2.8: + version "0.2.10" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.2.10.tgz#ea5a3900a1c1cb25096a0ae8cc5c2b4b10ded3dc" + dependencies: + source-map "0.1.32" + +source-map@0.1.32: + version "0.1.32" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.32.tgz#c8b6c167797ba4740a8ea33252162ff08591b266" + dependencies: + amdefine ">=0.0.4" + +source-map@^0.5.0, source-map@^0.5.3, source-map@~0.5.1: + version "0.5.6" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412" + +source-map@^0.5.6, source-map@^0.5.7: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + +source-map@~0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.2.0.tgz#dab73fbcfc2ba819b4de03bd6f6eaa48164b3f9d" + dependencies: + amdefine ">=0.0.4" + +spdx-correct@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.0.0.tgz#05a5b4d7153a195bc92c3c425b69f3b2a9524c82" + dependencies: + spdx-expression-parse "^3.0.0" + spdx-license-ids 
"^3.0.0" + +spdx-exceptions@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.1.0.tgz#2c7ae61056c714a5b9b9b2b2af7d311ef5c78fe9" + +spdx-expression-parse@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz#99e119b7a5da00e05491c9fa338b7904823b41d0" + dependencies: + spdx-exceptions "^2.1.0" + spdx-license-ids "^3.0.0" + +spdx-license-ids@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.0.tgz#7a7cd28470cc6d3a1cfe6d66886f6bc430d3ac87" + +sprintf-js@^1.0.3: + version "1.1.2" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.1.2.tgz#da1765262bf8c0f571749f2ad6c26300207ae673" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + +sshpk@^1.7.0: + version "1.16.1" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.16.1.tgz#fb661c0bef29b39db40769ee39fa70093d6f6877" + dependencies: + asn1 "~0.2.3" + assert-plus "^1.0.0" + bcrypt-pbkdf "^1.0.0" + dashdash "^1.12.0" + ecc-jsbn "~0.1.1" + getpass "^0.1.1" + jsbn "~0.1.0" + safer-buffer "^2.0.2" + tweetnacl "~0.14.0" + +string-template@~0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/string-template/-/string-template-0.2.1.tgz#42932e598a352d01fc22ec3367d9d84eec6c9add" + +"string-width@^1.0.2 || 2": + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string-width@^3.0.0, string-width@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" + integrity 
sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w== + dependencies: + emoji-regex "^7.0.1" + is-fullwidth-code-point "^2.0.0" + strip-ansi "^5.1.0" + +string.prototype.trimleft@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string.prototype.trimleft/-/string.prototype.trimleft-2.1.1.tgz#9bdb8ac6abd6d602b17a4ed321870d2f8dcefc74" + integrity sha512-iu2AGd3PuP5Rp7x2kEZCrB2Nf41ehzh+goo8TV7z8/XDBbsvc6HQIlUl9RjkZ4oyrW1XM5UwlGl1oVEaDjg6Ag== + dependencies: + define-properties "^1.1.3" + function-bind "^1.1.1" + +string.prototype.trimright@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string.prototype.trimright/-/string.prototype.trimright-2.1.1.tgz#440314b15996c866ce8a0341894d45186200c5d9" + integrity sha512-qFvWL3/+QIgZXVmJBfpHmxLB7xsUXz6HsUmP8+5dRaC3Q7oKUv9Vo6aMCRZC1smrtyECFsIT30PqBJ1gTjAs+g== + dependencies: + define-properties "^1.1.3" + function-bind "^1.1.1" + +string_decoder@0.10: + version "0.10.31" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + dependencies: + safe-buffer "~5.1.0" + +strip-ansi@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + integrity sha1-qEeQIusaw2iocTibY1JixQXuNo8= + dependencies: + ansi-regex "^3.0.0" + +strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae" + integrity 
sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA== + dependencies: + ansi-regex "^4.1.0" + +strip-bom@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e" + dependencies: + is-utf8 "^0.2.0" + +strip-indent@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-1.0.1.tgz#0c7962a6adefa7bbd4ac366460a638552ae1a0a2" + dependencies: + get-stdin "^4.0.1" + +strip-json-comments@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= + +supports-color@6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-6.0.0.tgz#76cfe742cf1f41bb9b1c29ad03068c05b4c0e40a" + integrity sha512-on9Kwidc1IUQo+bQdhi8+Tijpo0e1SS6RoGo2guUwn5vdaxw8RXOF9Vb2ws+ihWOmh4JnCJOvaziZWP1VABaLg== + dependencies: + has-flag "^3.0.0" + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + +supports-color@^5.3.0: + version "5.4.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.4.0.tgz#1c6b337402c2137605efe19f10fec390f6faab54" + dependencies: + has-flag "^3.0.0" + +symbol-tree@^3.2.1: + version "3.2.2" + resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.2.tgz#ae27db38f660a7ae2e1c3b7d1bc290819b8519e6" + +"systemjs-builder@0.14.11 - 0.16.x": + version "0.16.13" + resolved "https://registry.yarnpkg.com/systemjs-builder/-/systemjs-builder-0.16.13.tgz#02b47d03afd1e2f29562b11ec8bc13457e785c76" + dependencies: + babel-core "^6.24.1" + babel-plugin-syntax-dynamic-import "^6.18.0" + babel-plugin-transform-amd-system-wrapper "^0.3.7" + babel-plugin-transform-cjs-system-wrapper "^0.6.2" + 
babel-plugin-transform-es2015-modules-systemjs "^6.6.5" + babel-plugin-transform-global-system-wrapper "^0.3.4" + babel-plugin-transform-system-register "^0.0.1" + bluebird "^3.3.4" + data-uri-to-buffer "0.0.4" + es6-template-strings "^2.0.0" + glob "^7.0.3" + mkdirp "^0.5.1" + rollup "^0.58.2" + source-map "^0.5.3" + systemjs "^0.19.46" + traceur "0.0.105" + uglify-js "^2.6.1" + +systemjs@^0.19.46: + version "0.19.47" + resolved "https://registry.yarnpkg.com/systemjs/-/systemjs-0.19.47.tgz#c8c93937180f3f5481c769cd2720763fb4a31c6f" + dependencies: + when "^3.7.5" + +tiny-lr@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/tiny-lr/-/tiny-lr-1.1.1.tgz#9fa547412f238fedb068ee295af8b682c98b2aab" + dependencies: + body "^5.1.0" + debug "^3.1.0" + faye-websocket "~0.10.0" + livereload-js "^2.3.0" + object-assign "^4.1.0" + qs "^6.4.0" + +to-fast-properties@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.2.tgz#f3f5c0c3ba7299a7ef99427e44633257ade43320" + +to-fast-properties@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.3.tgz#b83571fa4d8c25b82e231b06e3a3055de4ca1a47" + +tough-cookie@^2.3.2: + version "2.4.3" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.4.3.tgz#53f36da3f47783b0925afa06ff9f3b165280f781" + dependencies: + psl "^1.1.24" + punycode "^1.4.1" + +tough-cookie@~2.3.3: + version "2.3.4" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.4.tgz#ec60cee38ac675063ffc97a5c18970578ee83655" + dependencies: + punycode "^1.4.1" + +tr46@~0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" + +traceur@0.0.105: + version "0.0.105" + resolved "https://registry.yarnpkg.com/traceur/-/traceur-0.0.105.tgz#5cf9dee83d6b77861c3d6c44d53859aed7ab0479" + dependencies: + commander "2.9.x" + glob "5.0.x" + rsvp "^3.0.13" + 
semver "^4.3.3" + source-map-support "~0.2.8" + +trim-newlines@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/trim-newlines/-/trim-newlines-1.0.0.tgz#5887966bb582a4503a41eb524f7d35011815a613" + +trim-right@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003" + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + dependencies: + safe-buffer "^5.0.1" + +tweetnacl@^0.14.3, tweetnacl@~0.14.0: + version "0.14.5" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" + +type-check@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" + dependencies: + prelude-ls "~1.1.2" + +type-detect@0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-0.1.1.tgz#0ba5ec2a885640e470ea4e8505971900dac58822" + +type-detect@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-1.0.0.tgz#762217cc06db258ec48908a1298e8b95121e8ea2" + +typedarray@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" + +uglify-js@^2.6.1: + version "2.6.4" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.6.4.tgz#65ea2fb3059c9394692f15fed87c2b36c16b9adf" + dependencies: + async "~0.2.6" + source-map "~0.5.1" + uglify-to-browserify "~1.0.0" + yargs "~3.10.0" + +uglify-js@~2.8.21: + version "2.8.29" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.8.29.tgz#29c5733148057bb4e1f75df35b7a9cb72e6a59dd" + dependencies: + source-map "~0.5.1" + yargs "~3.10.0" + optionalDependencies: + uglify-to-browserify "~1.0.0" + +uglify-to-browserify@~1.0.0: + version "1.0.2" + 
resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7" + +underscore.string@~3.3.4: + version "3.3.5" + resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-3.3.5.tgz#fc2ad255b8bd309e239cbc5816fd23a9b7ea4023" + dependencies: + sprintf-js "^1.0.3" + util-deprecate "^1.0.2" + +uri-path@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/uri-path/-/uri-path-1.0.0.tgz#9747f018358933c31de0fccfd82d138e67262e32" + +util-deprecate@^1.0.2, util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + +uuid@^3.1.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.0.tgz#b237147804881d7b86f40a7ff8f590f15c37de32" + +validate-npm-package-license@^3.0.1: + version "3.0.3" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.3.tgz#81643bcbef1bdfecd4623793dc4648948ba98338" + dependencies: + spdx-correct "^3.0.0" + spdx-expression-parse "^3.0.0" + +verror@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c" + dependencies: + extsprintf "1.0.2" + +webidl-conversions@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" + +webidl-conversions@^4.0.0: + version "4.0.2" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.2.tgz#a855980b1f0b6b359ba1d5d9fb39ae941faa63ad" + +websocket-driver@>=0.5.1: + version "0.7.0" + resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.7.0.tgz#0caf9d2d755d93aee049d4bdd0d3fe2cca2a24eb" + dependencies: + http-parser-js ">=0.4.0" + websocket-extensions ">=0.1.1" + +websocket-extensions@>=0.1.1: + version "0.1.3" + resolved 
"https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.3.tgz#5d2ff22977003ec687a4b87073dfbbac146ccf29" + +whatwg-encoding@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.3.tgz#57c235bc8657e914d24e1a397d3c82daee0a6ba3" + dependencies: + iconv-lite "0.4.19" + +whatwg-url@^4.3.0: + version "4.8.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-4.8.0.tgz#d2981aa9148c1e00a41c5a6131166ab4683bbcc0" + dependencies: + tr46 "~0.0.3" + webidl-conversions "^3.0.0" + +when@^3.7.5: + version "3.7.8" + resolved "https://registry.yarnpkg.com/when/-/when-3.7.8.tgz#c7130b6a7ea04693e842cdc9e7a1f2aa39a39f82" + +which-module@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" + integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho= + +which@1.3.1, which@~1.3.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" + dependencies: + isexe "^2.0.0" + +wide-align@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" + integrity sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA== + dependencies: + string-width "^1.0.2 || 2" + +window-size@0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d" + +wordwrap@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f" + +wordwrap@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" + +wrap-ansi@^5.1.0: + version "5.1.0" + resolved 
"https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-5.1.0.tgz#1fd1f67235d5b6d0fee781056001bfb694c03b09" + integrity sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q== + dependencies: + ansi-styles "^3.2.0" + string-width "^3.0.0" + strip-ansi "^5.0.0" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + +xml-name-validator@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-2.0.1.tgz#4d8b8f1eccd3419aa362061becef515e1e559635" + +xtend@~4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af" + +y18n@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.0.tgz#95ef94f85ecc81d007c264e190a120f0a3c8566b" + integrity sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w== + +yargs-parser@13.1.1, yargs-parser@^13.1.1: + version "13.1.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-13.1.1.tgz#d26058532aa06d365fe091f6a1fc06b2f7e5eca0" + integrity sha512-oVAVsHz6uFrg3XQheFII8ESO2ssAf9luWuAd6Wexsu4F3OtIW0o8IribPXYrD4WC24LWtPrJlGy87y5udK+dxQ== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs-unparser@1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-1.6.0.tgz#ef25c2c769ff6bd09e4b0f9d7c605fb27846ea9f" + integrity sha512-W9tKgmSn0DpSatfri0nx52Joq5hVXgeLiqR/5G0sZNDoLZFOr/xjBUDcShCOGNsBnEMNo1KAMBkTej1Hm62HTw== + dependencies: + flat "^4.1.0" + lodash "^4.17.15" + yargs "^13.3.0" + +yargs@13.3.0, yargs@^13.3.0: + version "13.3.0" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-13.3.0.tgz#4c657a55e07e5f2cf947f8a366567c04a0dedc83" + integrity sha512-2eehun/8ALW8TLoIl7MVaRUrg+yCnenu8B4kBlRxj3GJGDKU1Og7sMXPNm1BYyM1DOJmTZ4YeN/Nwxv+8XJsUA== + dependencies: + cliui 
"^5.0.0" + find-up "^3.0.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^3.0.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^13.1.1" + +yargs@~3.10.0: + version "3.10.0" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1" + dependencies: + camelcase "^1.0.2" + cliui "^2.1.0" + decamelize "^1.0.0" + window-size "0.1.0" + +yarn@^1.22.0: + version "1.22.0" + resolved "https://registry.yarnpkg.com/yarn/-/yarn-1.22.0.tgz#acf82906e36bcccd1ccab1cfb73b87509667c881" + integrity sha512-KMHP/Jq53jZKTY9iTUt3dIVl/be6UPs2INo96+BnZHLKxYNTfwMmlgHTaMWyGZoO74RI4AIFvnWhYrXq2USJkg== diff --git a/src/connector/jdbc/readme.md b/src/connector/jdbc/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..e81f078c153046265cbe9a856f7b48e26fc071fc --- /dev/null +++ b/src/connector/jdbc/readme.md @@ -0,0 +1,329 @@ + +## TAOS-JDBCDriver 概述 + +TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。 + +由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 + +* libtaos.so + 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 + +* taos.dll + 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 + +> 注意:在 windows 环境开发时需要安装 TDengine 对应的 windows 版本客户端,由于目前没有提供 Linux 环境单独的客户端,需要安装 TDengine 才能使用。 + +TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点: + +* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。 +* 由于不支持删除和修改,所以也不支持事务操作。 +* 目前不支持表间的 union 操作。 +* 目前不支持嵌套查询(nested query),`对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet`。 + + +## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 + +| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | +| --- | --- | --- | +| 1.0.3 | 1.6.1.x 
及以上 | 1.8.x | +| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | + +## TDengine DataType 和 Java DataType + +TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: + +| TDengine DataType | Java DataType | +| --- | --- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT, TINYINT |java.lang.Short | +| BOOL | java.lang.Boolean | +| BINARY, NCHAR | java.lang.String | + +## 如何获取 TAOS-JDBCDriver + +### maven 仓库 + +目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。 +* [sonatype][8] +* [mvnrepository][9] +* [maven.aliyun][10] + +maven 项目中使用如下 pom.xml 配置即可: + +```xml + + + com.taosdata.jdbc + taos-jdbcdriver + 1.0.3 + + +``` + +### 源码编译打包 + +下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。 + + +## 使用说明 + +### 获取连接 + +如下所示配置即可获取 TDengine Connection: +```java +Class.forName("com.taosdata.jdbc.TSDBDriver"); +String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` +> 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 + +TDengine 的 JDBC URL 规范格式为: +`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` + +其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: + +* user:登录 TDengine 用户名,默认值 root。 +* password:用户登录密码,默认值 taosdata。 +* charset:客户端使用的字符集,默认值为系统字符集。 +* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 +* locale:客户端语言环境,默认值系统当前 locale。 +* timezone:客户端使用的时区,默认值为系统当前时区。 + +以上参数可以在 3 处配置,`优先级由高到低`分别如下: +1. JDBC URL 参数 + 如上所述,可以在 JDBC URL 的参数中指定。 +2. 
java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps) +```java +public Connection getConn() throws Exception{ + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; +} +``` + +3. 客户端配置文件 taos.cfg + + linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。 +```properties +# client default username +# defaultUser root + +# client default password +# defaultPass taosdata + +# default system charset +# charset UTF-8 + +# system locale +# locale en_US.UTF-8 +``` +> 更多详细配置请参考[客户端配置][13] + +### 创建数据库和表 + +```java +Statement stmt = conn.createStatement(); + +// create database +stmt.executeUpdate("create database if not exists db"); + +// use database +stmt.executeUpdate("use db"); + +// create table +stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); +``` +> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。 + +### 插入数据 + +```java +// insert data +int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); + +System.out.println("insert " + affectedRows + " rows."); +``` +> now 为系统内部函数,默认为服务器当前时间。 +> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。 + +### 查询数据 + +```java +// query data +ResultSet resultSet = stmt.executeQuery("select * from tb"); + +Timestamp ts = null; +int temperature = 
0; +float humidity = 0; +while(resultSet.next()){ + + ts = resultSet.getTimestamp(1); + temperature = resultSet.getInt(2); + humidity = resultSet.getFloat("humidity"); + + System.out.printf("%s, %d, %s\n", ts, temperature, humidity); +} +``` +> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 + + +### 关闭资源 + +```java +resultSet.close(); +stmt.close(); +conn.close(); +``` +> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 +## 与连接池使用 + +**HikariCP** + +* 引入相应 HikariCP maven 依赖: +```xml + + com.zaxxer + HikariCP + 3.4.1 + +``` + +* 使用示例如下: +```java + public static void main(String[] args) throws SQLException { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + config.setPassword("taosdata"); + + config.setMinimumIdle(3); //minimum number of idle connection + config.setMaximumPoolSize(10); //maximum number of connection in the pool + config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool + config.setIdleTimeout(60000); // max idle time for recycle idle connection + config.setConnectionTestQuery("describe log.dn"); //validation query + config.setValidationTimeout(3000); //validation query timeout + + HikariDataSource ds = new HikariDataSource(config); //create datasource + + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... 
+ + connection.close(); // put back to conneciton pool +} +``` +> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 +> 更多 HikariCP 使用问题请查看[官方说明][5] + +**Druid** + +* 引入相应 Druid maven 依赖: + +```xml + + com.alibaba + druid + 1.1.20 + +``` + +* 使用示例如下: +```java +public static void main(String[] args) throws Exception { + Properties properties = new Properties(); + properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver"); + properties.put("url","jdbc:TAOS://127.0.0.1:6030/log"); + properties.put("username","root"); + properties.put("password","taosdata"); + + properties.put("maxActive","10"); //maximum number of connection in the pool + properties.put("initialSize","3");//initial number of connection + properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool + properties.put("minIdle","3");//minimum number of connection in the pool + + properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection + + properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle + properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle + + properties.put("validationQuery","describe log.dn"); //validation query + properties.put("testWhileIdle","true"); // test connection while idle + properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true + properties.put("testOnReturn","false"); // don't need while testWhileIdle is true + + //create druid datasource + DataSource ds = DruidDataSourceFactory.createDataSource(properties); + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... 
+ + connection.close(); // put back to conneciton pool +} +``` +> 更多 druid 使用问题请查看[官方说明][6] + +**注意事项** +* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。 + +如下所示,`select server_status()` 执行成功会返回 `1`。 +```shell +taos> select server_status(); +server_status()| +================ +1 | +Query OK, 1 row(s) in set (0.000141s) +``` + +## 与框架使用 + +* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11] +* Springboot + Mybatis 中使用,可参考 [springbootdemo][12] + +## 常见问题 + +* java.lang.UnsatisfiedLinkError: no taos in java.library.path + + **原因**:程序没有找到依赖的本地函数库 taos。 + + **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 + +* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform + + **原因**:目前 TDengine 只支持 64 位 JDK。 + + **解决方法**:重新安装 64 位 JDK。 + +* 其它问题请参考 [Issues][7] + + + +[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[3]: https://github.com/taosdata/TDengine +[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/ +[5]: https://github.com/brettwooldridge/HikariCP +[6]: https://github.com/alibaba/druid +[7]: https://github.com/taosdata/TDengine/issues +[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[10]: https://maven.aliyun.com/mvn/search +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo +[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE \ No newline at end of file diff 
--git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py index 86a3489d0789d19a73f5318563569d4527845313..505619436cc1ad5d01a4134aede29477c6f6ae48 100644 --- a/src/connector/python/linux/python2/taos/cinterface.py +++ b/src/connector/python/linux/python2/taos/cinterface.py @@ -13,14 +13,14 @@ def _convert_microsecond_to_datetime(micro): def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ - _timstamp_converter = _convert_millisecond_to_datetime + _timestamp_converter = _convert_millisecond_to_datetime if micro: - _timstamp_converter = _convert_microsecond_to_datetime + _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1])) else: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -144,6 +144,8 @@ class CTaosInterface(object): libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p def __init__(self, config=None): ''' @@ -252,6 +254,41 @@ class CTaosInterface(object): """ return CTaosInterface.libtaos.taos_affected_rows(connection) + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must 
be a 'select' statement. + @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + @staticmethod def useResult(connection): '''Use result after calling self.query @@ -275,8 +312,8 @@ class CTaosInterface(object): if num_of_rows == 0: return None, 0 - blocks = [None] * len(fields) isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] @@ -351,4 +388,20 @@ class CTaosInterface(object): def errStr(connection): """Return the error styring """ - return CTaosInterface.libtaos.taos_errstr(connection) \ No newline at end of file + return CTaosInterface.libtaos.taos_errstr(connection) + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + + print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) + print('Affected rows: {}'.format(cinter.affectedRows(conn))) + + result, des = CTaosInterface.useResult(conn) + + data, num_of_rows = CTaosInterface.fetchBlock(result, des) + + print(data) + + cinter.close(conn) \ No newline at end of file diff --git 
a/src/connector/python/linux/python2/taos/connection.py b/src/connector/python/linux/python2/taos/connection.py index ba24209552600d6ee75258f929eeff829dd7b675..04fbbdec04144624a0b1f4ba25083a91ade21cce 100644 --- a/src/connector/python/linux/python2/taos/connection.py +++ b/src/connector/python/linux/python2/taos/connection.py @@ -1,5 +1,5 @@ -# from .cursor import TDengineCursor from .cursor import TDengineCursor +from .subscription import TDengineSubscription from .cinterface import CTaosInterface class TDengineConnection(object): @@ -50,6 +50,14 @@ class TDengineConnection(object): """ return CTaosInterface.close(self._conn) + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + def cursor(self): """Return a new Cursor object using the connection. """ diff --git a/src/connector/python/linux/python2/taos/subscription.py b/src/connector/python/linux/python2/taos/subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..2d01395532820c3bd0e068ef7eb3d425eaaa6d78 --- /dev/null +++ b/src/connector/python/linux/python2/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close 
the Subscription. + """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py index 259c8bbd060b44f7c1b60b5c015519ed862c8ec2..7fcedc9fe9400cc8db007897906d4568c2eb234f 100644 --- a/src/connector/python/linux/python3/taos/cinterface.py +++ b/src/connector/python/linux/python3/taos/cinterface.py @@ -144,6 +144,8 @@ class CTaosInterface(object): libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p def __init__(self, config=None): ''' @@ -252,6 +254,41 @@ class CTaosInterface(object): """ return CTaosInterface.libtaos.taos_affected_rows(connection) + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. 
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + @staticmethod def useResult(connection): '''Use result after calling self.query diff --git a/src/connector/python/linux/python3/taos/connection.py b/src/connector/python/linux/python3/taos/connection.py index ba24209552600d6ee75258f929eeff829dd7b675..04fbbdec04144624a0b1f4ba25083a91ade21cce 100644 --- a/src/connector/python/linux/python3/taos/connection.py +++ b/src/connector/python/linux/python3/taos/connection.py @@ -1,5 +1,5 @@ -# from .cursor import TDengineCursor from .cursor import TDengineCursor +from .subscription import TDengineSubscription from .cinterface import CTaosInterface class TDengineConnection(object): @@ -50,6 +50,14 @@ class TDengineConnection(object): """ return CTaosInterface.close(self._conn) + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + def cursor(self): """Return a new Cursor object using the connection. 
""" diff --git a/src/connector/python/linux/python3/taos/subscription.py b/src/connector/python/linux/python3/taos/subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..d3cf10d5ada578687689b94454378dd543368e3e --- /dev/null +++ b/src/connector/python/linux/python3/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. 
+ """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py index 8e3b7019290153a8bce475a5f2db43bc8ab04399..f8cdfcc51ea1ea9ae5789c47f2b9e54879a53934 100644 --- a/src/connector/python/windows/python2/taos/cinterface.py +++ b/src/connector/python/windows/python2/taos/cinterface.py @@ -13,14 +13,14 @@ def _convert_microsecond_to_datetime(micro): def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ - _timstamp_converter = _convert_millisecond_to_datetime + _timestamp_converter = _convert_millisecond_to_datetime if micro: - _timstamp_converter = _convert_microsecond_to_datetime + _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) else: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -144,6 +144,8 @@ class CTaosInterface(object): 
libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p def __init__(self, config=None): ''' @@ -252,6 +254,41 @@ class CTaosInterface(object): """ return CTaosInterface.libtaos.taos_affected_rows(connection) + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. + @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + @staticmethod def useResult(connection): '''Use result after calling self.query @@ -275,8 +312,8 @@ class CTaosInterface(object): if num_of_rows == 0: return None, 0 - blocks = [None] * len(fields) isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] @@ -351,4 +388,20 @@ class CTaosInterface(object): def errStr(connection): """Return the error styring """ - return 
CTaosInterface.libtaos.taos_errstr(connection) \ No newline at end of file + return CTaosInterface.libtaos.taos_errstr(connection) + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + + print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) + print('Affected rows: {}'.format(cinter.affectedRows(conn))) + + result, des = CTaosInterface.useResult(conn) + + data, num_of_rows = CTaosInterface.fetchBlock(result, des) + + print(data) + + cinter.close(conn) \ No newline at end of file diff --git a/src/connector/python/windows/python2/taos/connection.py b/src/connector/python/windows/python2/taos/connection.py index ba24209552600d6ee75258f929eeff829dd7b675..e2783975d9c0f63c82a90b41a11fbaa0a3ffb5ac 100644 --- a/src/connector/python/windows/python2/taos/connection.py +++ b/src/connector/python/windows/python2/taos/connection.py @@ -1,5 +1,5 @@ -# from .cursor import TDengineCursor from .cursor import TDengineCursor +from .subscription import TDengineSubscription from .cinterface import CTaosInterface class TDengineConnection(object): @@ -15,7 +15,8 @@ class TDengineConnection(object): self._config = None self._chandle = None - self.config(**kwargs) + if len(kwargs) > 0: + self.config(**kwargs) def config(self, **kwargs): # host @@ -50,6 +51,14 @@ class TDengineConnection(object): """ return CTaosInterface.close(self._conn) + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + def cursor(self): """Return a new Cursor object using the connection. 
""" diff --git a/src/connector/python/windows/python2/taos/subscription.py b/src/connector/python/windows/python2/taos/subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..d3cf10d5ada578687689b94454378dd543368e3e --- /dev/null +++ b/src/connector/python/windows/python2/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. 
+ """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py index 2cddf5fccf606be3d6dc60a538636204ee59565c..b4b44e199c37cf90c9beddb16433591bc0713b23 100644 --- a/src/connector/python/windows/python3/taos/cinterface.py +++ b/src/connector/python/windows/python3/taos/cinterface.py @@ -1,370 +1,407 @@ -import ctypes -from .constants import FieldType -from .error import * -import math -import datetime - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli/1000.0) - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) - else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, 
ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] - else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, 
ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] - else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] - else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] - else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - - res = [] - - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - res.append( (ctypes.cast(data+nbytes*(abs(num_of_rows - i -1)), ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) - else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) - except ValueError: - res.append(None) - - return res - # if num_of_rows > 0: - # for i 
in range(abs(num_of_rows)): - # try: - # res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) - # except ValueError: - # res.append(None) - # return res - # # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)][::-1]] - # else: - # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)]] - -_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python -} - -# Corresponding TAOS_FIELD structure in C -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 64), - ('bytes', ctypes.c_short), - ('type', ctypes.c_char)] - -# C interface class -class CTaosInterface(object): - - libtaos = ctypes.windll.LoadLibrary('taos') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except 
AttributeError: - raise AttributeError("config is expected as a str") - - if config != None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect(self, host=None, user="root", password="taosdata", db=None, port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value == None: - print('connect to TDengine failed') - # sys.exit(1) - else: - print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - CTaosInterface.libtaos.taos_close(connection) - print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - @rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # 
CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(connection): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(connection) - - @staticmethod - def useResult(connection): - '''Use result after calling self.query - ''' - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(connection)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return result, fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - - if num_of_rows == 0: - return None, 0 - - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") - - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fields[i]['bytes'], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(connection): - return CTaosInterface.libtaos.taos_field_count(connection) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # @staticmethod - # def 
getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - @staticmethod - def errno(connection): - """Return the error number. 
- """ - return CTaosInterface.libtaos.taos_errno(connection) - - @staticmethod - def errStr(connection): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(connection).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - - print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) - print('Affected rows: {}'.format(cinter.affectedRows(conn))) - - result, des = CTaosInterface.useResult(conn) - - data, num_of_rows = CTaosInterface.fetchBlock(result, des) - - print(data) - +import ctypes +from .constants import FieldType +from .error import * +import math +import datetime + +def _convert_millisecond_to_datetime(milli): + return datetime.datetime.fromtimestamp(milli/1000.0) + +def _convert_microsecond_to_datetime(micro): + return datetime.datetime.fromtimestamp(micro/1000000.0) + +def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + _timestamp_converter = _convert_millisecond_to_datetime + if micro: + _timestamp_converter = _convert_microsecond_to_datetime + + if num_of_rows > 0: + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) + else: + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + +def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + +def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C tinyint row to python row + 
""" + if num_of_rows > 0: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + +def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] + else: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + +def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + +def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + +def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C float row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] + else: + return [ None if 
math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + +def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C double row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + +def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + if num_of_rows > 0: + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] + else: + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + +def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not None) + + res = [] + + for i in range(abs(num_of_rows)): + try: + if num_of_rows >= 0: + res.append( (ctypes.cast(data+nbytes*(abs(num_of_rows - i -1)), ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + else: + res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + except ValueError: + res.append(None) + + return res + # if num_of_rows > 0: + # for i in range(abs(num_of_rows)): + # try: + # res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + # except ValueError: + # res.append(None) + # return res + # # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)][::-1]] + # else: + # return [ele.value for ele in 
(ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)]] + +_CONVERT_FUNC = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT : _crow_tinyint_to_python, + FieldType.C_SMALLINT : _crow_smallint_to_python, + FieldType.C_INT : _crow_int_to_python, + FieldType.C_BIGINT : _crow_bigint_to_python, + FieldType.C_FLOAT : _crow_float_to_python, + FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python, + FieldType.C_TIMESTAMP : _crow_timestamp_to_python, + FieldType.C_NCHAR : _crow_nchar_to_python +} + +# Corresponding TAOS_FIELD structure in C +class TaosField(ctypes.Structure): + _fields_ = [('name', ctypes.c_char * 64), + ('bytes', ctypes.c_short), + ('type', ctypes.c_char)] + +# C interface class +class CTaosInterface(object): + + libtaos = ctypes.windll.LoadLibrary('taos') + + libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) + libtaos.taos_init.restype = None + libtaos.taos_connect.restype = ctypes.c_void_p + libtaos.taos_use_result.restype = ctypes.c_void_p + libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) + libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p + + def __init__(self, config=None): + ''' + Function to initialize the class + @host : str, hostname to connect + @user : str, username to connect to server + @password : str, password to connect to server + @db : str, default db to use when log in + @config : str, config directory + + @rtype : None + ''' + if config is None: + self._config = ctypes.c_char_p(None) + else: + try: + self._config = ctypes.c_char_p(config.encode('utf-8')) + except AttributeError: + raise AttributeError("config is expected as a str") + + if config != None: + CTaosInterface.libtaos.taos_options(3, self._config) + + CTaosInterface.libtaos.taos_init() + + @property + def config(self): + """ Get current config + """ + return self._config + 
+ def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + ''' + Function to connect to server + + @rtype: c_void_p, TDengine handle + ''' + # host + try: + _host = ctypes.c_char_p(host.encode( + "utf-8")) if host != None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("host is expected as a str") + + # user + try: + _user = ctypes.c_char_p(user.encode("utf-8")) + except AttributeError: + raise AttributeError("user is expected as a str") + + # password + try: + _password = ctypes.c_char_p(password.encode("utf-8")) + except AttributeError: + raise AttributeError("password is expected as a str") + + # db + try: + _db = ctypes.c_char_p( + db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("db is expected as a str") + + # port + try: + _port = ctypes.c_int(port) + except TypeError: + raise TypeError("port is expected as an int") + + connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( + _host, _user, _password, _db, _port)) + + if connection.value == None: + print('connect to TDengine failed') + # sys.exit(1) + else: + print('connect to TDengine success') + + return connection + + @staticmethod + def close(connection): + '''Close the TDengine handle + ''' + CTaosInterface.libtaos.taos_close(connection) + print('connection is closed') + + @staticmethod + def query(connection, sql): + '''Run SQL + + @sql: str, sql string to run + + @rtype: 0 on success and -1 on failure + ''' + try: + return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + except AttributeError: + raise AttributeError("sql is expected as a string") + # finally: + # CTaosInterface.libtaos.close(connection) + + @staticmethod + def affectedRows(connection): + """The affected rows after runing query + """ + return CTaosInterface.libtaos.taos_affected_rows(connection) + + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + 
"""Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. + @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + + @staticmethod + def useResult(connection): + '''Use result after calling self.query + ''' + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.fieldsCount(connection)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + + return result, fields + + @staticmethod + def fetchBlock(result, fields): + pblock = ctypes.c_void_p(0) + num_of_rows = CTaosInterface.libtaos.taos_fetch_block( + result, ctypes.byref(pblock)) + + if num_of_rows == 0: + return None, 0 + + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError("Invalid data type returned from database") + + blocks[i] = 
_CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fields[i]['bytes'], isMicro) + + return blocks, abs(num_of_rows) + + @staticmethod + def freeResult(result): + CTaosInterface.libtaos.taos_free_result(result) + result.value = None + + @staticmethod + def fieldsCount(connection): + return CTaosInterface.libtaos.taos_field_count(connection) + + @staticmethod + def fetchFields(result): + return CTaosInterface.libtaos.taos_fetch_fields(result) + + # @staticmethod + # def fetchRow(result, fields): + # l = [] + # row = CTaosInterface.libtaos.taos_fetch_row(result) + # if not row: + # return None + + # for i in range(len(fields)): + # l.append(CTaosInterface.getDataValue( + # row[i], fields[i]['type'], fields[i]['bytes'])) + + # return tuple(l) + + # @staticmethod + # def getDataValue(data, dtype, byte): + # ''' + # ''' + # if not data: + # return None + + # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): + # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == 
CTaosInterface.TSDB_DATA_TYPE_NCHAR): + # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') + + @staticmethod + def errno(connection): + """Return the error number. + """ + return CTaosInterface.libtaos.taos_errno(connection) + + @staticmethod + def errStr(connection): + """Return the error styring + """ + return CTaosInterface.libtaos.taos_errstr(connection).decode('utf-8') + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + + print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) + print('Affected rows: {}'.format(cinter.affectedRows(conn))) + + result, des = CTaosInterface.useResult(conn) + + data, num_of_rows = CTaosInterface.fetchBlock(result, des) + + print(data) + cinter.close(conn) \ No newline at end of file diff --git a/src/connector/python/windows/python3/taos/connection.py b/src/connector/python/windows/python3/taos/connection.py index a88e25a6db6f187f2a257303189c66851bb260f6..e2783975d9c0f63c82a90b41a11fbaa0a3ffb5ac 100644 --- a/src/connector/python/windows/python3/taos/connection.py +++ b/src/connector/python/windows/python3/taos/connection.py @@ -1,81 +1,89 @@ -# from .cursor import TDengineCursor -from .cursor import TDengineCursor -from .cinterface import CTaosInterface - -class TDengineConnection(object): - """ TDengine connection object - """ - def __init__(self, *args, **kwargs): - self._conn = None - self._host = None - self._user = "root" - self._password = "taosdata" - self._database = None - self._port = 0 - self._config = None - self._chandle = None - - if len(kwargs) > 0: - self.config(**kwargs) - - def config(self, **kwargs): - # host - if 'host' in kwargs: - self._host = kwargs['host'] - - # user - if 'user' in kwargs: - self._user = kwargs['user'] - - # password - if 'password' in kwargs: - self._password = kwargs['password'] - - # database - if 'database' in kwargs: - self._database = kwargs['database'] - - # port - if 'port' in kwargs: - self._port = 
kwargs['port'] - - # config - if 'config' in kwargs: - self._config = kwargs['config'] - - self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) - - def close(self): - """Close current connection. - """ - return CTaosInterface.close(self._conn) - - def cursor(self): - """Return a new Cursor object using the connection. - """ - return TDengineCursor(self) - - def commit(self): - """Commit any pending transaction to the database. - - Since TDengine do not support transactions, the implement is void functionality. - """ - pass - - def rollback(self): - """Void functionality - """ - pass - - def clear_result_set(self): - """Clear unused result set on this connection. - """ - result = self._chandle.useResult(self._conn)[0] - if result: - self._chandle.freeResult(result) - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() +from .cursor import TDengineCursor +from .subscription import TDengineSubscription +from .cinterface import CTaosInterface + +class TDengineConnection(object): + """ TDengine connection object + """ + def __init__(self, *args, **kwargs): + self._conn = None + self._host = None + self._user = "root" + self._password = "taosdata" + self._database = None + self._port = 0 + self._config = None + self._chandle = None + + if len(kwargs) > 0: + self.config(**kwargs) + + def config(self, **kwargs): + # host + if 'host' in kwargs: + self._host = kwargs['host'] + + # user + if 'user' in kwargs: + self._user = kwargs['user'] + + # password + if 'password' in kwargs: + self._password = kwargs['password'] + + # database + if 'database' in kwargs: + self._database = kwargs['database'] + + # port + if 'port' in kwargs: + self._port = kwargs['port'] + + # config + if 'config' in kwargs: + self._config = kwargs['config'] + + self._chandle = CTaosInterface(self._config) + self._conn = self._chandle.connect(self._host, self._user, 
self._password, self._database, self._port) + + def close(self): + """Close current connection. + """ + return CTaosInterface.close(self._conn) + + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + + def cursor(self): + """Return a new Cursor object using the connection. + """ + return TDengineCursor(self) + + def commit(self): + """Commit any pending transaction to the database. + + Since TDengine do not support transactions, the implement is void functionality. + """ + pass + + def rollback(self): + """Void functionality + """ + pass + + def clear_result_set(self): + """Clear unused result set on this connection. + """ + result = self._chandle.useResult(self._conn)[0] + if result: + self._chandle.freeResult(result) + +if __name__ == "__main__": + conn = TDengineConnection(host='192.168.1.107') + conn.close() print("Hello world") \ No newline at end of file diff --git a/src/connector/python/windows/python3/taos/subscription.py b/src/connector/python/windows/python3/taos/subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..d3cf10d5ada578687689b94454378dd543368e3e --- /dev/null +++ b/src/connector/python/windows/python3/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + 
self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. + """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/inc/hash.h b/src/inc/hash.h new file mode 100644 index 0000000000000000000000000000000000000000..14c73fb37015042f2be0dd31be89ba59374ce098 --- /dev/null +++ b/src/inc/hash.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_HASH_H +#define TDENGINE_HASH_H + +#include "hashutil.h" + +#define HASH_MAX_CAPACITY (1024 * 1024 * 16) +#define HASH_VALUE_IN_TRASH (-1) +#define HASH_DEFAULT_LOAD_FACTOR (0.75) +#define HASH_INDEX(v, c) ((v) & ((c)-1)) + +typedef struct SHashNode { + char *key; // null-terminated string + union { + struct SHashNode * prev; + struct SHashEntry *prev1; + }; + + struct SHashNode *next; + uint32_t hashVal; // the hash value of key, if hashVal == HASH_VALUE_IN_TRASH, this node is moved to trash + uint32_t keyLen; // length of the key + char data[]; +} SHashNode; + +typedef struct SHashEntry { + SHashNode *next; + uint32_t num; +} SHashEntry; + +typedef struct HashObj { + SHashEntry **hashList; + uint32_t capacity; // number of slots + int size; // number of elements in hash table + _hash_fn_t hashFp; // hash function + bool multithreadSafe; // enable lock or not + +#if defined LINUX + pthread_rwlock_t lock; +#else + pthread_mutex_t lock; +#endif + +} HashObj; + +void *taosInitHashTable(uint32_t capacity, _hash_fn_t fn, bool multithreadSafe); +void taosDeleteFromHashTable(HashObj *pObj, const char *key, uint32_t keyLen); + +int32_t taosAddToHashTable(HashObj *pObj, const char *key, uint32_t keyLen, void *data, uint32_t size); +int32_t taosNumElemsInHashTable(HashObj *pObj); + +char *taosGetDataFromHashTable(HashObj *pObj, const char *key, uint32_t keyLen); + + +void taosCleanUpHashTable(void *handle); + +int32_t taosGetHashMaxOverflowLength(HashObj *pObj); + +int32_t taosCheckHashTable(HashObj *pObj); + +#endif // TDENGINE_HASH_H diff --git a/src/inc/hashutil.h b/src/inc/hashutil.h new file mode 100644 index 0000000000000000000000000000000000000000..047f1889d78d6f8559bd0e320a0e9bae2beaa681 --- /dev/null +++ b/src/inc/hashutil.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_HASHUTIL_H +#define TDENGINE_HASHUTIL_H + +#include "os.h" + +typedef uint32_t (*_hash_fn_t)(const char *, uint32_t); + +/** + * murmur hash algorithm + * @key usually string + * @len key length + * @seed hash seed + * @out an int32 value + */ +uint32_t MurmurHash3_32(const char *key, uint32_t len); + +/** + * + * @param key + * @param len + * @return + */ +uint32_t taosIntHash_32(const char *key, uint32_t len); + +uint32_t taosIntHash_64(const char *key, uint32_t len); + +_hash_fn_t taosGetDefaultHashFunction(int32_t type); + +#endif //TDENGINE_HASHUTIL_H diff --git a/src/inc/sdb.h b/src/inc/sdb.h index 389aecfb7b6a3ea047251d45d853b8f3e873021d..a0e0a1b2f2e8815f5425a65f6f793fad4b0bc847 100644 --- a/src/inc/sdb.h +++ b/src/inc/sdb.h @@ -105,7 +105,7 @@ extern SSdbPeer *sdbPeer[]; #endif -void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, char keyType, char *directory, +void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, uint8_t keyType, char *directory, void *(*appTool)(char, void *, char *, int, int *)); void *sdbGetRow(void *handle, void *key); diff --git a/src/inc/sql.y b/src/inc/sql.y index 3d0ded56ebd245dd7591378f1f5b643f532f2a33..2b7e0b628cb0cf951db287722036523749e0fdd8 100644 --- a/src/inc/sql.y +++ b/src/inc/sql.y @@ -31,7 +31,7 @@ } %syntax_error { - pInfo->validSql = false; + pInfo->valid = false; int32_t outputBufLen = tListLen(pInfo->pzErrMsg); int32_t len = 0; @@ -59,25 
+59,25 @@ program ::= cmd. {} //////////////////////////////////THE SHOW STATEMENT/////////////////////////////////////////// -cmd ::= SHOW DATABASES. { setDCLSQLElems(pInfo, SHOW_DATABASES, 0);} -cmd ::= SHOW MNODES. { setDCLSQLElems(pInfo, SHOW_MNODES, 0);} -cmd ::= SHOW DNODES. { setDCLSQLElems(pInfo, SHOW_DNODES, 0);} -cmd ::= SHOW ACCOUNTS. { setDCLSQLElems(pInfo, SHOW_ACCOUNTS, 0);} -cmd ::= SHOW USERS. { setDCLSQLElems(pInfo, SHOW_USERS, 0);} - -cmd ::= SHOW MODULES. { setDCLSQLElems(pInfo, SHOW_MODULES, 0); } -cmd ::= SHOW QUERIES. { setDCLSQLElems(pInfo, SHOW_QUERIES, 0); } -cmd ::= SHOW CONNECTIONS.{ setDCLSQLElems(pInfo, SHOW_CONNECTIONS, 0);} -cmd ::= SHOW STREAMS. { setDCLSQLElems(pInfo, SHOW_STREAMS, 0); } -cmd ::= SHOW CONFIGS. { setDCLSQLElems(pInfo, SHOW_CONFIGS, 0); } -cmd ::= SHOW SCORES. { setDCLSQLElems(pInfo, SHOW_SCORES, 0); } -cmd ::= SHOW GRANTS. { setDCLSQLElems(pInfo, SHOW_GRANTS, 0); } - -cmd ::= SHOW VNODES. { setDCLSQLElems(pInfo, SHOW_VNODES, 0); } -cmd ::= SHOW VNODES IPTOKEN(X). { setDCLSQLElems(pInfo, SHOW_VNODES, 1, &X); } +cmd ::= SHOW DATABASES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_DB, 0, 0);} +cmd ::= SHOW MNODES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);} +cmd ::= SHOW DNODES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);} +cmd ::= SHOW ACCOUNTS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);} +cmd ::= SHOW USERS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_USER, 0, 0);} + +cmd ::= SHOW MODULES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_MODULE, 0, 0); } +cmd ::= SHOW QUERIES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_QUERIES, 0, 0); } +cmd ::= SHOW CONNECTIONS.{ setShowOptions(pInfo, TSDB_MGMT_TABLE_CONNS, 0, 0);} +cmd ::= SHOW STREAMS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_STREAMS, 0, 0); } +cmd ::= SHOW CONFIGS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_CONFIGS, 0, 0); } +cmd ::= SHOW SCORES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_SCORES, 0, 0); } +cmd ::= SHOW GRANTS. 
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0); } + +cmd ::= SHOW VNODES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); } +cmd ::= SHOW VNODES IPTOKEN(X). { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &X, 0); } %type dbPrefix {SSQLToken} -dbPrefix(A) ::=. {A.n = 0;} +dbPrefix(A) ::=. {A.n = 0; A.type = 0;} dbPrefix(A) ::= ids(X) DOT. {A = X; } %type cpxName {SSQLToken} @@ -85,60 +85,66 @@ cpxName(A) ::= . {A.n = 0; } cpxName(A) ::= DOT ids(Y). {A = Y; A.n += 1; } cmd ::= SHOW dbPrefix(X) TABLES. { - setDCLSQLElems(pInfo, SHOW_TABLES, 1, &X); + setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &X, 0); } cmd ::= SHOW dbPrefix(X) TABLES LIKE ids(Y). { - setDCLSQLElems(pInfo, SHOW_TABLES, 2, &X, &Y); + setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &X, &Y); } cmd ::= SHOW dbPrefix(X) STABLES. { - setDCLSQLElems(pInfo, SHOW_STABLES, 1, &X); + setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &X, 0); } cmd ::= SHOW dbPrefix(X) STABLES LIKE ids(Y). { SSQLToken token; setDBName(&token, &X); - setDCLSQLElems(pInfo, SHOW_STABLES, 2, &token, &Y); + setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &Y); } cmd ::= SHOW dbPrefix(X) VGROUPS. { SSQLToken token; setDBName(&token, &X); - setDCLSQLElems(pInfo, SHOW_VGROUPS, 1, &token); + setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); +} + +cmd ::= SHOW dbPrefix(X) VGROUPS ids(Y). { + SSQLToken token; + setDBName(&token, &X); + setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &Y); } //drop configure for tables cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). { X.n += Z.n; - setDCLSQLElems(pInfo, DROP_TABLE, 2, &X, &Y); + setDropDBTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &X, &Y); } -cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDCLSQLElems(pInfo, DROP_DATABASE, 2, &X, &Y); } -cmd ::= DROP DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, DROP_DNODE, 1, &X); } -cmd ::= DROP USER ids(X). { setDCLSQLElems(pInfo, DROP_USER, 1, &X); } -cmd ::= DROP ACCOUNT ids(X). 
{ setDCLSQLElems(pInfo, DROP_ACCOUNT, 1, &X); } +cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDropDBTableInfo(pInfo, TSDB_SQL_DROP_DB, &X, &Y); } +cmd ::= DROP DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &X); } +cmd ::= DROP USER ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_USER, 1, &X); } +cmd ::= DROP ACCOUNT ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &X); } /////////////////////////////////THE USE STATEMENT////////////////////////////////////////// -cmd ::= USE ids(X). { setDCLSQLElems(pInfo, USE_DATABASE, 1, &X);} +cmd ::= USE ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_USE_DB, 1, &X);} /////////////////////////////////THE DESCRIBE STATEMENT///////////////////////////////////// cmd ::= DESCRIBE ids(X) cpxName(Y). { X.n += Y.n; - setDCLSQLElems(pInfo, DESCRIBE_TABLE, 1, &X); + setDCLSQLElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &X); } /////////////////////////////////THE ALTER STATEMENT//////////////////////////////////////// -cmd ::= ALTER USER ids(X) PASS ids(Y). { setDCLSQLElems(pInfo, ALTER_USER_PASSWD, 2, &X, &Y); } -cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setDCLSQLElems(pInfo, ALTER_USER_PRIVILEGES, 2, &X, &Y);} -cmd ::= ALTER DNODE IPTOKEN(X) ids(Y). { setDCLSQLElems(pInfo, ALTER_DNODE, 2, &X, &Y); } -cmd ::= ALTER DNODE IPTOKEN(X) ids(Y) ids(Z). { setDCLSQLElems(pInfo, ALTER_DNODE, 3, &X, &Y, &Z); } -cmd ::= ALTER LOCAL ids(X). { setDCLSQLElems(pInfo, ALTER_LOCAL, 1, &X); } -cmd ::= ALTER LOCAL ids(X) ids(Y). { setDCLSQLElems(pInfo, ALTER_LOCAL, 2, &X, &Y); } -cmd ::= ALTER DATABASE ids(X) alter_db_optr(Y). { SSQLToken t = {0}; setCreateDBSQL(pInfo, ALTER_DATABASE, &X, &Y, &t);} +cmd ::= ALTER USER ids(X) PASS ids(Y). { setAlterUserSQL(pInfo, TSDB_ALTER_USER_PASSWD, &X, &Y, NULL); } +cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setAlterUserSQL(pInfo, TSDB_ALTER_USER_PRIVILEGES, &X, NULL, &Y);} +cmd ::= ALTER DNODE IPTOKEN(X) ids(Y). 
{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &X, &Y); } +cmd ::= ALTER DNODE IPTOKEN(X) ids(Y) ids(Z). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &X, &Y, &Z); } +cmd ::= ALTER LOCAL ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &X); } +cmd ::= ALTER LOCAL ids(X) ids(Y). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &X, &Y); } +cmd ::= ALTER DATABASE ids(X) alter_db_optr(Y). { SSQLToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &X, &Y, &t);} -cmd ::= ALTER ACCOUNT ids(X) acct_optr(Z). { SSQLToken t = {0}; setCreateAcctSQL(pInfo, ALTER_ACCT, &X, &t, &Z);} -cmd ::= ALTER ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSQL(pInfo, ALTER_ACCT, &X, &Y, &Z);} +cmd ::= ALTER ACCOUNT ids(X) acct_optr(Z). { setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &X, NULL, &Z);} +cmd ::= ALTER ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &X, &Y, &Z);} // An IDENTIFIER can be a generic identifier, or one of several keywords. // Any non-standard keyword can also be an identifier. @@ -157,11 +163,11 @@ ifnotexists(X) ::= . {X.n = 0;} /////////////////////////////////THE CREATE STATEMENT/////////////////////////////////////// //create option for dnode/db/user/account -cmd ::= CREATE DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, CREATE_DNODE, 1, &X);} +cmd ::= CREATE DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &X);} cmd ::= CREATE ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). - { setCreateAcctSQL(pInfo, CREATE_ACCOUNT, &X, &Y, &Z);} -cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDBSQL(pInfo, CREATE_DATABASE, &X, &Y, &Z);} -cmd ::= CREATE USER ids(X) PASS ids(Y). { setDCLSQLElems(pInfo, CREATE_USER, 2, &X, &Y);} + { setCreateAcctSQL(pInfo, TSDB_SQL_CREATE_ACCT, &X, &Y, &Z);} +cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &X, &Y, &Z);} +cmd ::= CREATE USER ids(X) PASS ids(Y). { setCreateUserSQL(pInfo, &X, &Y);} pps(Y) ::= . 
{Y.n = 0; } pps(Y) ::= PPS INTEGER(X). {Y = X; } @@ -192,14 +198,14 @@ state(Y) ::= STATE ids(X). {Y = X; } %type acct_optr {SCreateAcctSQL} acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K) conns(L) state(M). { - Y.users = (K.n>0)?atoi(K.z):-1; - Y.dbs = (E.n>0)?atoi(E.z):-1; - Y.tseries = (D.n>0)?atoi(D.z):-1; - Y.streams = (F.n>0)?atoi(F.z):-1; - Y.pps = (C.n>0)?atoi(C.z):-1; - Y.storage = (P.n>0)?strtoll(P.z, NULL, 10):-1; - Y.qtime = (Q.n>0)?strtoll(Q.z, NULL, 10):-1; - Y.conns = (L.n>0)?atoi(L.z):-1; + Y.maxUsers = (K.n>0)?atoi(K.z):-1; + Y.maxDbs = (E.n>0)?atoi(E.z):-1; + Y.maxTimeSeries = (D.n>0)?atoi(D.z):-1; + Y.maxStreams = (F.n>0)?atoi(F.z):-1; + Y.maxPointsPerSecond = (C.n>0)?atoi(C.z):-1; + Y.maxStorage = (P.n>0)?strtoll(P.z, NULL, 10):-1; + Y.maxQueryTime = (Q.n>0)?strtoll(Q.z, NULL, 10):-1; + Y.maxConnections = (L.n>0)?atoi(L.z):-1; Y.stat = M; } @@ -264,29 +270,29 @@ cmd ::= CREATE TABLE ifnotexists(Y) ids(X) cpxName(Z) create_table_args. { %type create_table_args{SCreateTableSQL*} create_table_args(A) ::= LP columnlist(X) RP. { - A = tSetCreateSQLElems(X, NULL, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METER); - setSQLInfo(pInfo, A, NULL, TSQL_CREATE_NORMAL_METER); + A = tSetCreateSQLElems(X, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); } -// create metric +// create super table create_table_args(A) ::= LP columnlist(X) RP TAGS LP columnlist(Y) RP. 
{ - A = tSetCreateSQLElems(X, Y, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METRIC); - setSQLInfo(pInfo, A, NULL, TSQL_CREATE_NORMAL_METRIC); + A = tSetCreateSQLElems(X, Y, NULL, NULL, NULL, TSQL_CREATE_STABLE); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); } -// create meter by using metric -// create meter meter_name using metric_name tags(tag_values1, tag_values2) +// create table by using super table +// create table table_name using super_table_name tags(tag_values1, tag_values2) create_table_args(A) ::= USING ids(X) cpxName(F) TAGS LP tagitemlist(Y) RP. { X.n += F.n; - A = tSetCreateSQLElems(NULL, NULL, &X, Y, NULL, TSQL_CREATE_METER_FROM_METRIC); - setSQLInfo(pInfo, A, NULL, TSQL_CREATE_METER_FROM_METRIC); + A = tSetCreateSQLElems(NULL, NULL, &X, Y, NULL, TSQL_CREATE_TABLE_FROM_STABLE); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); } // create stream -// create table table_name as select count(*) from metric_name interval(time) +// create table table_name as select count(*) from super_table_name interval(time) create_table_args(A) ::= AS select(S). { A = tSetCreateSQLElems(NULL, NULL, NULL, NULL, S, TSQL_CREATE_STREAM); - setSQLInfo(pInfo, A, NULL, TSQL_CREATE_STREAM); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); } %type column{TAOS_FIELD} @@ -343,16 +349,22 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). { } //////////////////////// The SELECT statement ///////////////////////////////// -cmd ::= select(X). { - setSQLInfo(pInfo, X, NULL, TSQL_QUERY_METER); -} - %type select {SQuerySQL*} -%destructor select {destroyQuerySql($$);} +%destructor select {doDestroyQuerySql($$);} select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). { A = tSetQuerySQLElems(&T, W, X, Y, P, Z, &K, &S, F, &L, &G); } +%type union {SSubclauseInfo*} +%destructor union {destroyAllSelectClause($$);} + +union(Y) ::= select(X). 
{ Y = setSubclause(NULL, X); } +union(Y) ::= LP union(X) RP. { Y = X; } +union(Y) ::= union(Z) UNION ALL select(X). { Y = appendSelectClause(Z, X); } +union(Y) ::= union(Z) UNION ALL LP select(X) RP. { Y = appendSelectClause(Z, X); } + +cmd ::= union(X). { setSQLInfo(pInfo, X, NULL, TSDB_SQL_SELECT); } + // Support for the SQL exprssion without from & where subclauses, e.g., // select current_database(), // select server_version(), select client_version(), @@ -572,34 +584,14 @@ exprlist(A) ::= expritem(X). {A = tSQLExprListAppend(0,X,0);} expritem(A) ::= expr(X). {A = X;} expritem(A) ::= . {A = 0;} -////////////////////////// The INSERT command ///////////////////////////////// -// add support "values() values() values() tags()" operation.... -cmd ::= INSERT INTO cpxName(X) insert_value_list(K). { - tSetInsertSQLElems(pInfo, &X, K); -} - -%type insert_value_list {tSQLExprListList*} -insert_value_list(X) ::= VALUES LP itemlist(Y) RP. {X = tSQLListListAppend(NULL, Y);} -insert_value_list(X) ::= insert_value_list(K) VALUES LP itemlist(Y) RP. -{X = tSQLListListAppend(K, Y);} - -//cmd ::= INSERT INTO cpxName(X) select(S). -// {sqliteInsert(pParse, sqliteSrcListAppend(0,&X,&D), 0, S, F, R);} - -%type itemlist {tSQLExprList*} -%destructor itemlist {tSQLExprListDestroy($$);} - -itemlist(A) ::= itemlist(X) COMMA expr(Y). {A = tSQLExprListAppend(X,Y,0);} -itemlist(A) ::= expr(X). {A = tSQLExprListAppend(0,X,0);} - ///////////////////////////////////reset query cache////////////////////////////////////// -cmd ::= RESET QUERY CACHE. { setDCLSQLElems(pInfo, RESET_QUERY_CACHE, 0);} +cmd ::= RESET QUERY CACHE. { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} ///////////////////////////////////ALTER TABLE statement////////////////////////////////// cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). 
{ X.n += F.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, ALTER_TABLE_ADD_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_ADD_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). { @@ -608,15 +600,15 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). { toTSDBType(A.type); tVariantList* K = tVariantListAppendToken(NULL, &A, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, K, ALTER_TABLE_DROP_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_DROP_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } //////////////////////////////////ALTER TAGS statement///////////////////////////////////// cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). { X.n += Y.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, ALTER_TABLE_TAGS_ADD); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_ADD); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). { X.n += Z.n; @@ -624,8 +616,8 @@ cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). { toTSDBType(Y.type); tVariantList* A = tVariantListAppendToken(NULL, &Y, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_DROP); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_DROP); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). 
{ @@ -637,8 +629,8 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). { toTSDBType(Z.type); A = tVariantListAppendToken(A, &Z, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_CHG); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_CHG); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { @@ -648,17 +640,18 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { tVariantList* A = tVariantListAppendToken(NULL, &Y, -1); A = tVariantListAppend(A, &Z, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_SET); - setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_SET); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL); + setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } ////////////////////////////////////////kill statement/////////////////////////////////////// -cmd ::= KILL CONNECTION IPTOKEN(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setDCLSQLElems(pInfo, KILL_CONNECTION, 1, &X);} -cmd ::= KILL STREAM IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_STREAM, 1, &X);} -cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_QUERY, 1, &X);} +cmd ::= KILL CONNECTION IPTOKEN(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &X);} +cmd ::= KILL STREAM IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &X);} +cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). 
{X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &X);} %fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL COUNT SUM AVG MIN MAX FIRST LAST TOP BOTTOM STDDEV PERCENTILE APERCENTILE LEASTSQUARES HISTOGRAM DIFF - SPREAD TWA INTERP LAST_ROW NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT METRIC TBNAME JOIN METRICS STABLE NULL. + SPREAD TWA INTERP LAST_ROW RATE IRATE SUM_RATE SUM_IRATE AVG_RATE AVG_IRATE NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT + METRIC TBNAME JOIN METRICS STABLE NULL INSERT INTO VALUES. diff --git a/src/inc/taos.h b/src/inc/taos.h index 2fd6d8be927a310e0131b62a8f0ecf55ae943ef2..d9db79fbcb74e6935ab052c702b72129cd4cc132 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -57,10 +57,16 @@ typedef struct taosField { char type; } TAOS_FIELD; -void taos_init(); -int taos_options(TSDB_OPTION option, const void *arg, ...); -TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); -void taos_close(TAOS *taos); +#ifdef _TD_GO_DLL_ + #define DLL_EXPORT __declspec(dllexport) +#else + #define DLL_EXPORT +#endif + +DLL_EXPORT void taos_init(); +DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); +DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); +DLL_EXPORT void taos_close(TAOS *taos); typedef struct TAOS_BIND { int buffer_type; @@ -80,18 +86,18 @@ int taos_stmt_execute(TAOS_STMT *stmt); TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt); int taos_stmt_close(TAOS_STMT *stmt); -int taos_query(TAOS *taos, const char *sql); -TAOS_RES *taos_use_result(TAOS *taos); -TAOS_ROW taos_fetch_row(TAOS_RES *res); -int taos_result_precision(TAOS_RES *res); // get the time precision of result -void taos_free_result(TAOS_RES *res); 
-int taos_field_count(TAOS *taos); -int taos_num_fields(TAOS_RES *res); -int taos_affected_rows(TAOS *taos); -TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); -int taos_select_db(TAOS *taos, const char *db); -int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); -void taos_stop_query(TAOS_RES *res); +DLL_EXPORT int taos_query(TAOS *taos, const char *sql); +DLL_EXPORT TAOS_RES *taos_use_result(TAOS *taos); +DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res); +DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision of result +DLL_EXPORT void taos_free_result(TAOS_RES *res); +DLL_EXPORT int taos_field_count(TAOS *taos); +DLL_EXPORT int taos_num_fields(TAOS_RES *res); +DLL_EXPORT int taos_affected_rows(TAOS *taos); +DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); +DLL_EXPORT int taos_select_db(TAOS *taos, const char *db); +DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); +DLL_EXPORT void taos_stop_query(TAOS_RES *res); int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); int taos_validate_sql(TAOS *taos, const char *sql); @@ -100,27 +106,26 @@ int taos_validate_sql(TAOS *taos, const char *sql); // TAOS_RES *taos_list_dbs(TAOS *mysql, const char *wild); // TODO: the return value should be `const` -char *taos_get_server_info(TAOS *taos); -char *taos_get_client_info(); -char *taos_errstr(TAOS *taos); +DLL_EXPORT char *taos_get_server_info(TAOS *taos); +DLL_EXPORT char *taos_get_client_info(); +DLL_EXPORT char *taos_errstr(TAOS *taos); -int taos_errno(TAOS *taos); +DLL_EXPORT int taos_errno(TAOS *taos); -void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param); -void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); -void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param); +DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, void 
(*fp)(void *param, TAOS_RES *, int code), void *param); +DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); +DLL_EXPORT void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param); -TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, const char *db, const char *table, int64_t time, int mseconds); -TAOS_ROW taos_consume(TAOS_SUB *tsub); -void taos_unsubscribe(TAOS_SUB *tsub); -int taos_subfields_count(TAOS_SUB *tsub); -TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub); +typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code); +DLL_EXPORT TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval); +DLL_EXPORT TAOS_RES *taos_consume(TAOS_SUB *tsub); +DLL_EXPORT void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress); -TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), +DLL_EXPORT TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *)); -void taos_close_stream(TAOS_STREAM *tstr); +DLL_EXPORT void taos_close_stream(TAOS_STREAM *tstr); -int taos_load_table_info(TAOS *taos, const char* tableNameList); +DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList); #ifdef __cplusplus } diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 78aeaad47592801703572c25c26233f83e5048f7..edf0ab24a169c0a24ab1f8e554e7c0282c92a47f 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -125,10 +125,10 @@ extern "C" { #define TSDB_CODE_BATCH_SIZE_TOO_BIG 104 #define TSDB_CODE_TIMESTAMP_OUT_OF_RANGE 105 #define TSDB_CODE_INVALID_QUERY_MSG 106 // failed to validate the sql expression msg by vnode -#define TSDB_CODE_CACHE_BLOCK_TS_DISORDERED 107 // 
time stamp in cache block is disordered +#define TSDB_CODE_SORTED_RES_TOO_MANY 107 // too many result for ordered super table projection query #define TSDB_CODE_FILE_BLOCK_TS_DISORDERED 108 // time stamp in file block is disordered #define TSDB_CODE_INVALID_COMMIT_LOG 109 // commit log init failed -#define TSDB_CODE_SERVER_NO_SPACE 110 +#define TSDB_CODE_SERV_NO_DISKSPACE 110 #define TSDB_CODE_NOT_SUPER_TABLE 111 // operation only available for super table #define TSDB_CODE_DUPLICATE_TAGS 112 // tags value for join not unique #define TSDB_CODE_INVALID_SUBMIT_MSG 113 @@ -136,6 +136,10 @@ extern "C" { #define TSDB_CODE_INVALID_TABLE_ID 115 #define TSDB_CODE_INVALID_VNODE_STATUS 116 #define TSDB_CODE_FAILED_TO_LOCK_RESOURCES 117 +#define TSDB_CODE_TABLE_ID_MISMATCH 118 +#define TSDB_CODE_QUERY_CACHE_ERASED 119 + +#define TSDB_CODE_MAX_ERROR_CODE 120 #ifdef __cplusplus } diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index c1820a5b9c79ecad82bccd951e4bfb1a7c605768..883906b617053af79dd7292246a315a5fe13f8b7 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -74,10 +74,10 @@ extern "C" { #define TSDB_MSG_TYPE_CREATE_MNODE_RSP 44 #define TSDB_MSG_TYPE_DROP_MNODE 45 #define TSDB_MSG_TYPE_DROP_MNODE_RSP 46 -#define TSDB_MSG_TYPE_CREATE_PNODE 47 -#define TSDB_MSG_TYPE_CREATE_PNODE_RSP 48 -#define TSDB_MSG_TYPE_DROP_PNODE 49 -#define TSDB_MSG_TYPE_DROP_PNODE_RSP 50 +#define TSDB_MSG_TYPE_CREATE_DNODE 47 +#define TSDB_MSG_TYPE_CREATE_DNODE_RSP 48 +#define TSDB_MSG_TYPE_DROP_DNODE 49 +#define TSDB_MSG_TYPE_DROP_DNODE_RSP 50 #define TSDB_MSG_TYPE_CREATE_DB 51 #define TSDB_MSG_TYPE_CREATE_DB_RSP 52 #define TSDB_MSG_TYPE_DROP_DB 53 @@ -147,7 +147,7 @@ enum _mgmt_table { TSDB_MGMT_TABLE_USER, TSDB_MGMT_TABLE_DB, TSDB_MGMT_TABLE_TABLE, - TSDB_MGMT_TABLE_PNODE, + TSDB_MGMT_TABLE_DNODE, TSDB_MGMT_TABLE_MNODE, TSDB_MGMT_TABLE_VGROUP, TSDB_MGMT_TABLE_METRIC, @@ -222,6 +222,7 @@ typedef struct { // internal part uint32_t destId; + uint32_t destIp; char meterId[TSDB_UNI_LEN]; 
uint16_t port; // for UDP only char empty[1]; @@ -279,7 +280,7 @@ typedef struct { } SShellSubmitMsg; typedef struct SSchema { - char type; + uint8_t type; char name[TSDB_COL_NAME_LEN]; short colId; short bytes; @@ -311,7 +312,7 @@ typedef struct { typedef struct { char db[TSDB_METER_ID_LEN]; - short ignoreNotExists; + uint8_t ignoreNotExists; } SDropDbMsg, SUseDbMsg; typedef struct { @@ -350,6 +351,7 @@ typedef struct { } SAlterTableMsg; typedef struct { + char clientVersion[TSDB_VERSION_LEN]; char db[TSDB_METER_ID_LEN]; } SConnectMsg; @@ -487,7 +489,8 @@ typedef struct SColumnInfo { */ typedef struct SMeterSidExtInfo { int32_t sid; - void * pObj; + int64_t uid; + TSKEY key; // key for subscription char tags[]; } SMeterSidExtInfo; @@ -504,7 +507,6 @@ typedef struct { uint64_t uid; TSKEY skey; TSKEY ekey; - int32_t num; int16_t order; int16_t orderColId; @@ -513,7 +515,8 @@ typedef struct { char intervalTimeUnit; // time interval type, for revisement of interval(1d) int64_t nAggTimeInterval; // time interval for aggregation, in million second - + int64_t slidingTime; // value for sliding window + // tag schema, used to parse tag information in pSidExtInfo uint64_t pTagSchema; @@ -622,7 +625,7 @@ typedef struct { char repStrategy; char loadLatest; // load into mem or not - char precision; // time resoluation + uint8_t precision; // time resolution char reserved[16]; } SVnodeCfg, SCreateDbMsg, SDbCfg, SAlterDbMsg; @@ -662,9 +665,10 @@ typedef struct { // internal message typedef struct { uint32_t destId; + uint32_t destIp; char meterId[TSDB_UNI_LEN]; char empty[3]; - char msgType; + uint8_t msgType; int32_t msgLen; uint8_t content[0]; } SIntMsg; @@ -724,9 +728,7 @@ typedef struct { int32_t numOfMeters; int32_t join; int32_t joinCondLen; // for join condition - int32_t metaElem[TSDB_MAX_JOIN_TABLE_NUM]; - } SMetricMetaMsg; typedef struct { diff --git a/src/inc/tcache.h b/src/inc/tcache.h index 
93bbf22cd3752589731375a32da3da78c635b956..b577c53ea8dbcdc9f069288b94d0244907e77f12 100644 --- a/src/inc/tcache.h +++ b/src/inc/tcache.h @@ -86,6 +86,26 @@ void taosCleanUpDataCache(void *handle); */ void taosClearDataCache(void *handle); +/** + * Add one reference count for the exist data, and assign this data for a new owner. + * The new owner needs to invoke the taosRemoveDataFromCache when it does not need this data anymore. + * This procedure is a faster version of taosGetDataFromCache function, which avoids the sideeffect of the problem of the + * data is moved to trash, and taosGetDataFromCache will fail to retrieve it again. + * + * @param handle + * @param data + * @return + */ +void* taosGetDataFromExists(void* handle, void* data); + +/** + * transfer the ownership of data in cache to another object without increasing reference count. + * @param handle + * @param data + * @return + */ +void* taosTransferDataInCache(void* handle, void** data); + #ifdef __cplusplus } #endif diff --git a/src/inc/textbuffer.h b/src/inc/textbuffer.h index c7de20bd746d5889cc9d2c6407743ecace557bae..b46b98ed382e207d77abdff8a8bd2f41408f5fd5 100644 --- a/src/inc/textbuffer.h +++ b/src/inc/textbuffer.h @@ -19,20 +19,14 @@ extern "C" { #endif -#include -#include -#include - -#include "tutil.h" +#include "os.h" #include "taosmsg.h" +#include "tutil.h" -#define DEFAULT_PAGE_SIZE 16384 // 16k larger than the SHistoInfo -#define MIN_BUFFER_SIZE (1 << 19) -#define MAX_TMPFILE_PATH_LENGTH PATH_MAX -#define INITIAL_ALLOCATION_BUFFER_SIZE 64 - -// forward declare -struct tTagSchema; +#define DEFAULT_PAGE_SIZE 16384 // 16k larger than the SHistoInfo +#define MIN_BUFFER_SIZE (1 << 19) +#define MAX_TMPFILE_PATH_LENGTH PATH_MAX +#define INITIAL_ALLOCATION_BUFFER_SIZE 64 typedef enum EXT_BUFFER_FLUSH_MODEL { /* @@ -61,12 +55,12 @@ typedef struct tFlushoutData { tFlushoutInfo *pFlushoutInfo; } tFlushoutData; -typedef struct tFileMeta { +typedef struct SFileInfo { uint32_t nFileSize; // in pages - 
uint32_t nPageSize; + uint32_t pageSize; uint32_t numOfElemsInFile; tFlushoutData flushoutData; -} tFileMeta; +} SFileInfo; typedef struct tFilePage { uint64_t numOfElems; @@ -78,65 +72,73 @@ typedef struct tFilePagesItem { tFilePage item; } tFilePagesItem; -typedef struct tColModel { - int32_t maxCapacity; - int32_t numOfCols; - int16_t * colOffset; - struct SSchema *pFields; -} tColModel; +typedef struct SSchemaEx { + struct SSchema field; + int16_t offset; +} SSchemaEx; -typedef struct tOrderIdx { - int32_t numOfOrderedCols; +typedef struct SColumnModel { + int32_t capacity; + int32_t numOfCols; + int16_t rowSize; + SSchemaEx *pFields; +} SColumnModel; + +typedef struct SColumnOrderInfo { + int32_t numOfCols; int16_t pData[]; -} tOrderIdx; +} SColumnOrderInfo; typedef struct tOrderDescriptor { - union { - struct tTagSchema *pTagSchema; - tColModel * pSchema; - }; - int32_t tsOrder; // timestamp order type if exists - tOrderIdx orderIdx; + SColumnModel * pColumnModel; + int32_t tsOrder; // timestamp order type if exists + SColumnOrderInfo orderIdx; } tOrderDescriptor; typedef struct tExtMemBuffer { - int32_t nMaxSizeInPages; - + int32_t inMemCapacity; int32_t nElemSize; - int32_t nPageSize; - - int32_t numOfAllElems; + int32_t pageSize; + int32_t numOfTotalElems; int32_t numOfElemsInBuffer; int32_t numOfElemsPerPage; + int16_t numOfInMemPages; - int16_t numOfPagesInMem; tFilePagesItem *pHead; tFilePagesItem *pTail; - tFileMeta fileMeta; - - char dataFilePath[MAX_TMPFILE_PATH_LENGTH]; - FILE *dataFile; - - tColModel *pColModel; + char * path; + FILE * file; + SFileInfo fileMeta; + SColumnModel * pColumnModel; EXT_BUFFER_FLUSH_MODEL flushModel; } tExtMemBuffer; +/** + * + * @param fileNamePattern + * @param dstPath + */ void getTmpfilePath(const char *fileNamePattern, char *dstPath); -/* - * create ext-memory buffer +/** + * + * @param inMemSize + * @param elemSize + * @param pModel + * @return */ -void tExtMemBufferCreate(tExtMemBuffer **pMemBuffer, int32_t 
numOfBufferSize, int32_t elemSize, - const char *tmpDataFilePath, tColModel *pModel); +tExtMemBuffer *createExtMemBuffer(int32_t inMemSize, int32_t elemSize, SColumnModel *pModel); -/* - * destroy ext-memory buffer +/** + * + * @param pMemBuffer + * @return */ -void tExtMemBufferDestroy(tExtMemBuffer **pMemBuffer); +void *destoryExtMemBuffer(tExtMemBuffer *pMemBuffer); -/* +/** * @param pMemBuffer * @param data input data pointer * @param numOfRows number of rows in data @@ -145,12 +147,15 @@ void tExtMemBufferDestroy(tExtMemBuffer **pMemBuffer); */ int16_t tExtMemBufferPut(tExtMemBuffer *pMemBuffer, void *data, int32_t numOfRows); -/* - * flush all data into disk and release all in-memory buffer +/** + * + * @param pMemBuffer + * @return */ bool tExtMemBufferFlush(tExtMemBuffer *pMemBuffer); -/* +/** + * * remove all data that has been put into buffer, including in buffer or * ext-buffer(disk) */ @@ -163,11 +168,44 @@ void tExtMemBufferClear(tExtMemBuffer *pMemBuffer); */ bool tExtMemBufferLoadData(tExtMemBuffer *pMemBuffer, tFilePage *pFilePage, int32_t flushIdx, int32_t pageIdx); +/** + * + * @param pMemBuffer + * @return + */ bool tExtMemBufferIsAllDataInMem(tExtMemBuffer *pMemBuffer); -tColModel *tColModelCreate(SSchema *field, int32_t numOfCols, int32_t maxCapacity); +/** + * + * @param fields + * @param numOfCols + * @param blockCapacity + * @return + */ +SColumnModel *createColumnModel(SSchema *fields, int32_t numOfCols, int32_t blockCapacity); + +/** + * + * @param pSrc + * @return + */ +SColumnModel *cloneColumnModel(SColumnModel *pSrc); + +/** + * + * @param pModel + */ +void destroyColumnModel(SColumnModel *pModel); + +/* + * compress data into consecutive block without hole in data + */ +void tColModelCompact(SColumnModel *pModel, tFilePage *inputBuffer, int32_t maxElemsCapacity); + +void tColModelErase(SColumnModel *pModel, tFilePage *inputBuffer, int32_t maxCapacity, int32_t s, int32_t e); +SSchema *getColumnModelSchema(SColumnModel *pColumnModel, 
int32_t index); -void tColModelDestroy(tColModel *pModel); +int16_t getColumnModelOffset(SColumnModel *pColumnModel, int32_t index); typedef struct SSrcColumnInfo { int32_t functionId; @@ -177,68 +215,18 @@ typedef struct SSrcColumnInfo { /* * display data in column format model for debug purpose only */ -void tColModelDisplay(tColModel *pModel, void *pData, int32_t numOfRows, int32_t maxCount); +void tColModelDisplay(SColumnModel *pModel, void *pData, int32_t numOfRows, int32_t maxCount); -void tColModelDisplayEx(tColModel *pModel, void *pData, int32_t numOfRows, int32_t maxCount, SSrcColumnInfo *pInfo); +void tColModelDisplayEx(SColumnModel *pModel, void *pData, int32_t numOfRows, int32_t maxCount, SSrcColumnInfo *pInfo); -/* - * compress data into consecutive block without hole in data - */ -void tColModelCompact(tColModel *pModel, tFilePage *inputBuffer, int32_t maxElemsCapacity); - -void tColModelErase(tColModel *pModel, tFilePage *inputBuffer, int32_t maxCapacity, int32_t s, int32_t e); - -tOrderDescriptor *tOrderDesCreate(int32_t *orderColIdx, int32_t numOfOrderCols, tColModel *pModel, int32_t tsOrderType); +tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrderCols, SColumnModel *pModel, + int32_t tsOrderType); void tOrderDescDestroy(tOrderDescriptor *pDesc); -void tColModelAppend(tColModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows, +void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows, int32_t numOfRowsToWrite, int32_t srcCapacity); -/////////////////////////////////////////////////////////////////////////////////////////////////////// -typedef struct MinMaxEntry { - union { - double dMinVal; - int32_t iMinVal; - int64_t i64MinVal; - }; - union { - double dMaxVal; - int32_t iMaxVal; - int64_t i64MaxVal; - }; -} MinMaxEntry; - -typedef struct tMemBucketSegment { - int32_t numOfSlots; - MinMaxEntry * pBoundingEntries; - tExtMemBuffer **pBuffer; -} 
tMemBucketSegment; - -typedef struct tMemBucket { - int16_t numOfSegs; - int16_t nTotalSlots; - int16_t nSlotsOfSeg; - int16_t dataType; - - int16_t nElemSize; - int32_t numOfElems; - - int32_t nTotalBufferSize; - int32_t maxElemsCapacity; - - int16_t nPageSize; - int16_t numOfTotalPages; - int16_t numOfAvailPages; /* remain available buffer pages */ - - tMemBucketSegment *pSegs; - tOrderDescriptor * pOrderDesc; - - MinMaxEntry nRange; - - void (*HashFunc)(struct tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx); -} tMemBucket; - typedef int (*__col_compar_fn_t)(tOrderDescriptor *, int32_t numOfRows, int32_t idx1, int32_t idx2, char *data); void tColDataQSort(tOrderDescriptor *, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType); @@ -253,19 +241,6 @@ int32_t compare_a(tOrderDescriptor *, int32_t numOfRow1, int32_t s1, char *data1 int32_t compare_d(tOrderDescriptor *, int32_t numOfRow1, int32_t s1, char *data1, int32_t numOfRow2, int32_t s2, char *data2); -tMemBucket* tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nElemSize, - int16_t dataType, tOrderDescriptor *pDesc); - -void tMemBucketDestroy(tMemBucket *pBucket); - -void tMemBucketPut(tMemBucket *pBucket, void *data, int32_t numOfRows); - -double getPercentile(tMemBucket *pMemBucket, double percent); - -void tBucketIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx); - -void tBucketDoubleHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx); - #ifdef __cplusplus } #endif diff --git a/src/inc/tglobalcfg.h b/src/inc/tglobalcfg.h index 63cff06e83f61103350fc752c113a026da296710..35cf6a42443ef40135c3937867339c6634c32140 100644 --- a/src/inc/tglobalcfg.h +++ b/src/inc/tglobalcfg.h @@ -54,6 +54,7 @@ extern char tsDirectory[]; extern char dataDir[]; extern char logDir[]; extern char scriptDir[]; +extern char osName[]; extern char tsMasterIp[]; extern char tsSecondIp[]; @@ -74,13 +75,12 @@ extern int 
tsMetricMetaKeepTimer; extern float tsNumOfThreadsPerCore; extern float tsRatioOfQueryThreads; extern char tsPublicIp[]; -extern char tsInternalIp[]; extern char tsPrivateIp[]; -extern char tsServerIpStr[]; extern short tsNumOfVnodesPerCore; extern short tsNumOfTotalVnodes; extern short tsCheckHeaderFile; -extern uint32_t tsServerIp; +extern uint32_t tsPublicIpInt; +extern short tsAffectedRowsMod; extern int tsSessionsPerVnode; extern int tsAverageCacheBlocks; @@ -106,7 +106,6 @@ extern int tsMaxDbs; extern int tsMaxTables; extern int tsMaxDnodes; extern int tsMaxVGroups; -extern int tsShellActivityTimer; extern char tsMgmtZone[]; extern char tsLocalIp[]; @@ -127,6 +126,8 @@ extern int tsEnableHttpModule; extern int tsEnableMonitorModule; extern int tsRestRowLimit; extern int tsCompressMsgSize; +extern int tsMaxSQLStringLen; +extern int tsMaxNumOfOrderedResults; extern char tsSocketType[4]; @@ -136,6 +137,7 @@ extern int tsMinIntervalTime; extern int tsMaxStreamComputDelay; extern int tsStreamCompStartDelay; extern int tsStreamCompRetryDelay; +extern float tsStreamComputDelayRatio; // the delayed computing ration of the whole time window extern int tsProjectExecInterval; extern int64_t tsMaxRetentWindow; @@ -148,9 +150,10 @@ extern int tsHttpMaxThreads; extern int tsHttpEnableCompress; extern int tsHttpEnableRecordSql; extern int tsTelegrafUseFieldNum; -extern int tsAdminRowLimit; extern int tsTscEnableRecordSql; +extern int tsAnyIp; +extern int tsIsCluster; extern char tsMonitorDbName[]; extern char tsInternalPass[]; diff --git a/src/inc/thistogram.h b/src/inc/thistogram.h index 7e5b1ccac6c9f1c882e9690398b6526340cf9fde..bb058449e806c8270dbf141ca3a81103f63c6e5c 100644 --- a/src/inc/thistogram.h +++ b/src/inc/thistogram.h @@ -20,8 +20,6 @@ extern "C" { #endif -#include "tskiplist.h" - #define USE_ARRAYLIST #define MAX_HISTOGRAM_BIN 500 diff --git a/src/inc/tinterpolation.h b/src/inc/tinterpolation.h index 
40b8c5cb2f85ad6cc403bce4159bcab12f3c1476..f4b327bcbec82b2b9ca8e2f5c92b044700240dbc 100644 --- a/src/inc/tinterpolation.h +++ b/src/inc/tinterpolation.h @@ -38,13 +38,13 @@ typedef struct SPoint { void * val; } SPoint; -typedef void (*__interpo_callback_fn_t)(void *param); - int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t timeRange, char intervalTimeUnit, int16_t precision); void taosInitInterpoInfo(SInterpolationInfo *pInterpoInfo, int32_t order, int64_t startTimeStamp, int32_t numOfTags, int32_t rowSize); +void taosDestoryInterpoInfo(SInterpolationInfo *pInterpoInfo); + void taosInterpoSetStartInfo(SInterpolationInfo *pInterpoInfo, int32_t numOfRawDataInRows, int32_t type); TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int32_t timeInterval, int8_t intervalTimeUnit, int8_t precision); @@ -78,8 +78,8 @@ int32_t taosNumOfRemainPoints(SInterpolationInfo *pInterpoInfo); */ int32_t taosDoInterpoResult(SInterpolationInfo *pInterpoInfo, int16_t interpoType, tFilePage **data, int32_t numOfRawDataInRows, int32_t outputRows, int64_t nInterval, - int64_t *pPrimaryKeyArray, tColModel *pModel, char **srcData, int64_t *defaultVal, - int32_t *functionIDs, int32_t bufSize); + const int64_t *pPrimaryKeyArray, SColumnModel *pModel, char **srcData, int64_t *defaultVal, + const int32_t *functionIDs, int32_t bufSize); int taosDoLinearInterpolation(int32_t type, SPoint *point1, SPoint *point2, SPoint *point); diff --git a/src/inc/tlog.h b/src/inc/tlog.h index 0d348c27ce8056fd554ab3ea0e2c60e89920b882..7556cc50a1ff23d24ef8eafdcd677f65577c8713 100644 --- a/src/inc/tlog.h +++ b/src/inc/tlog.h @@ -113,7 +113,10 @@ extern uint32_t cdebugFlag; } #define tscPrint(...) \ { tprintf("TSC ", 255, __VA_ARGS__); } - +#define tscDump(...) \ + if (cdebugFlag & DEBUG_TRACE) { \ + taosPrintLongString("TSC ", cdebugFlag, __VA_ARGS__); \ + } #define jniError(...) 
\ if (jnidebugFlag & DEBUG_ERROR) { \ tprintf("ERROR JNI ", jnidebugFlag, __VA_ARGS__); \ diff --git a/src/inc/tpercentile.h b/src/inc/tpercentile.h new file mode 100644 index 0000000000000000000000000000000000000000..b9cf50e0bbf24357b729f8bc39996f589d6c18fc --- /dev/null +++ b/src/inc/tpercentile.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TPERCENTILE_H +#define TDENGINE_TPERCENTILE_H + +#include "textbuffer.h" + +typedef struct MinMaxEntry { + union { + double dMinVal; + int32_t iMinVal; + int64_t i64MinVal; + }; + union { + double dMaxVal; + int32_t iMaxVal; + int64_t i64MaxVal; + }; +} MinMaxEntry; + +typedef struct tMemBucketSegment { + int32_t numOfSlots; + MinMaxEntry * pBoundingEntries; + tExtMemBuffer **pBuffer; +} tMemBucketSegment; + +typedef struct tMemBucket { + int16_t numOfSegs; + int16_t nTotalSlots; + int16_t nSlotsOfSeg; + int16_t dataType; + + int16_t nElemSize; + int32_t numOfElems; + + int32_t nTotalBufferSize; + int32_t maxElemsCapacity; + + int16_t pageSize; + int16_t numOfTotalPages; + int16_t numOfAvailPages; /* remain available buffer pages */ + + tMemBucketSegment *pSegs; + tOrderDescriptor * pOrderDesc; + + MinMaxEntry nRange; + + void (*HashFunc)(struct tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx); +} tMemBucket; + +tMemBucket *tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nElemSize, int16_t dataType, + 
tOrderDescriptor *pDesc); + +void tMemBucketDestroy(tMemBucket *pBucket); + +void tMemBucketPut(tMemBucket *pBucket, void *data, int32_t numOfRows); + +double getPercentile(tMemBucket *pMemBucket, double percent); + +void tBucketIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx); + +void tBucketDoubleHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx); + +#endif // TDENGINE_TPERCENTILE_H diff --git a/src/inc/tresultBuf.h b/src/inc/tresultBuf.h new file mode 100644 index 0000000000000000000000000000000000000000..a464479af27a7e8515f4260c0ea6a73aed780933 --- /dev/null +++ b/src/inc/tresultBuf.h @@ -0,0 +1,104 @@ +#ifndef TDENGINE_VNODEQUERYUTIL_H +#define TDENGINE_VNODEQUERYUTIL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "os.h" +#include "textbuffer.h" + +typedef struct SIDList { + uint32_t alloc; + int32_t size; + int32_t* pData; +} SIDList; + +typedef struct SQueryResultBuf { + int32_t numOfRowsPerPage; + int32_t numOfPages; + int64_t totalBufSize; + int32_t fd; // data file fd + int32_t allocateId; // allocated page id + int32_t incStep; // minimum allocated pages + char* pBuf; // mmap buffer pointer + char* path; // file path + + uint32_t numOfAllocGroupIds; // number of allocated id list + void* idsTable; // id hash table + SIDList* list; // for each id, there is a page id list +} SQueryResultBuf; + +/** + * create disk-based result buffer + * @param pResultBuf + * @param size + * @param rowSize + * @return + */ +int32_t createResultBuf(SQueryResultBuf** pResultBuf, int32_t size, int32_t rowSize); + +/** + * + * @param pResultBuf + * @param groupId + * @param pageId + * @return + */ +tFilePage* getNewDataBuf(SQueryResultBuf* pResultBuf, int32_t groupId, int32_t* pageId); + +/** + * + * @param pResultBuf + * @return + */ +int32_t getNumOfRowsPerPage(SQueryResultBuf* pResultBuf); + +/** + * + * @param pResultBuf + * @param groupId + * @return + */ +SIDList getDataBufPagesIdList(SQueryResultBuf* 
pResultBuf, int32_t groupId); + +/** + * get the specified buffer page by id + * @param pResultBuf + * @param id + * @return + */ +tFilePage* getResultBufferPageById(SQueryResultBuf* pResultBuf, int32_t id); + +/** + * get the total buffer size in the format of disk file + * @param pResultBuf + * @return + */ +int32_t getResBufSize(SQueryResultBuf* pResultBuf); + +/** + * get the number of groups in the result buffer + * @param pResultBuf + * @return + */ +int32_t getNumOfResultBufGroupId(SQueryResultBuf* pResultBuf); + +/** + * destroy result buffer + * @param pResultBuf + */ +void destroyResultBuf(SQueryResultBuf* pResultBuf); + +/** + * + * @param pList + * @return + */ +int32_t getLastPageId(SIDList *pList); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODEQUERYUTIL_H diff --git a/src/inc/tsched.h b/src/inc/tsched.h index dffd7a298a940e6d0f9a5fc7c6a0543beaca38cc..827ecbbb421b78c7d5140efc5c3be6e1edca4578 100644 --- a/src/inc/tsched.h +++ b/src/inc/tsched.h @@ -32,6 +32,8 @@ typedef struct _sched_msg { void *taosInitScheduler(int queueSize, int numOfThreads, const char *label); +void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl); + int taosScheduleTask(void *qhandle, SSchedMsg *pMsg); void taosCleanUpScheduler(void *param); diff --git a/src/inc/tschemautil.h b/src/inc/tschemautil.h index 0b8a2d6a9337c173fb1992c86b3792ccff31e0a0..0031b4fa2590496ca59b02e877f755f273591d08 100644 --- a/src/inc/tschemautil.h +++ b/src/inc/tschemautil.h @@ -47,12 +47,13 @@ struct SSchema *tsGetSchema(SMeterMeta *pMeta); struct SSchema *tsGetTagSchema(SMeterMeta *pMeta); struct SSchema *tsGetColumnSchema(SMeterMeta *pMeta, int32_t startCol); +struct SSchema tsGetTbnameColumnSchema(); char *tsGetTagsValue(SMeterMeta *pMeta); bool tsMeterMetaIdentical(SMeterMeta *p1, SMeterMeta *p2); -void extractMeterName(char *meterId, char *name); +void extractTableName(char *meterId, char *name); SSQLToken extractDBName(char *meterId, char 
*name); diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 347ba49818101e679f86bb5da70d350e5746e6fd..aa712ab62296c806b530cea14bacba525530d93d 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -100,6 +100,7 @@ extern "C" { #define TSDB_COL_NAME_LEN 64 #define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 16 #define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE +#define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length should be less than 6mb #define TSDB_MAX_BYTES_PER_ROW TSDB_MAX_COLUMNS * 16 #define TSDB_MAX_TAGS_LEN 512 @@ -189,6 +190,7 @@ extern "C" { #define TSDB_MAX_TABLES_PER_VNODE 220000 #define TSDB_MAX_JOIN_TABLE_NUM 5 +#define TSDB_MAX_UNION_CLAUSE 5 #define TSDB_MAX_BINARY_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE) #define TSDB_MAX_NCHAR_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE) @@ -210,7 +212,7 @@ extern "C" { #define TSDB_MAX_RPC_THREADS 5 -#define TSDB_QUERY_TYPE_QUERY 0 // normal query +#define TSDB_QUERY_TYPE_NON_TYPE 0x00U // none type #define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01U // free qhandle at vnode /* @@ -226,6 +228,13 @@ extern "C" { #define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40U // select *,columns... 
query #define TSDB_QUERY_TYPE_JOIN_SEC_STAGE 0x80U // join sub query at the second stage +#define TSDB_QUERY_TYPE_INSERT 0x100U // insert type +#define TSDB_QUERY_TYPE_IMPORT 0x200U // import data + +#define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0) +#define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type)) +#define TSDB_QUERY_RESET_TYPE(x) ((x) = TSDB_QUERY_TYPE_NON_TYPE) + #define TSQL_SO_ASC 1 #define TSQL_SO_DESC 0 diff --git a/src/inc/tsqldef.h b/src/inc/tsqldef.h index ea0500eb866f65dfe3ef77d0f7b318b2bcbdec0f..182860e67cf8df81401e8c065700b9d9fd6fa387 100644 --- a/src/inc/tsqldef.h +++ b/src/inc/tsqldef.h @@ -119,97 +119,106 @@ #define TK_COMMA 101 #define TK_NULL 102 #define TK_SELECT 103 -#define TK_FROM 104 -#define TK_VARIABLE 105 -#define TK_INTERVAL 106 -#define TK_FILL 107 -#define TK_SLIDING 108 -#define TK_ORDER 109 -#define TK_BY 110 -#define TK_ASC 111 -#define TK_DESC 112 -#define TK_GROUP 113 -#define TK_HAVING 114 -#define TK_LIMIT 115 -#define TK_OFFSET 116 -#define TK_SLIMIT 117 -#define TK_SOFFSET 118 -#define TK_WHERE 119 -#define TK_NOW 120 -#define TK_INSERT 121 -#define TK_INTO 122 -#define TK_VALUES 123 -#define TK_RESET 124 -#define TK_QUERY 125 -#define TK_ADD 126 -#define TK_COLUMN 127 -#define TK_TAG 128 -#define TK_CHANGE 129 -#define TK_SET 130 -#define TK_KILL 131 -#define TK_CONNECTION 132 -#define TK_COLON 133 -#define TK_STREAM 134 -#define TK_ABORT 135 -#define TK_AFTER 136 -#define TK_ATTACH 137 -#define TK_BEFORE 138 -#define TK_BEGIN 139 -#define TK_CASCADE 140 -#define TK_CLUSTER 141 -#define TK_CONFLICT 142 -#define TK_COPY 143 -#define TK_DEFERRED 144 -#define TK_DELIMITERS 145 -#define TK_DETACH 146 -#define TK_EACH 147 -#define TK_END 148 -#define TK_EXPLAIN 149 -#define TK_FAIL 150 -#define TK_FOR 151 -#define TK_IGNORE 152 -#define TK_IMMEDIATE 153 -#define TK_INITIALLY 154 -#define TK_INSTEAD 155 -#define TK_MATCH 156 -#define TK_KEY 157 -#define TK_OF 158 -#define TK_RAISE 159 -#define TK_REPLACE 160 
-#define TK_RESTRICT 161 -#define TK_ROW 162 -#define TK_STATEMENT 163 -#define TK_TRIGGER 164 -#define TK_VIEW 165 -#define TK_ALL 166 -#define TK_COUNT 167 -#define TK_SUM 168 -#define TK_AVG 169 -#define TK_MIN 170 -#define TK_MAX 171 -#define TK_FIRST 172 -#define TK_LAST 173 -#define TK_TOP 174 -#define TK_BOTTOM 175 -#define TK_STDDEV 176 -#define TK_PERCENTILE 177 -#define TK_APERCENTILE 178 -#define TK_LEASTSQUARES 179 -#define TK_HISTOGRAM 180 -#define TK_DIFF 181 -#define TK_SPREAD 182 -#define TK_TWA 183 -#define TK_INTERP 184 -#define TK_LAST_ROW 185 -#define TK_SEMI 186 -#define TK_NONE 187 -#define TK_PREV 188 -#define TK_LINEAR 189 -#define TK_IMPORT 190 -#define TK_METRIC 191 -#define TK_TBNAME 192 -#define TK_JOIN 193 -#define TK_METRICS 194 -#define TK_STABLE 195 +#define TK_UNION 104 +#define TK_ALL 105 +#define TK_FROM 106 +#define TK_VARIABLE 107 +#define TK_INTERVAL 108 +#define TK_FILL 109 +#define TK_SLIDING 110 +#define TK_ORDER 111 +#define TK_BY 112 +#define TK_ASC 113 +#define TK_DESC 114 +#define TK_GROUP 115 +#define TK_HAVING 116 +#define TK_LIMIT 117 +#define TK_OFFSET 118 +#define TK_SLIMIT 119 +#define TK_SOFFSET 120 +#define TK_WHERE 121 +#define TK_NOW 122 +#define TK_RESET 123 +#define TK_QUERY 124 +#define TK_ADD 125 +#define TK_COLUMN 126 +#define TK_TAG 127 +#define TK_CHANGE 128 +#define TK_SET 129 +#define TK_KILL 130 +#define TK_CONNECTION 131 +#define TK_COLON 132 +#define TK_STREAM 133 +#define TK_ABORT 134 +#define TK_AFTER 135 +#define TK_ATTACH 136 +#define TK_BEFORE 137 +#define TK_BEGIN 138 +#define TK_CASCADE 139 +#define TK_CLUSTER 140 +#define TK_CONFLICT 141 +#define TK_COPY 142 +#define TK_DEFERRED 143 +#define TK_DELIMITERS 144 +#define TK_DETACH 145 +#define TK_EACH 146 +#define TK_END 147 +#define TK_EXPLAIN 148 +#define TK_FAIL 149 +#define TK_FOR 150 +#define TK_IGNORE 151 +#define TK_IMMEDIATE 152 +#define TK_INITIALLY 153 +#define TK_INSTEAD 154 +#define TK_MATCH 155 +#define TK_KEY 156 +#define TK_OF 
157 +#define TK_RAISE 158 +#define TK_REPLACE 159 +#define TK_RESTRICT 160 +#define TK_ROW 161 +#define TK_STATEMENT 162 +#define TK_TRIGGER 163 +#define TK_VIEW 164 +#define TK_COUNT 165 +#define TK_SUM 166 +#define TK_AVG 167 +#define TK_MIN 168 +#define TK_MAX 169 +#define TK_FIRST 170 +#define TK_LAST 171 +#define TK_TOP 172 +#define TK_BOTTOM 173 +#define TK_STDDEV 174 +#define TK_PERCENTILE 175 +#define TK_APERCENTILE 176 +#define TK_LEASTSQUARES 177 +#define TK_HISTOGRAM 178 +#define TK_DIFF 179 +#define TK_SPREAD 180 +#define TK_TWA 181 +#define TK_INTERP 182 +#define TK_LAST_ROW 183 +#define TK_RATE 184 +#define TK_IRATE 185 +#define TK_SUM_RATE 186 +#define TK_SUM_IRATE 187 +#define TK_AVG_RATE 188 +#define TK_AVG_IRATE 189 +#define TK_SEMI 190 +#define TK_NONE 191 +#define TK_PREV 192 +#define TK_LINEAR 193 +#define TK_IMPORT 194 +#define TK_METRIC 195 +#define TK_TBNAME 196 +#define TK_JOIN 197 +#define TK_METRICS 198 +#define TK_STABLE 199 +#define TK_INSERT 200 +#define TK_INTO 201 +#define TK_VALUES 202 #endif + + diff --git a/src/inc/tsqlfunction.h b/src/inc/tsqlfunction.h index a5734ed60e5980b7dafc371f7c6fa4457ef6a7a0..2caecb6309d11065653666e786218ca87e5bcce9 100644 --- a/src/inc/tsqlfunction.h +++ b/src/inc/tsqlfunction.h @@ -60,6 +60,13 @@ extern "C" { #define TSDB_FUNC_LAST_DST 26 #define TSDB_FUNC_INTERP 27 +#define TSDB_FUNC_RATE 28 +#define TSDB_FUNC_IRATE 29 +#define TSDB_FUNC_SUM_RATE 30 +#define TSDB_FUNC_SUM_IRATE 31 +#define TSDB_FUNC_AVG_RATE 32 +#define TSDB_FUNC_AVG_IRATE 33 + #define TSDB_FUNCSTATE_SO 0x1U // single output #define TSDB_FUNCSTATE_MO 0x2U // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM #define TSDB_FUNCSTATE_STREAM 0x4U // function avail for stream @@ -162,8 +169,8 @@ typedef struct SExtTagsInfo { // sql function runtime context typedef struct SQLFunctionCtx { int32_t startOffset; - int32_t size; - int32_t order; + int32_t size; // number of rows + int32_t order; // asc|desc int32_t scanFlag; // 
TODO merge with currentStage int16_t inputType; @@ -227,8 +234,6 @@ typedef struct SPatternCompareInfo { int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, int16_t *len, int16_t *interResBytes, int16_t extLength, bool isSuperTable); -SResultInfo *getResultSupportInfo(SQLFunctionCtx *pCtx); - int patternMatch(const char *zPattern, const char *zString, size_t size, const SPatternCompareInfo *pInfo); int WCSPatternMatch(const wchar_t *zPattern, const wchar_t *zString, size_t size, const SPatternCompareInfo *pInfo); @@ -289,10 +294,10 @@ typedef struct STwaInfo { } STwaInfo; /* global sql function array */ -extern struct SQLAggFuncElem aAggs[28]; +extern struct SQLAggFuncElem aAggs[]; /* compatible check array list */ -extern int32_t funcCompatDefList[28]; +extern int32_t funcCompatDefList[]; void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, int32_t type, int64_t *min, int64_t *max, int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull); diff --git a/src/inc/ttime.h b/src/inc/ttime.h index eae24a56b529a5f3d837cdb2df9d60a3064da69f..34c241cbc0f22afc511660cee475c82d08466599 100644 --- a/src/inc/ttime.h +++ b/src/inc/ttime.h @@ -42,6 +42,7 @@ int64_t taosGetTimestamp(int32_t precision); int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts); int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec); +void deltaToUtcInitOnce(); #ifdef __cplusplus } diff --git a/src/inc/ttypes.h b/src/inc/ttypes.h index b2ea8e918a611c692feb76e6a6be05b786874fac..db6490f8404f2b9c0be4f83ce0391ec4dad39a81 100644 --- a/src/inc/ttypes.h +++ b/src/inc/ttypes.h @@ -50,7 +50,7 @@ bool isNull(const char *val, int32_t type); void setNull(char *val, int32_t type, int32_t bytes); void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems); -void assignVal(char *val, char *src, int32_t len, int32_t type); +void assignVal(char *val, const 
char *src, int32_t len, int32_t type); void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); // variant, each number/string/field_id has a corresponding struct during parsing sql @@ -73,7 +73,7 @@ void tVariantCreateFromBinary(tVariant *pVar, char *pz, uint32_t len, uint32_t t void tVariantDestroy(tVariant *pV); -void tVariantAssign(tVariant *pDst, tVariant *pSrc); +void tVariantAssign(tVariant *pDst, const tVariant *pSrc); int32_t tVariantToString(tVariant *pVar, char *dst); diff --git a/src/inc/tutil.h b/src/inc/tutil.h index b26ef2b1c828782dd9f3a28da9ed61576024174d..b66da286973521c1e6cd29db2b2923cfc371be58 100644 --- a/src/inc/tutil.h +++ b/src/inc/tutil.h @@ -37,8 +37,8 @@ extern "C" { #define tfree(x) \ { \ if (x) { \ - free(x); \ - x = NULL; \ + free((void*)(x)); \ + x = 0; \ } \ } @@ -102,8 +102,8 @@ extern "C" { #define GET_FLOAT_VAL(x) taos_align_get_float(x) #define GET_DOUBLE_VAL(x) taos_align_get_double(x) - float taos_align_get_float(char* pBuf); - double taos_align_get_double(char* pBuf); + float taos_align_get_float(const char* pBuf); + double taos_align_get_double(const char* pBuf); //#define __float_align_declear() float __underlyFloat = 0.0; //#define __float_align_declear() @@ -162,22 +162,13 @@ int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstP int32_t taosInitTimer(void (*callback)(int), int32_t ms); -/** - * murmur hash algorithm - * @key usually string - * @len key length - * @seed hash seed - * @out an int32 value - */ -uint32_t MurmurHash3_32(const void *key, int32_t len); - bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len); int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes); bool taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs); -bool taosValidateEncodec(char *encodec); +bool taosValidateEncodec(const char *encodec); bool taosGetVersionNumber(char *versionStr, int *versionNubmer); @@ -189,8 +180,12 @@ static FORCE_INLINE void 
taosEncryptPass(uint8_t *inBuf, unsigned int inLen, cha memcpy(target, context.digest, TSDB_KEY_LEN); } +int taosCheckVersion(char *input_client_version, char *input_server_version, int compared_segments); + char *taosIpStr(uint32_t ipInt); +uint32_t ip2uint(const char *const ip_addr); + #define TAOS_ALLOC_MODE_DEFAULT 0 #define TAOS_ALLOC_MODE_RANDOM_FAIL 1 #define TAOS_ALLOC_MODE_DETECT_LEAK 2 diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index 7442367e91dbd1f972b0bfa703720e6d15be0182..0f490c58b169284a8892e259513fd42ec8d2cc38 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -9,8 +9,15 @@ INCLUDE_DIRECTORIES(inc) IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(./src SRC) LIST(REMOVE_ITEM SRC ./src/shellWindows.c) + LIST(REMOVE_ITEM SRC ./src/shellDarwin.c) ADD_EXECUTABLE(shell ${SRC}) - TARGET_LINK_LIBRARIES(shell taos_static) + + IF (TD_PAGMODE_LITE) + TARGET_LINK_LIBRARIES(shell taos) + ELSE () + TARGET_LINK_LIBRARIES(shell taos_static) + ENDIF () + SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) ELSEIF (TD_WINDOWS_64) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/pthread) @@ -24,7 +31,9 @@ ELSEIF (TD_WINDOWS_64) ELSEIF (TD_DARWIN_64) LIST(APPEND SRC ./src/shellEngine.c) LIST(APPEND SRC ./src/shellMain.c) - LIST(APPEND SRC ./src/shellWindows.c) + LIST(APPEND SRC ./src/shellDarwin.c) + LIST(APPEND SRC ./src/shellCommand.c) + LIST(APPEND SRC ./src/shellImport.c) ADD_EXECUTABLE(shell ${SRC}) TARGET_LINK_LIBRARIES(shell taos_static) SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 499c93e0ec96ddf0b3b655286207cf23b1f5694d..0c8153e3ab615b2ecc33bbc815cc38552967c393 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -84,7 +84,6 @@ int shellDumpResult(TAOS* con, char* fname, int* error_no, bool printMode); void shellPrintNChar(char* str, int width, bool printMode); void 
shellGetGrantInfo(void *con); int isCommentLine(char *line); -#define max(a, b) ((int)(a) < (int)(b) ? (int)(b) : (int)(a)) /**************** Global variable declarations ****************/ extern char PROMPT_HEADER[]; diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c index 46aa04c1d73f43e9279536b4ac31ea7c2dc0dbeb..16545a5fe807fc72ba730a4d0869a82b960878dd 100644 --- a/src/kit/shell/src/shellCommand.c +++ b/src/kit/shell/src/shellCommand.c @@ -19,6 +19,8 @@ #include "shell.h" #include "shellCommand.h" +extern int wcwidth(wchar_t c); +extern int wcswidth(const wchar_t *s, size_t n); typedef struct { char widthInString; char widthOnScreen; diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c new file mode 100644 index 0000000000000000000000000000000000000000..b624f5ee68535026580af25aa962a8f6a79f963e --- /dev/null +++ b/src/kit/shell/src/shellDarwin.c @@ -0,0 +1,532 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define __USE_XOPEN + +#include "os.h" + +#include "shell.h" +#include "shellCommand.h" +#include "tkey.h" + +#define OPT_ABORT 1 /* �Cabort */ + +int indicator = 1; +struct termios oldtio; + +extern int wcwidth(wchar_t c); +void insertChar(Command *cmd, char *c, int size); + + +void printHelp() { + char indent[10] = " "; + printf("taos shell is used to test the TDEngine database\n"); + + printf("%s%s\n", indent, "-h"); + printf("%s%s%s\n", indent, indent, "TDEngine server IP address to connect. The default host is localhost."); + printf("%s%s\n", indent, "-p"); + printf("%s%s%s\n", indent, indent, "The password to use when connecting to the server."); + printf("%s%s\n", indent, "-P"); + printf("%s%s%s\n", indent, indent, "The TCP/IP port number to use for the connection"); + printf("%s%s\n", indent, "-u"); + printf("%s%s%s\n", indent, indent, "The TDEngine user name to use when connecting to the server."); + printf("%s%s\n", indent, "-c"); + printf("%s%s%s\n", indent, indent, "Configuration directory."); + printf("%s%s\n", indent, "-s"); + printf("%s%s%s\n", indent, indent, "Commands to run without enter the shell."); + printf("%s%s\n", indent, "-r"); + printf("%s%s%s\n", indent, indent, "Output time as unsigned long.."); + printf("%s%s\n", indent, "-f"); + printf("%s%s%s\n", indent, indent, "Script to run without enter the shell."); + printf("%s%s\n", indent, "-d"); + printf("%s%s%s\n", indent, indent, "Database to use when connecting to the server."); + printf("%s%s\n", indent, "-t"); + printf("%s%s%s\n", indent, indent, "Time zone of the shell, default is local."); + printf("%s%s\n", indent, "-D"); + printf("%s%s%s\n", indent, indent, "Use multi-thread to import all SQL files in the directory separately."); + printf("%s%s\n", indent, "-T"); + printf("%s%s%s\n", indent, indent, "Number of threads when using multi-thread to import data."); + + exit(EXIT_SUCCESS); +} + +void shellParseArgument(int argc, char *argv[], struct arguments *arguments) { + 
wordexp_t full_path; + for (int i = 1; i < argc; i++) { + // for host + if (strcmp(argv[i], "-h") == 0) { + if (i < argc - 1) { + arguments->host = argv[++i]; + } else { + fprintf(stderr, "option -h requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // for password + else if (strcmp(argv[i], "-p") == 0) { + arguments->is_use_passwd = true; + } + // for management port + else if (strcmp(argv[i], "-P") == 0) { + if (i < argc - 1) { + tsMgmtShellPort = atoi(argv[++i]); + } else { + fprintf(stderr, "option -P requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // for user + else if (strcmp(argv[i], "-u") == 0) { + if (i < argc - 1) { + arguments->user = argv[++i]; + } else { + fprintf(stderr, "option -u requires an argument\n"); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-c") == 0) { + if (i < argc - 1) { + strcpy(configDir, argv[++i]); + } else { + fprintf(stderr, "Option -c requires an argument\n"); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-s") == 0) { + if (i < argc - 1) { + arguments->commands = argv[++i]; + } else { + fprintf(stderr, "option -s requires an argument\n"); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-r") == 0) { + arguments->is_raw_time = true; + } + // For temperory batch commands to run TODO + else if (strcmp(argv[i], "-f") == 0) { + if (i < argc - 1) { + strcpy(arguments->file, argv[++i]); + } else { + fprintf(stderr, "option -f requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // for default database + else if (strcmp(argv[i], "-d") == 0) { + if (i < argc - 1) { + arguments->database = argv[++i]; + } else { + fprintf(stderr, "option -d requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // For time zone + else if (strcmp(argv[i], "-t") == 0) { + if (i < argc - 1) { + arguments->timezone = argv[++i]; + } else { + fprintf(stderr, "option -t requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // For import directory + else if (strcmp(argv[i], "-D") == 0) { + if (i < argc - 1) { 
+ if (wordexp(argv[++i], &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", argv[i]); + exit(EXIT_FAILURE); + } + strcpy(arguments->dir, full_path.we_wordv[0]); + wordfree(&full_path); + } else { + fprintf(stderr, "option -D requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // For time zone + else if (strcmp(argv[i], "-T") == 0) { + if (i < argc - 1) { + arguments->threadNum = atoi(argv[++i]); + } else { + fprintf(stderr, "option -T requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // For temperory command TODO + else if (strcmp(argv[i], "--help") == 0) { + printHelp(); + exit(EXIT_FAILURE); + } else { + fprintf(stderr, "wrong options\n"); + printHelp(); + exit(EXIT_FAILURE); + } + } +} + +void shellReadCommand(TAOS *con, char *command) { + unsigned hist_counter = history.hend; + char utf8_array[10] = "\0"; + Command cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.buffer = (char *)calloc(1, MAX_COMMAND_SIZE); + cmd.command = (char *)calloc(1, MAX_COMMAND_SIZE); + showOnScreen(&cmd); + + // Read input. + char c; + while (1) { + c = getchar(); + + if (c < 0) { // For UTF-8 + int count = countPrefixOnes(c); + utf8_array[0] = c; + for (int k = 1; k < count; k++) { + c = getchar(); + utf8_array[k] = c; + } + insertChar(&cmd, utf8_array, count); + } else if (c < '\033') { + // Ctrl keys. 
TODO: Implement ctrl combinations + switch (c) { + case 1: // ctrl A + positionCursorHome(&cmd); + break; + case 3: + printf("\n"); + resetCommand(&cmd, ""); + kill(0, SIGINT); + break; + case 4: // EOF or Ctrl+D + printf("\n"); + taos_close(con); + // write the history + write_history(); + exitShell(); + break; + case 5: // ctrl E + positionCursorEnd(&cmd); + break; + case 8: + backspaceChar(&cmd); + break; + case '\n': + case '\r': + printf("\n"); + if (isReadyGo(&cmd)) { + sprintf(command, "%s%s", cmd.buffer, cmd.command); + tfree(cmd.buffer); + tfree(cmd.command); + return; + } else { + updateBuffer(&cmd); + } + break; + case 12: // Ctrl + L; + system("clear"); + showOnScreen(&cmd); + break; + } + } else if (c == '\033') { + c = getchar(); + switch (c) { + case '[': + c = getchar(); + switch (c) { + case 'A': // Up arrow + if (hist_counter != history.hstart) { + hist_counter = (hist_counter + MAX_HISTORY_SIZE - 1) % MAX_HISTORY_SIZE; + resetCommand(&cmd, (history.hist[hist_counter] == NULL) ? "" : history.hist[hist_counter]); + } + break; + case 'B': // Down arrow + if (hist_counter != history.hend) { + int next_hist = (hist_counter + 1) % MAX_HISTORY_SIZE; + + if (next_hist != history.hend) { + resetCommand(&cmd, (history.hist[next_hist] == NULL) ? 
"" : history.hist[next_hist]); + } else { + resetCommand(&cmd, ""); + } + hist_counter = next_hist; + } + break; + case 'C': // Right arrow + moveCursorRight(&cmd); + break; + case 'D': // Left arrow + moveCursorLeft(&cmd); + break; + case '1': + if ((c = getchar()) == '~') { + // Home key + positionCursorHome(&cmd); + } + break; + case '2': + if ((c = getchar()) == '~') { + // Insert key + } + break; + case '3': + if ((c = getchar()) == '~') { + // Delete key + deleteChar(&cmd); + } + break; + case '4': + if ((c = getchar()) == '~') { + // End key + positionCursorEnd(&cmd); + } + break; + case '5': + if ((c = getchar()) == '~') { + // Page up key + } + break; + case '6': + if ((c = getchar()) == '~') { + // Page down key + } + break; + case 72: + // Home key + positionCursorHome(&cmd); + break; + case 70: + // End key + positionCursorEnd(&cmd); + break; + } + break; + } + } else if (c == 0x7f) { + // press delete key + backspaceChar(&cmd); + } else { + insertChar(&cmd, &c, 1); + } + } +} + +void *shellLoopQuery(void *arg) { + if (indicator) { + get_old_terminal_mode(&oldtio); + indicator = 0; + } + + TAOS *con = (TAOS *)arg; + + pthread_cleanup_push(cleanup_handler, NULL); + + char *command = malloc(MAX_COMMAND_SIZE); + if (command == NULL){ + tscError("failed to malloc command"); + return NULL; + } + while (1) { + // Read command from shell. 
+ + memset(command, 0, MAX_COMMAND_SIZE); + set_terminal_mode(); + shellReadCommand(con, command); + reset_terminal_mode(); + + // Run the command + shellRunCommand(con, command); + } + + pthread_cleanup_pop(1); + + return NULL; +} + +void shellPrintNChar(char *str, int width, bool printMode) { + int col_left = width; + wchar_t wc; + while (col_left > 0) { + if (*str == '\0') break; + char *tstr = str; + int byte_width = mbtowc(&wc, tstr, MB_CUR_MAX); + if (byte_width <= 0) break; + int col_width = wcwidth(wc); + if (col_width <= 0) { + str += byte_width; + continue; + } + if (col_left < col_width) break; + printf("%lc", wc); + str += byte_width; + col_left -= col_width; + } + + while (col_left > 0) { + printf(" "); + col_left--; + } + + if (!printMode) { + printf("|"); + } else { + printf("\n"); + } +} + +int get_old_terminal_mode(struct termios *tio) { + /* Make sure stdin is a terminal. */ + if (!isatty(STDIN_FILENO)) { + return -1; + } + + // Get the parameter of current terminal + if (tcgetattr(0, &oldtio) != 0) { + return -1; + } + + return 1; +} + +void reset_terminal_mode() { + if (tcsetattr(0, TCSANOW, &oldtio) != 0) { + fprintf(stderr, "Fail to reset the terminal properties!\n"); + exit(EXIT_FAILURE); + } +} + +void set_terminal_mode() { + struct termios newtio; + + /* if (atexit(reset_terminal_mode) != 0) { */ + /* fprintf(stderr, "Error register exit function!\n"); */ + /* exit(EXIT_FAILURE); */ + /* } */ + + memcpy(&newtio, &oldtio, sizeof(oldtio)); + + // Set new terminal attributes. 
+ newtio.c_iflag &= ~(IXON | IXOFF | ICRNL | INLCR | IGNCR | IMAXBEL | ISTRIP); + newtio.c_iflag |= IGNBRK; + + // newtio.c_oflag &= ~(OPOST|ONLCR|OCRNL|ONLRET); + newtio.c_oflag |= OPOST; + newtio.c_oflag |= ONLCR; + newtio.c_oflag &= ~(OCRNL | ONLRET); + + newtio.c_lflag &= ~(IEXTEN | ICANON | ECHO | ECHOE | ECHONL | ECHOCTL | ECHOPRT | ECHOKE | ISIG); + newtio.c_cc[VMIN] = 1; + newtio.c_cc[VTIME] = 0; + + if (tcsetattr(0, TCSANOW, &newtio) != 0) { + fprintf(stderr, "Fail to set terminal properties!\n"); + exit(EXIT_FAILURE); + } +} + +void get_history_path(char *history) { sprintf(history, "%s/%s", getpwuid(getuid())->pw_dir, HISTORY_FILE); } + +void clearScreen(int ecmd_pos, int cursor_pos) { + struct winsize w; + ioctl(0, TIOCGWINSZ, &w); + + int cursor_x = cursor_pos / w.ws_col; + int cursor_y = cursor_pos % w.ws_col; + int command_x = ecmd_pos / w.ws_col; + positionCursor(cursor_y, LEFT); + positionCursor(command_x - cursor_x, DOWN); + fprintf(stdout, "\033[2K"); + for (int i = 0; i < command_x; i++) { + positionCursor(1, UP); + fprintf(stdout, "\033[2K"); + } + fflush(stdout); +} + +void showOnScreen(Command *cmd) { + struct winsize w; + if (ioctl(0, TIOCGWINSZ, &w) < 0 || w.ws_col == 0 || w.ws_row == 0) { + fprintf(stderr, "No stream device\n"); + exit(EXIT_FAILURE); + } + + wchar_t wc; + int size = 0; + + // Print out the command. 
+ char *total_string = malloc(MAX_COMMAND_SIZE); + memset(total_string, '\0', MAX_COMMAND_SIZE); + if (strcmp(cmd->buffer, "") == 0) { + sprintf(total_string, "%s%s", PROMPT_HEADER, cmd->command); + } else { + sprintf(total_string, "%s%s", CONTINUE_PROMPT, cmd->command); + } + + int remain_column = w.ws_col; + /* size = cmd->commandSize + prompt_size; */ + for (char *str = total_string; size < cmd->commandSize + prompt_size;) { + int ret = mbtowc(&wc, str, MB_CUR_MAX); + if (ret < 0) break; + size += ret; + /* assert(size >= 0); */ + int width = wcwidth(wc); + if (remain_column > width) { + printf("%lc", wc); + remain_column -= width; + } else { + if (remain_column == width) { + printf("%lc\n\r", wc); + remain_column = w.ws_col; + } else { + printf("\n\r%lc", wc); + remain_column = w.ws_col - width; + } + } + + str = total_string + size; + } + + free(total_string); + /* for (int i = 0; i < size; i++){ */ + /* char c = total_string[i]; */ + /* if (k % w.ws_col == 0) { */ + /* printf("%c\n\r", c); */ + /* } */ + /* else { */ + /* printf("%c", c); */ + /* } */ + /* k += 1; */ + /* } */ + + // Position the cursor + int cursor_pos = cmd->screenOffset + prompt_size; + int ecmd_pos = cmd->endOffset + prompt_size; + + int cursor_x = cursor_pos / w.ws_col; + int cursor_y = cursor_pos % w.ws_col; + // int cursor_y = cursor % w.ws_col; + int command_x = ecmd_pos / w.ws_col; + int command_y = ecmd_pos % w.ws_col; + // int command_y = (command.size() + prompt_size) % w.ws_col; + positionCursor(command_y, LEFT); + positionCursor(command_x, UP); + positionCursor(cursor_x, DOWN); + positionCursor(cursor_y, RIGHT); + fflush(stdout); +} + +void cleanup_handler(void *arg) { tcsetattr(0, TCSANOW, &oldtio); } + +void exitShell() { + tcsetattr(0, TCSANOW, &oldtio); + exit(EXIT_SUCCESS); +} diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index c1caf6147d9e1d931cd3485f609834828a4a89bf..27a4aaaa0cd24d84b46003ec3195f130d3ee8c85 100644 --- 
a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -21,27 +21,13 @@ #include "shellCommand.h" #include "ttime.h" #include "tutil.h" +#include "taoserror.h" + #include /**************** Global variables ****************/ -#ifdef WINDOWS - char CLIENT_VERSION[] = "Welcome to the TDengine shell from windows, client version:%s "; -#elif defined(DARWIN) - char CLIENT_VERSION[] = "Welcome to the TDengine shell from mac, client version:%s "; -#else - #ifdef CLUSTER - char CLIENT_VERSION[] = "Welcome to the TDengine shell from linux, enterprise client version:%s "; - #else - char CLIENT_VERSION[] = "Welcome to the TDengine shell from linux, community client version:%s "; - #endif -#endif - -#ifdef CLUSTER - char SERVER_VERSION[] = "enterprise server version:%s\nCopyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; -#else - char SERVER_VERSION[] = "community server version:%s\nCopyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; -#endif - +char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2017 by TAOS Data, Inc. 
All rights reserved.\n\n"; char PROMPT_HEADER[] = "taos> "; char CONTINUE_PROMPT[] = " -> "; int prompt_size = 6; @@ -53,7 +39,7 @@ History history; */ TAOS *shellInit(struct arguments *args) { printf("\n"); - printf(CLIENT_VERSION, taos_get_client_info()); + printf(CLIENT_VERSION, osName, taos_get_client_info()); fflush(stdout); // set options before initializing @@ -110,7 +96,7 @@ TAOS *shellInit(struct arguments *args) { exit(EXIT_SUCCESS); } -#ifdef LINUX +#ifndef WINDOWS if (args->dir[0] != 0) { source_dir(con, args); taos_close(con); @@ -118,8 +104,6 @@ TAOS *shellInit(struct arguments *args) { } #endif - printf(SERVER_VERSION, taos_get_server_info(con)); - return con; } @@ -159,6 +143,8 @@ void shellReplaceCtrlChar(char *str) { } break; default: + *pstr = *str; + pstr++; break; } ctrlOn = false; @@ -292,7 +278,6 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { if (fname != NULL) { wordfree(&full_path); } - return; } /* Function to do regular expression check */ @@ -373,29 +358,29 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { for (int col = 0; col < num_fields; col++) { switch (fields[col].type) { case TSDB_DATA_TYPE_BOOL: - l[col] = max(BOOL_OUTPUT_LENGTH, strlen(fields[col].name)); + l[col] = MAX(BOOL_OUTPUT_LENGTH, strlen(fields[col].name)); break; case TSDB_DATA_TYPE_TINYINT: - l[col] = max(TINYINT_OUTPUT_LENGTH, strlen(fields[col].name)); + l[col] = MAX(TINYINT_OUTPUT_LENGTH, strlen(fields[col].name)); break; case TSDB_DATA_TYPE_SMALLINT: - l[col] = max(SMALLINT_OUTPUT_LENGTH, strlen(fields[col].name)); + l[col] = MAX(SMALLINT_OUTPUT_LENGTH, strlen(fields[col].name)); break; case TSDB_DATA_TYPE_INT: - l[col] = max(INT_OUTPUT_LENGTH, strlen(fields[col].name)); + l[col] = MAX(INT_OUTPUT_LENGTH, strlen(fields[col].name)); break; case TSDB_DATA_TYPE_BIGINT: - l[col] = max(BIGINT_OUTPUT_LENGTH, strlen(fields[col].name)); + l[col] = MAX(BIGINT_OUTPUT_LENGTH, strlen(fields[col].name)); break; case 
TSDB_DATA_TYPE_FLOAT: - l[col] = max(FLOAT_OUTPUT_LENGTH, strlen(fields[col].name)); + l[col] = MAX(FLOAT_OUTPUT_LENGTH, strlen(fields[col].name)); break; case TSDB_DATA_TYPE_DOUBLE: - l[col] = max(DOUBLE_OUTPUT_LENGTH, strlen(fields[col].name)); + l[col] = MAX(DOUBLE_OUTPUT_LENGTH, strlen(fields[col].name)); break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: - l[col] = max(fields[col].bytes, strlen(fields[col].name)); + l[col] = MAX(fields[col].bytes, strlen(fields[col].name)); /* l[col] = max(BINARY_OUTPUT_LENGTH, strlen(fields[col].name)); */ break; case TSDB_DATA_TYPE_TIMESTAMP: { @@ -406,7 +391,7 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) { defaultWidth += 3; } - l[col] = max(defaultWidth, strlen(fields[col].name)); + l[col] = MAX(defaultWidth, strlen(fields[col].name)); break; } @@ -452,28 +437,18 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { printf("%*d|", l[i], *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - printf("%*lld|", l[i], *((int64_t *)row[i])); + printf("%*" PRId64 "|", l[i], *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: { -#ifdef _TD_ARM_32_ float fv = 0; - //memcpy(&fv, row[i], sizeof(float)); - *(int32_t*)(&fv) = *(int32_t*)row[i]; + fv = GET_FLOAT_VAL(row[i]); printf("%*.5f|", l[i], fv); -#else - printf("%*.5f|", l[i], *((float *)row[i])); -#endif - } + } break; case TSDB_DATA_TYPE_DOUBLE: { -#ifdef _TD_ARM_32_ double dv = 0; - //memcpy(&dv, row[i], sizeof(double)); - *(int64_t*)(&dv) = *(int64_t*)row[i]; + dv = GET_DOUBLE_VAL(row[i]); printf("%*.9f|", l[i], dv); -#else - printf("%*.9f|", l[i], *((double *)row[i])); -#endif } break; case TSDB_DATA_TYPE_BINARY: @@ -487,7 +462,7 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { break; case TSDB_DATA_TYPE_TIMESTAMP: if (args.is_raw_time) { - printf(" %lld|", *(int64_t *)row[i]); + printf(" %" PRId64 "|", 
*(int64_t *)row[i]); } else { if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) { tt = (time_t)((*(int64_t *)row[i]) / 1000000); @@ -537,40 +512,30 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { printf("%d\n", *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - printf("%lld\n", *((int64_t *)row[i])); + printf("%" PRId64 "\n", *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: { -#ifdef _TD_ARM_32_ float fv = 0; - //memcpy(&fv, row[i], sizeof(float)); - *(int32_t*)(&fv) = *(int32_t*)row[i]; + fv = GET_FLOAT_VAL(row[i]); printf("%.5f\n", fv); -#else - printf("%.5f\n", *((float *)row[i])); -#endif - } + } break; case TSDB_DATA_TYPE_DOUBLE: { -#ifdef _TD_ARM_32_ double dv = 0; - //memcpy(&dv, row[i], sizeof(double)); - *(int64_t*)(&dv) = *(int64_t*)row[i]; + dv = GET_DOUBLE_VAL(row[i]); printf("%.9f\n", dv); -#else - printf("%.9f\n", *((double *)row[i])); -#endif } break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: memset(t_str, 0, TSDB_MAX_BYTES_PER_ROW); memcpy(t_str, row[i], fields[i].bytes); - l[i] = max(fields[i].bytes, strlen(fields[i].name)); + l[i] = MAX(fields[i].bytes, strlen(fields[i].name)); shellPrintNChar(t_str, l[i], printMode); break; case TSDB_DATA_TYPE_TIMESTAMP: if (args.is_raw_time) { - printf("%lld\n", *(int64_t *)row[i]); + printf("%" PRId64 "\n", *(int64_t *)row[i]); } else { if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) { tt = (time_t)((*(int64_t *)row[i]) / 1000000); @@ -625,28 +590,18 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { fprintf(fp, "%d", *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - fprintf(fp, "%lld", *((int64_t *)row[i])); + fprintf(fp, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: { -#ifdef _TD_ARM_32_ float fv = 0; - //memcpy(&fv, row[i], sizeof(float)); - *(int32_t*)(&fv) = *(int32_t*)row[i]; + fv = GET_FLOAT_VAL(row[i]); fprintf(fp, "%.5f", fv); -#else - fprintf(fp, "%.5f", 
*((float *)row[i])); -#endif - } + } break; case TSDB_DATA_TYPE_DOUBLE: { -#ifdef _TD_ARM_32_ double dv = 0; - //memcpy(&dv, row[i], sizeof(double)); - *(int64_t*)(&dv) = *(int64_t*)row[i]; + dv = GET_DOUBLE_VAL(row[i]); fprintf(fp, "%.9f", dv); -#else - fprintf(fp, "%.9f", *((double *)row[i])); -#endif } break; case TSDB_DATA_TYPE_BINARY: @@ -657,7 +612,7 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) { break; case TSDB_DATA_TYPE_TIMESTAMP: if (args.is_raw_time) { - fprintf(fp, "%lld", *(int64_t *)row[i]); + fprintf(fp, "%" PRId64, *(int64_t *)row[i]); } else { if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) { tt = (time_t)((*(int64_t *)row[i]) / 1000000); @@ -779,21 +734,32 @@ int isCommentLine(char *line) { void source_file(TAOS *con, char *fptr) { wordexp_t full_path; int read_len = 0; - char * cmd = malloc(MAX_COMMAND_SIZE); + char * cmd = calloc(1, MAX_COMMAND_SIZE); size_t cmd_len = 0; char * line = NULL; size_t line_len = 0; if (wordexp(fptr, &full_path, 0) != 0) { fprintf(stderr, "ERROR: illegal file name\n"); + free(cmd); return; } char *fname = full_path.we_wordv[0]; - if (access(fname, R_OK) == -1) { + if (access(fname, F_OK) != 0) { + fprintf(stderr, "ERROR: file %s is not exist\n", fptr); + + wordfree(&full_path); + free(cmd); + return; + } + + if (access(fname, R_OK) != 0) { fprintf(stderr, "ERROR: file %s is not readable\n", fptr); + wordfree(&full_path); + free(cmd); return; } @@ -801,6 +767,7 @@ void source_file(TAOS *con, char *fptr) { if (f == NULL) { fprintf(stderr, "ERROR: failed to open file %s\n", fname); wordfree(&full_path); + free(cmd); return; } @@ -833,11 +800,16 @@ void source_file(TAOS *con, char *fptr) { } void shellGetGrantInfo(void *con) { -#ifdef CLUSTER char sql[] = "show grants"; - if (taos_query(con, sql)) { - fprintf(stdout, "\n"); + int code = taos_query(con, sql); + + if (code != TSDB_CODE_SUCCESS) { + if (code == TSDB_CODE_OPS_NOT_SUPPORT) { + fprintf(stdout, "Server is 
Community Edition, version is %s\n\n", taos_get_server_info(con)); + } else { + fprintf(stderr, "Failed to check Server Edition, Reason:%d:%s\n\n", taos_errno(con), taos_errstr(con)); + } return; } @@ -859,18 +831,18 @@ void shellGetGrantInfo(void *con) { exit(0); } - char version[32] = {0}; + char serverVersion[32] = {0}; char expiretime[32] = {0}; char expired[32] = {0}; - memcpy(version, row[0], fields[0].bytes); + memcpy(serverVersion, row[0], fields[0].bytes); memcpy(expiretime, row[1], fields[1].bytes); memcpy(expired, row[2], fields[2].bytes); if (strcmp(expiretime, "unlimited") == 0) { - fprintf(stdout, "This is the %s version and will never expire.\n", version); + fprintf(stdout, "Server is Enterprise %s Edition, version is %s and will never expire.\n", serverVersion, taos_get_server_info(con)); } else { - fprintf(stdout, "This is the %s version and will expire at %s.\n", version, expiretime); + fprintf(stdout, "Server is Enterprise %s Edition, version is %s and will expire at %s.\n", serverVersion, taos_get_server_info(con), expiretime); } taos_free_result(result); @@ -878,5 +850,4 @@ void shellGetGrantInfo(void *con) { } fprintf(stdout, "\n"); -#endif } diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index 3292aa8e04bba29b8e3636a3b4988ae2f4cd8362..dd04f935e7a30f6a8775b831c3ec726855f520f4 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -90,20 +90,12 @@ static void shellParseDirectory(const char *directoryName, const char *prefix, c static void shellCheckTablesSQLFile(const char *directoryName) { - char cmd[1024] = { 0 }; - sprintf(cmd, "ls %s/tables.sql", directoryName); - - FILE *fp = popen(cmd, "r"); - if (fp == NULL) { - fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); - exit(0); - } + sprintf(shellTablesSQLFile, "%s/tables.sql", directoryName); - while (fscanf(fp, "%s", shellTablesSQLFile)) { - break; + struct stat fstat; + if 
(stat(shellTablesSQLFile, &fstat) < 0) { + shellTablesSQLFile[0] = 0; } - - pclose(fp); } static void shellMallocSQLFiles() @@ -153,10 +145,20 @@ static void shellSourceFile(TAOS *con, char *fptr) { } char *fname = full_path.we_wordv[0]; - - if (access(fname, R_OK) == -1) { + + if (access(fname, F_OK) != 0) { + fprintf(stderr, "ERROR: file %s is not exist\n", fptr); + + wordfree(&full_path); + free(cmd); + return; + } + + if (access(fname, R_OK) != 0) { fprintf(stderr, "ERROR: file %s is not readable\n", fptr); + wordfree(&full_path); + free(cmd); return; } diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 67df2ea1614f59ed0d5807952324d77b76118f36..081b9eae319f3570ab11b67e075292648dd76161 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -26,6 +26,7 @@ int indicator = 1; struct termios oldtio; +extern int wcwidth(wchar_t c); void insertChar(Command *cmd, char *c, int size); const char *argp_program_version = version; const char *argp_program_bug_address = ""; @@ -118,18 +119,18 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { static struct argp argp = {options, parse_opt, args_doc, doc}; void shellParseArgument(int argc, char *argv[], struct arguments *arguments) { - char verType[32] = {0}; - #ifdef CLUSTER - sprintf(verType, "enterprise version: %s\n", version); - #else - sprintf(verType, "community version: %s\n", version); - #endif - + static char verType[32] = {0}; + sprintf(verType, "version: %s\n", version); + argp_program_version = verType; argp_parse(&argp, argc, argv, 0, 0, arguments); if (arguments->abort) { - error(10, 0, "ABORTED"); + #ifndef _ALPINE + error(10, 0, "ABORTED"); + #else + abort(); + #endif } } @@ -290,7 +291,10 @@ void *shellLoopQuery(void *arg) { pthread_cleanup_push(cleanup_handler, NULL); char *command = malloc(MAX_COMMAND_SIZE); - + if (command == NULL){ + tscError("failed to malloc command"); + return NULL; + } while (1) { // Read command 
from shell. @@ -299,10 +303,8 @@ void *shellLoopQuery(void *arg) { shellReadCommand(con, command); reset_terminal_mode(); - if (command != NULL) { - // Run the command - shellRunCommand(con, command); - } + // Run the command + shellRunCommand(con, command); } pthread_cleanup_pop(1); diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index 599875fa6086276e0b7ea80b97da1e3791a4f73e..d47e6a06dfccf955733d270e93cfa28cc5331403 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -9,5 +9,11 @@ INCLUDE_DIRECTORIES(inc) IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(taosdemo ${SRC}) - TARGET_LINK_LIBRARIES(taosdemo taos_static) + + IF (TD_PAGMODE_LITE) + TARGET_LINK_LIBRARIES(taosdemo taos) + ELSE () + TARGET_LINK_LIBRARIES(taosdemo taos_static) + ENDIF () + ENDIF () diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 37530e1e8ce87d3a2a8e0a74abbb154ead6b770f..24855ab8b51a87aed91ac10e8278941fe60bfee1 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -17,7 +17,11 @@ #include #include +#include + +#ifndef _ALPINE #include +#endif #include #include #include @@ -33,8 +37,6 @@ extern char configDir[]; -#pragma GCC diagnostic ignored "-Wmissing-braces" - #define BUFFER_SIZE 65536 #define MAX_DB_NAME_SIZE 64 #define MAX_TB_NAME_SIZE 64 @@ -267,30 +269,35 @@ double getCurrentTime(); void callBack(void *param, TAOS_RES *res, int code); int main(int argc, char *argv[]) { - struct arguments arguments = {NULL, - 0, - "root", - "taosdata", - "test", - "t", - false, - false, - "./output.txt", - 0, - "int", - "", + struct arguments arguments = {NULL, // host + 0, // port + "root", // user + "taosdata", // password + "test", // database + "t", // tb_prefix + false, // use_metric + false, // insert_only + "./output.txt", // output_file + 0, // mode + { + "int", // datatype "", "", "", "", "", "", - 8, - 1, - 1, - 1, - 1, - 50000}; + "" + 
}, + 8, // len_of_binary + 1, // num_of_CPR + 1, // num_of_connections + 1, // num_of_RPR + 1, // num_of_tables + 50000, // num_of_DPT + 0, // abort + NULL // arg_list + }; /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. */ @@ -306,7 +313,13 @@ int main(int argc, char *argv[]) { argp_parse(&argp, argc, argv, 0, 0, &arguments); - if (arguments.abort) error(10, 0, "ABORTED"); + if (arguments.abort) { + #ifndef _ALPINE + error(10, 0, "ABORTED"); + #else + abort(); + #endif + } enum MODE query_mode = arguments.mode; char *ip_addr = arguments.host; @@ -339,6 +352,11 @@ int main(int argc, char *argv[]) { } FILE *fp = fopen(arguments.output_file, "a"); + if (NULL == fp) { + fprintf(stderr, "Failed to open %s for writing\n", arguments.output_file); + return 1; + }; + time_t tTime = time(NULL); struct tm tm = *localtime(&tTime); @@ -558,7 +576,7 @@ void *readTable(void *sarg) { double totalT = 0; int count = 0; for (int i = 0; i < num_of_tables; i++) { - sprintf(command, "select %s from %s%d where ts>= %ld", aggreFunc[j], tb_prefix, i, sTime); + sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime); double t = getCurrentTime(); if (taos_query(taos, command) != 0) { @@ -801,7 +819,7 @@ double getCurrentTime() { void generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary) { memset(res, 0, MAX_DATA_SIZE); char *pstr = res; - pstr += sprintf(pstr, "(%ld", timestamp); + pstr += sprintf(pstr, "(%" PRId64, timestamp); int c = 0; for (; c < MAX_NUM_DATATYPE; c++) { @@ -818,7 +836,7 @@ void generateData(char *res, char **data_type, int num_of_cols, int64_t timestam } else if (strcasecmp(data_type[i % c], "int") == 0) { pstr += sprintf(pstr, ", %d", (int)(rand() % 10)); } else if (strcasecmp(data_type[i % c], "bigint") == 0) { - pstr += sprintf(pstr, ", %ld", rand() % 2147483648); + pstr += sprintf(pstr, ", %" PRId64, rand() % 2147483648); } else if 
(strcasecmp(data_type[i % c], "float") == 0) { pstr += sprintf(pstr, ", %10.4f", (float)(rand() / 1000)); } else if (strcasecmp(data_type[i % c], "double") == 0) { @@ -830,7 +848,7 @@ void generateData(char *res, char **data_type, int num_of_cols, int64_t timestam } else if (strcasecmp(data_type[i % c], "binary") == 0) { char s[len_of_binary]; rand_string(s, len_of_binary); - pstr += sprintf(pstr, ", %s", s); + pstr += sprintf(pstr, ", \"%s\"", s); } } diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt index 76b40d1c2d278bd8f13829a719e23c67402bec2e..5b54540782010c38a6e27aa2f3e6766925618651 100644 --- a/src/kit/taosdump/CMakeLists.txt +++ b/src/kit/taosdump/CMakeLists.txt @@ -9,5 +9,12 @@ INCLUDE_DIRECTORIES(inc) IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(taosdump ${SRC}) - TARGET_LINK_LIBRARIES(taosdump taos_static) + + IF (TD_PAGMODE_LITE) + TARGET_LINK_LIBRARIES(taosdump taos) + ELSE () + TARGET_LINK_LIBRARIES(taosdump taos_static) + ENDIF () + + ENDIF () diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index c8ef3bc0481c72d284485d7c369db97de6d780e9..8cf015b342649ff4e099a51a2c4b7fe841da3db3 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -15,7 +15,9 @@ #include #include -#include +#ifndef _ALPINE + #include +#endif #include #include #include @@ -335,7 +337,13 @@ int main(int argc, char *argv[]) { reflected in arguments. 
*/ argp_parse(&argp, argc, argv, 0, 0, &arguments); - if (arguments.abort) error(10, 0, "ABORTED"); + if (arguments.abort) { + #ifndef _ALPINE + error(10, 0, "ABORTED"); + #else + abort(); + #endif + } if (taosCheckParam(&arguments) < 0) { exit(EXIT_FAILURE); @@ -789,7 +797,10 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI if (metric != NULL && metric[0] != '\0') { // dump metric definition count = taosGetTableDes(metric, tableDes); - if (count < 0) return -1; + if (count < 0) { + free(tableDes); + return -1; + } taosDumpCreateTableClause(tableDes, count, arguments, fp); @@ -797,18 +808,26 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI count = taosGetTableDes(table, tableDes); - if (count < 0) return -1; + if (count < 0) { + free(tableDes); + return -1; + } taosDumpCreateMTableClause(tableDes, metric, count, arguments, fp); } else { // dump table definition count = taosGetTableDes(table, tableDes); - if (count < 0) return -1; + if (count < 0) { + free(tableDes); + return -1; + } taosDumpCreateTableClause(tableDes, count, arguments, fp); } + free(tableDes); + return taosDumpTableData(fp, table, arguments); } @@ -871,7 +890,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { if (arguments->schemaonly) return 0; - sprintf(command, "select * from %s where _c0 >= %ld and _c0 <= %ld order by _c0 asc", tbname, arguments->start_time, + sprintf(command, "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", tbname, arguments->start_time, arguments->end_time); if (taos_query(taos, command) != 0) { fprintf(stderr, "failed to run command %s, reason: %s\n", command, taos_errstr(taos)); @@ -925,13 +944,13 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { pstr += sprintf(pstr, "%d", *((int *)row[col])); break; case TSDB_DATA_TYPE_BIGINT: - pstr += sprintf(pstr, "%ld", *((int64_t *)row[col])); + pstr += sprintf(pstr, 
"%" PRId64 "", *((int64_t *)row[col])); break; case TSDB_DATA_TYPE_FLOAT: - pstr += sprintf(pstr, "%f", *((float *)row[col])); + pstr += sprintf(pstr, "%f", GET_FLOAT_VAL(row[col])); break; case TSDB_DATA_TYPE_DOUBLE: - pstr += sprintf(pstr, "%f", *((double *)row[col])); + pstr += sprintf(pstr, "%f", GET_DOUBLE_VAL(row[col])); break; case TSDB_DATA_TYPE_BINARY: *(pstr++) = '\''; @@ -944,7 +963,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { pstr += sprintf(pstr, "\'%s\'", tbuf); break; case TSDB_DATA_TYPE_TIMESTAMP: - pstr += sprintf(pstr, "%ld", *(int64_t *)row[col]); + pstr += sprintf(pstr, "%" PRId64 "", *(int64_t *)row[col]); break; default: break; @@ -1126,7 +1145,7 @@ int taosDumpIn(struct arguments *arguments) { } taosReplaceCtrlChar(tcommand); if (taos_query(taos, tcommand) != 0) - fprintf(stderr, "linenu: %ld failed to run command %s reason:%s \ncontinue...\n", linenu, command, + fprintf(stderr, "linenu: %" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command, taos_errstr(taos)); pstr = command; @@ -1174,7 +1193,7 @@ int taosDumpIn(struct arguments *arguments) { } taosReplaceCtrlChar(tcommand); if (taos_query(taos, tcommand) != 0) - fprintf(stderr, "linenu:%ld failed to run command %s reason: %s \ncontinue...\n", linenu, command, + fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason: %s \ncontinue...\n", linenu, command, taos_errstr(taos)); } @@ -1197,7 +1216,7 @@ int taosDumpIn(struct arguments *arguments) { } taosReplaceCtrlChar(lcommand); if (taos_query(taos, tcommand) != 0) - fprintf(stderr, "linenu:%ld failed to run command %s reason:%s \ncontinue...\n", linenu, command, + fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command, taos_errstr(taos)); } diff --git a/src/modules/http/src/gcJson.c b/src/modules/http/src/gcJson.c index 7f43cb9daba667052f1b3a19b3713a41aeeccba1..8f596337146a3937df72287f332917b3bffa21ac 100644 --- 
a/src/modules/http/src/gcJson.c +++ b/src/modules/http/src/gcJson.c @@ -121,7 +121,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, } cmd->numOfRows += numOfRows; - for (int i = 0; i < numOfRows; ++i) { + for (int k = 0; k < numOfRows; ++k) { TAOS_ROW row = taos_fetch_row(result); // for group by @@ -161,6 +161,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, if(i < num_fields - 1 ){ len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, ", "); } + } len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "}"); diff --git a/src/modules/http/src/httpHandle.c b/src/modules/http/src/httpHandle.c index c736825b3750d67a92dd43624ab4383ae1f1af34..b46fa11cde6cdf93bba737ccbc223f0f9de910be 100644 --- a/src/modules/http/src/httpHandle.c +++ b/src/modules/http/src/httpHandle.c @@ -279,8 +279,7 @@ bool httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) { httpParseChunkedBody(pContext, pParser, false); return HTTP_CHECK_BODY_SUCCESS; } else { - httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, - pContext->ipstr); + httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr); if (!httpReadDataImp(pContext)) { httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr); return HTTP_CHECK_BODY_ERROR; diff --git a/src/modules/http/src/httpJson.c b/src/modules/http/src/httpJson.c index 2bb768e8016d27eb53fe15223e2c70134493176b..5d5d29f4e0fae91c8e30bfb3aaa78fb440b8a188 100644 --- a/src/modules/http/src/httpJson.c +++ b/src/modules/http/src/httpJson.c @@ -119,7 +119,7 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) { return 0; // there is no data to dump. 
} else { int len = sprintf(sLen, "%lx\r\n", srcLen); - httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%ld, response:\n%s", + httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", response:\n%s", buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, buf->buf); httpWriteBufNoTrace(buf->pContext, sLen, len); remain = httpWriteBufNoTrace(buf->pContext, buf->buf, (int) srcLen); @@ -131,7 +131,7 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) { if (ret == 0) { if (compressBufLen > 0) { int len = sprintf(sLen, "%x\r\n", compressBufLen); - httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%ld, compressSize:%d, last:%d, response:\n%s", + httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", compressSize:%d, last:%d, response:\n%s", buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, compressBufLen, isTheLast, buf->buf); httpWriteBufNoTrace(buf->pContext, sLen, len); remain = httpWriteBufNoTrace(buf->pContext, (const char *) compressBuf, (int) compressBufLen); @@ -257,7 +257,7 @@ void httpJsonStringForTransMean(JsonBuf* buf, char* sVal, int maxLen) { void httpJsonInt64(JsonBuf* buf, int64_t num) { httpJsonItemToken(buf); httpJsonTestBuf(buf, MAX_NUM_STR_SZ); - buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%ld", num); + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%" PRId64, num); } void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us) { @@ -310,7 +310,9 @@ void httpJsonInt(JsonBuf* buf, int num) { void httpJsonFloat(JsonBuf* buf, float num) { httpJsonItemToken(buf); httpJsonTestBuf(buf, MAX_NUM_STR_SZ); - if (num > 1E10 || num < -1E10) { + if (isinf(num) || isnan(num)) { + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "null"); + } else if (num > 1E10 || num < -1E10) { buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.5e", num); } else { buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.5f", num); @@ -320,7 +322,9 @@ void httpJsonFloat(JsonBuf* buf, float num) { void 
httpJsonDouble(JsonBuf* buf, double num) { httpJsonItemToken(buf); httpJsonTestBuf(buf, MAX_NUM_STR_SZ); - if (num > 1E10 || num < -1E10) { + if (isinf(num) || isnan(num)) { + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "null"); + } else if (num > 1E10 || num < -1E10) { buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.9e", num); } else { buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.9f", num); diff --git a/src/modules/http/src/httpServer.c b/src/modules/http/src/httpServer.c index 01d3ef40f2d5a82821842e0b27c2e648d52bf237..171f811b7d4f07d6ad2a5836669e9c3782ec0e41 100644 --- a/src/modules/http/src/httpServer.c +++ b/src/modules/http/src/httpServer.c @@ -101,12 +101,12 @@ void httpFreeContext(HttpServer *pServer, HttpContext *pContext) { void httpCleanUpContextTimer(HttpContext *pContext) { if (pContext->timer != NULL) { taosTmrStopA(&pContext->timer); - httpTrace("context:%p, ip:%s, close timer:%p", pContext, pContext->ipstr, pContext->timer); + //httpTrace("context:%p, ip:%s, close timer:%p", pContext, pContext->ipstr, pContext->timer); pContext->timer = NULL; } } -void httpCleanUpContext(HttpContext *pContext) { +void httpCleanUpContext(HttpContext *pContext, void *unused) { httpTrace("context:%p, start the clean up operation, sig:%p", pContext, pContext->signature); void *sig = atomic_val_compare_exchange_ptr(&pContext->signature, pContext, 0); if (sig == NULL) { @@ -184,7 +184,7 @@ bool httpInitContext(HttpContext *pContext) { void httpCloseContext(HttpThread *pThread, HttpContext *pContext) { - taosTmrReset(httpCleanUpContext, HTTP_DELAY_CLOSE_TIME_MS, pContext, pThread->pServer->timerHandle, &pContext->timer); + taosTmrReset((TAOS_TMR_CALLBACK)httpCleanUpContext, HTTP_DELAY_CLOSE_TIME_MS, pContext, pThread->pServer->timerHandle, &pContext->timer); httpTrace("context:%p, fd:%d, ip:%s, state:%s will be closed after:%d ms, timer:%p", pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), HTTP_DELAY_CLOSE_TIME_MS, pContext->timer); } 
@@ -273,7 +273,7 @@ void httpCleanUpConnect(HttpServer *pServer) { taosCloseSocket(pThread->pollFd); while (pThread->pHead) { - httpCleanUpContext(pThread->pHead); + httpCleanUpContext(pThread->pHead, 0); } pthread_cancel(pThread->thread); @@ -329,8 +329,6 @@ bool httpReadDataImp(HttpContext *pContext) { } pParser->buffer[pParser->bufsize] = 0; - httpTrace("context:%p, fd:%d, ip:%s, thread:%s, read size:%d", - pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pParser->bufsize); return true; } @@ -383,10 +381,12 @@ bool httpReadData(HttpThread *pThread, HttpContext *pContext) { int ret = httpCheckReadCompleted(pContext); if (ret == HTTP_CHECK_BODY_CONTINUE) { taosTmrReset(httpCloseContextByServerForExpired, HTTP_EXPIRED_TIME, pContext, pThread->pServer->timerHandle, &pContext->timer); - httpTrace("context:%p, fd:%d, ip:%s, not finished yet, try another times, timer:%p", pContext, pContext->fd, pContext->ipstr, pContext->timer); + //httpTrace("context:%p, fd:%d, ip:%s, not finished yet, try another times, timer:%p", pContext, pContext->fd, pContext->ipstr, pContext->timer); return false; } else if (ret == HTTP_CHECK_BODY_SUCCESS){ httpCleanUpContextTimer(pContext); + httpTrace("context:%p, fd:%d, ip:%s, thread:%s, read size:%d, dataLen:%d", + pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->parser.bufsize, pContext->parser.data.len); if (httpDecompressData(pContext)) { return true; } else { diff --git a/src/modules/http/src/restHandle.c b/src/modules/http/src/restHandle.c index 58509e693d8a76279c85c865825661e9a972efcd..a3077008661590e9813b6f3ec91b6c15d9e42902 100644 --- a/src/modules/http/src/restHandle.c +++ b/src/modules/http/src/restHandle.c @@ -67,10 +67,16 @@ bool restProcessSqlRequest(HttpContext* pContext, int timestampFmt) { return false; } + + /* + * for async test + * / + /* if (httpCheckUsedbSql(sql)) { httpSendErrorResp(pContext, HTTP_NO_EXEC_USEDB); return false; } + */ HttpSqlCmd* cmd = 
&(pContext->singleCmd); cmd->nativSql = sql; diff --git a/src/modules/http/src/restJson.c b/src/modules/http/src/restJson.c index 8aa0ac5069bde6f8e339c6f64c9010c1b57a1d31..7e98472d538b2f3b8733936d98ce182162336a91 100644 --- a/src/modules/http/src/restJson.c +++ b/src/modules/http/src/restJson.c @@ -94,7 +94,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int num_fields = taos_num_fields(result); TAOS_FIELD *fields = taos_fetch_fields(result); - for (int i = 0; i < numOfRows; ++i) { + for (int k = 0; k < numOfRows; ++k) { TAOS_ROW row = taos_fetch_row(result); // data row array begin @@ -152,7 +152,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, } if (cmd->numOfRows >= tsRestRowLimit) { - httpTrace("context:%p, fd:%d, ip:%s, user:%s, retrieve rows:%lld larger than limit:%d, abort retrieve", pContext, + httpTrace("context:%p, fd:%d, ip:%s, user:%s, retrieve rows:%d larger than limit:%d, abort retrieve", pContext, pContext->fd, pContext->ipstr, pContext->user, cmd->numOfRows, tsRestRowLimit); return false; } @@ -163,7 +163,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, return false; } else { - httpTrace("context:%p, fd:%d, ip:%s, user:%s, total rows:%lld retrieved", pContext, pContext->fd, pContext->ipstr, + httpTrace("context:%p, fd:%d, ip:%s, user:%s, total rows:%d retrieved", pContext, pContext->fd, pContext->ipstr, pContext->user, cmd->numOfRows); return true; } diff --git a/src/modules/http/src/tgHandle.c b/src/modules/http/src/tgHandle.c index ac17d6da0968b8a0015da0380b17024d3b4f3e53..b9adf5416274d8f380d8b00a033f2d83fa249084 100644 --- a/src/modules/http/src/tgHandle.c +++ b/src/modules/http/src/tgHandle.c @@ -215,7 +215,7 @@ ParseEnd: } } -int tgParseSchema(char *content, char*fileName) { +int tgParseSchema(const char *content, char*fileName) { cJSON *root = cJSON_Parse(content); if (root == NULL) { httpError("failed to parse telegraf schema file:%s, 
invalid json format, content:%s", fileName, content); @@ -248,7 +248,7 @@ int tgParseSchema(char *content, char*fileName) { return size; } -int tgReadSchema(const char *fileName) { +int tgReadSchema(char *fileName) { FILE *fp = fopen(fileName, "r"); if (fp == NULL) { return -1; @@ -262,6 +262,8 @@ int tgReadSchema(const char *fileName) { size_t result = fread(content, 1, contentSize, fp); if (result != contentSize) { httpError("failed to read telegraf schema file:%s", fileName); + fclose(fp); + free(content); return -1; } @@ -570,7 +572,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { orderTagsLen = orderTagsLen < TSDB_MAX_TAGS ? orderTagsLen : TSDB_MAX_TAGS; table_cmd->tagNum = stable_cmd->tagNum = (int8_t)orderTagsLen; - table_cmd->timestamp = stable_cmd->timestamp = httpAddToSqlCmdBuffer(pContext, "%ld", timestamp->valueint); + table_cmd->timestamp = stable_cmd->timestamp = httpAddToSqlCmdBuffer(pContext, "%" PRId64, timestamp->valueint); // stable name char *stname = tgGetStableName(name->valuestring, fields, fieldsSize); @@ -591,7 +593,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { if (tag->type == cJSON_String) stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "'%s'", tag->valuestring); else if (tag->type == cJSON_Number) - stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "%ld", tag->valueint); + stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "%" PRId64, tag->valueint); else if (tag->type == cJSON_True) stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "1"); else if (tag->type == cJSON_False) @@ -612,7 +614,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { if (tag->type == cJSON_String) httpAddToSqlCmdBufferNoTerminal(pContext, "_%s", tag->valuestring); else if (tag->type == cJSON_Number) - 
httpAddToSqlCmdBufferNoTerminal(pContext, "_%ld", tag->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, "_%" PRId64, tag->valueint); else if (tag->type == cJSON_False) httpAddToSqlCmdBufferNoTerminal(pContext, "_0"); else if (tag->type == cJSON_True) @@ -668,7 +670,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { cJSON *tag = orderedTags[i]; if (i != orderTagsLen - 1) { if (tag->type == cJSON_Number) - httpAddToSqlCmdBufferNoTerminal(pContext, "%ld,", tag->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, "%" PRId64 ",", tag->valueint); else if (tag->type == cJSON_String) httpAddToSqlCmdBufferNoTerminal(pContext, "'%s',", tag->valuestring); else if (tag->type == cJSON_False) @@ -680,7 +682,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { } } else { if (tag->type == cJSON_Number) - httpAddToSqlCmdBufferNoTerminal(pContext, "%ld)", tag->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, "%" PRId64 ")", tag->valueint); else if (tag->type == cJSON_String) httpAddToSqlCmdBufferNoTerminal(pContext, "'%s')", tag->valuestring); else if (tag->type == cJSON_False) @@ -693,7 +695,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { } } - httpAddToSqlCmdBufferNoTerminal(pContext, " values(%ld,", timestamp->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, " values(%" PRId64 ",", timestamp->valueint); for (int i = 0; i < fieldsSize; ++i) { cJSON *field = cJSON_GetArrayItem(fields, i); if (i != fieldsSize - 1) { diff --git a/src/modules/monitor/src/monitorSystem.c b/src/modules/monitor/src/monitorSystem.c index c5f65eef3625bfca7809758aa0157954ef81fb1a..f403a272935b67db3f8cdded152b41be19f22fd5 100644 --- a/src/modules/monitor/src/monitorSystem.c +++ b/src/modules/monitor/src/monitorSystem.c @@ -14,6 +14,7 @@ */ #include "monitor.h" +#include #include #include #include @@ -113,11 +114,7 @@ void monitorInitConn(void *para, void *unused) { monitor->state = 
MONITOR_STATE_INITIALIZING; if (monitor->privateIpStr[0] == 0) { -#ifdef CLUSTER strcpy(monitor->privateIpStr, tsPrivateIp); -#else - strcpy(monitor->privateIpStr, tsInternalIp); -#endif for (int i = 0; i < TSDB_IPv4ADDR_LEN; ++i) { if (monitor->privateIpStr[i] == '.') { monitor->privateIpStr[i] = '_'; @@ -167,11 +164,7 @@ void dnodeBuildMonitorSql(char *sql, int cmd) { tsMonitorDbName, IP_LEN_STR + 1); } else if (cmd == MONITOR_CMD_CREATE_TB_DN) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn_%s using %s.dn tags('%s')", tsMonitorDbName, -#ifdef CLUSTER monitor->privateIpStr, tsMonitorDbName, tsPrivateIp); -#else - monitor->privateIpStr, tsMonitorDbName, tsInternalIp); -#endif } else if (cmd == MONITOR_CMD_CREATE_MT_ACCT) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.acct(ts timestamp " @@ -224,12 +217,8 @@ void monitorInitDatabaseCb(void *param, TAOS_RES *result, int code) { if (monitor->cmdIndex == MONITOR_CMD_CREATE_TB_LOG) { taosLogFp = monitorSaveLog; taosLogSqlFp = monitorExecuteSQL; -#ifdef CLUSTER taosLogAcctFp = monitorSaveAcctLog; monitorLPrint("dnode:%s is started", tsPrivateIp); -#else - monitorLPrint("dnode:%s is started", tsInternalIp); -#endif } monitor->cmdIndex++; monitorInitDatabase(); @@ -245,11 +234,7 @@ void monitorStopSystem() { return; } -#ifdef CLUSTER monitorLPrint("dnode:%s monitor module is stopped", tsPrivateIp); -#else - monitorLPrint("dnode:%s monitor module is stopped", tsInternalIp); -#endif monitor->state = MONITOR_STATE_STOPPED; taosLogFp = NULL; if (monitor->initTimer != NULL) { @@ -376,7 +361,7 @@ void monitorSaveSystemInfo() { int64_t ts = taosGetTimestampUs(); char * sql = monitor->sql; - int pos = snprintf(sql, SQL_LENGTH, "insert into %s.dn_%s values(%ld", tsMonitorDbName, monitor->privateIpStr, ts); + int pos = snprintf(sql, SQL_LENGTH, "insert into %s.dn_%s values(%" PRId64, tsMonitorDbName, monitor->privateIpStr, ts); pos += monitorBuildCpuSql(sql + pos); pos += monitorBuildMemorySql(sql + pos); 
@@ -402,21 +387,29 @@ void monitorSaveAcctLog(char *acctId, int64_t currentPointsPerSecond, int64_t ma char sql[1024] = {0}; sprintf(sql, "insert into %s.acct_%s using %s.acct tags('%s') values(now" - ", %ld, %ld " - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" - ", %ld, %ld" + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 + ", %" PRId64 ", %" PRId64 ", %d)", - tsMonitorDbName, acctId, tsMonitorDbName, acctId, currentPointsPerSecond, maxPointsPerSecond, totalTimeSeries, - maxTimeSeries, totalStorage, maxStorage, totalQueryTime, maxQueryTime, totalInbound, maxInbound, - totalOutbound, maxOutbound, totalDbs, maxDbs, totalUsers, maxUsers, totalStreams, maxStreams, totalConns, - maxConns, accessState); + tsMonitorDbName, acctId, tsMonitorDbName, acctId, + currentPointsPerSecond, maxPointsPerSecond, + totalTimeSeries, maxTimeSeries, + totalStorage, maxStorage, + totalQueryTime, maxQueryTime, + totalInbound, maxInbound, + totalOutbound, maxOutbound, + totalDbs, maxDbs, + totalUsers, maxUsers, + totalStreams, maxStreams, + totalConns, maxConns, + accessState); monitorTrace("monitor:%p, save account info, sql %s", monitor->conn, sql); taos_query_a(monitor->conn, sql, dnodeMontiorInsertAcctCallback, "account"); @@ -431,7 +424,7 @@ void monitorSaveLog(int level, const char *const format, ...) { return; } - int len = snprintf(sql, (size_t)max_length, "import into %s.log values(%ld, %d,'", tsMonitorDbName, + int len = snprintf(sql, (size_t)max_length, "import into %s.log values(%" PRId64 ", %d,'", tsMonitorDbName, taosGetTimestampUs(), level); va_start(argpointer, format); @@ -439,11 +432,7 @@ void monitorSaveLog(int level, const char *const format, ...) 
{ va_end(argpointer); if (len > max_length) len = max_length; -#ifdef CLUSTER len += sprintf(sql + len, "', '%s')", tsPrivateIp); -#else - len += sprintf(sql + len, "', '%s')", tsInternalIp); -#endif sql[len++] = 0; monitorTrace("monitor:%p, save log, sql: %s", monitor->conn, sql); diff --git a/src/os/darwin/inc/os.h b/src/os/darwin/inc/os.h index bf86103e8400725b991a9716de25b2018ec5a61d..1aececeec9ad3f3447fe46bb03dc61ddfb9ec8f2 100644 --- a/src/os/darwin/inc/os.h +++ b/src/os/darwin/inc/os.h @@ -1,57 +1,85 @@ /* -* Copyright (c) 2019 TAOS Data, Inc. -* -* This program is free software: you can use, redistribute, and/or modify -* it under the terms of the GNU Affero General Public License, version 3 -* or later ("AGPL"), as published by the Free Software Foundation. -* -* This program is distributed in the hope that it will be useful, but WITHOUT -* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -* FITNESS FOR A PARTICULAR PURPOSE. -* -* You should have received a copy of the GNU Affero General Public License -* along with this program. If not, see . -*/ - - -#ifndef TDENGINE_PLATFORM_DARWIN_H -#define TDENGINE_PLATFORM_DARWIN_H + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_PLATFORM_LINUX_H +#define TDENGINE_PLATFORM_LINUX_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include #include -#include -#include -#include -#include -#include -#include #include #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include +#include #include +#include #include +#include #include +#include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include #include #define htobe64 htonll #define taosCloseSocket(x) \ { \ - if (FD_VALID(x)) { \ + if (FD_VALID(x)) { \ close(x); \ - x = -1; \ + x = FD_INITIALIZER; \ } \ } + #define taosWriteSocket(fd, buf, len) write(fd, buf, len) #define taosReadSocket(fd, buf, len) read(fd, buf, len) @@ -160,7 +188,7 @@ (__a < __b) ? 
__a : __b; \ }) -#define MILLISECOND_PER_SECOND (1000L) +#define MILLISECOND_PER_SECOND ((int64_t)1000L) #define tsem_t dispatch_semaphore_t @@ -197,6 +225,10 @@ bool taosSkipSocketCheck(); bool taosGetDisk(); +int fsendfile(FILE* out_file, FILE* in_file, int64_t* offset, int32_t count); + +void taosSetCoreDump(); + typedef int(*__compar_fn_t)(const void *, const void *); // for send function in tsocket.c @@ -209,9 +241,18 @@ typedef int(*__compar_fn_t)(const void *, const void *); #define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE #endif +#ifndef _TD_ARM_32_ #define BUILDIN_CLZL(val) __builtin_clzl(val) -#define BUILDIN_CLZ(val) __builtin_clz(val) #define BUILDIN_CTZL(val) __builtin_ctzl(val) +#else +#define BUILDIN_CLZL(val) __builtin_clzll(val) +#define BUILDIN_CTZL(val) __builtin_ctzll(val) +#endif +#define BUILDIN_CLZ(val) __builtin_clz(val) #define BUILDIN_CTZ(val) __builtin_ctz(val) -#endif \ No newline at end of file +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/os/darwin/src/tdarwin.c b/src/os/darwin/src/tdarwin.c index 133bb4893cc9aa2f8b561036ffaff6a53e0db3a7..af3b1bd8a50d9f52d5f81f4dd42f483c510866c9 100644 --- a/src/os/darwin/src/tdarwin.c +++ b/src/os/darwin/src/tdarwin.c @@ -33,11 +33,12 @@ #include "tsdb.h" #include "tutil.h" -char configDir[TSDB_FILENAME_LEN] = "~/TDengine/cfg"; -char tsDirectory[TSDB_FILENAME_LEN] = "~/TDengine/data"; -char dataDir[TSDB_FILENAME_LEN] = "~/TDengine/data"; -char logDir[TSDB_FILENAME_LEN] = "~/TDengine/log"; -char scriptDir[TSDB_FILENAME_LEN] = "~/TDengine/script"; +char configDir[TSDB_FILENAME_LEN] = "/etc/taos"; +char tsDirectory[TSDB_FILENAME_LEN] = "/var/lib/taos"; +char dataDir[TSDB_FILENAME_LEN] = "/var/lib/taos"; +char logDir[TSDB_FILENAME_LEN] = "~/TDengineLog"; +char scriptDir[TSDB_FILENAME_LEN] = "/etc/taos"; +char osName[] = "Darwin"; int64_t str2int64(char *str) { char *endptr = NULL; @@ -418,4 +419,43 @@ int32_t __sync_val_load_32(int32_t *ptr) { void __sync_val_restore_32(int32_t *ptr, 
int32_t newval) { __atomic_store_n(ptr, newval, __ATOMIC_RELEASE); -} \ No newline at end of file +} + +#define _SEND_FILE_STEP_ 1000 + +int fsendfile(FILE* out_file, FILE* in_file, int64_t* offset, int32_t count) { + fseek(in_file, (int32_t)(*offset), 0); + int writeLen = 0; + uint8_t buffer[_SEND_FILE_STEP_] = { 0 }; + + for (int len = 0; len < (count - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) { + size_t rlen = fread(buffer, 1, _SEND_FILE_STEP_, in_file); + if (rlen <= 0) { + return writeLen; + } + else if (rlen < _SEND_FILE_STEP_) { + fwrite(buffer, 1, rlen, out_file); + return (int)(writeLen + rlen); + } + else { + fwrite(buffer, 1, _SEND_FILE_STEP_, in_file); + writeLen += _SEND_FILE_STEP_; + } + } + + int remain = count - writeLen; + if (remain > 0) { + size_t rlen = fread(buffer, 1, remain, in_file); + if (rlen <= 0) { + return writeLen; + } + else { + fwrite(buffer, 1, remain, out_file); + writeLen += remain; + } + } + + return writeLen; +} + +void taosSetCoreDump() {} \ No newline at end of file diff --git a/src/os/linux/inc/os.h b/src/os/linux/inc/os.h index a3d50400c30874fd48cd3dc3d005682a23c58ea1..aa54a3563ec08b2c83d7dd34db427399cfa93a3e 100644 --- a/src/os/linux/inc/os.h +++ b/src/os/linux/inc/os.h @@ -23,6 +23,10 @@ extern "C" { #include #include +#ifndef _ALPINE + #include +#endif + #include #include #include @@ -71,13 +75,14 @@ extern "C" { #include #include #include +#include #define taosCloseSocket(x) \ { \ if (FD_VALID(x)) { \ close(x); \ - x = -1; \ + x = FD_INITIALIZER; \ } \ } @@ -227,9 +232,22 @@ void taosSetCoreDump(); void taosBlockSIGPIPE(); +#ifdef _ALPINE + typedef int(*__compar_fn_t)(const void *, const void *); + void error (int, int, const char *); + #ifndef PTHREAD_MUTEX_RECURSIVE_NP + #define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE + #endif +#endif + +#ifndef _TD_ARM_32_ #define BUILDIN_CLZL(val) __builtin_clzl(val) -#define BUILDIN_CLZ(val) __builtin_clz(val) #define BUILDIN_CTZL(val) __builtin_ctzl(val) +#else 
+#define BUILDIN_CLZL(val) __builtin_clzll(val) +#define BUILDIN_CTZL(val) __builtin_ctzll(val) +#endif +#define BUILDIN_CLZ(val) __builtin_clz(val) #define BUILDIN_CTZ(val) __builtin_ctz(val) #ifdef __cplusplus diff --git a/src/os/linux/src/tlinux.c b/src/os/linux/src/tlinux.c index 83782f49440218846a7aac26bfb9f00d6bac4c5b..b81b98a5f7fb9cea96879c570a05fbca8e4e74b5 100644 --- a/src/os/linux/src/tlinux.c +++ b/src/os/linux/src/tlinux.c @@ -39,6 +39,7 @@ char tsDirectory[TSDB_FILENAME_LEN] = "/var/lib/taos"; char dataDir[TSDB_FILENAME_LEN] = "/var/lib/taos"; char logDir[TSDB_FILENAME_LEN] = "/var/log/taos"; char scriptDir[TSDB_FILENAME_LEN] = "/etc/taos"; +char osName[] = "Linux"; int64_t str2int64(char *str) { char *endptr = NULL; @@ -234,8 +235,15 @@ void *taosProcessAlarmSignal(void *tharg) { timer_t timerId; struct sigevent sevent; - sevent.sigev_notify = SIGEV_THREAD_ID; - sevent._sigev_un._tid = syscall(__NR_gettid); + + #ifdef _ALPINE + sevent.sigev_notify = SIGEV_THREAD; + sevent.sigev_value.sival_int = syscall(__NR_gettid); + #else + sevent.sigev_notify = SIGEV_THREAD_ID; + sevent._sigev_un._tid = syscall(__NR_gettid); + #endif + sevent.sigev_signo = SIGALRM; if (timer_create(CLOCK_REALTIME, &sevent, &timerId) == -1) { @@ -264,7 +272,6 @@ void *taosProcessAlarmSignal(void *tharg) { callback(0); } - assert(0); return NULL; } diff --git a/src/os/linux/src/tsystem.c b/src/os/linux/src/tsystem.c index 03192bb9cc49bd8ba613662d27ed4d38364dd7ac..8cd0e6943616f4ecb1f69ed100ca6535968005ae 100644 --- a/src/os/linux/src/tsystem.c +++ b/src/os/linux/src/tsystem.c @@ -12,7 +12,7 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ - +#include #include #include #include @@ -27,12 +27,15 @@ #include #include #include -#include -#include #include #include #include + +#ifdef _ALPINE #include +#else +#include +#endif #include "tglobalcfg.h" #include "tlog.h" @@ -99,7 +102,7 @@ bool taosGetProcMemory(float *memoryUsedMB) { int64_t memKB = 0; char tmp[10]; - sscanf(line, "%s %ld", tmp, &memKB); + sscanf(line, "%s %" PRId64, tmp, &memKB); *memoryUsedMB = (float)((double)memKB / 1024); tfree(line); @@ -124,7 +127,7 @@ bool taosGetSysCpuInfo(SysCpuInfo *cpuInfo) { } char cpu[10] = {0}; - sscanf(line, "%s %ld %ld %ld %ld", cpu, &cpuInfo->user, &cpuInfo->nice, &cpuInfo->system, &cpuInfo->idle); + sscanf(line, "%s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64, cpu, &cpuInfo->user, &cpuInfo->nice, &cpuInfo->system, &cpuInfo->idle); tfree(line); fclose(fp); @@ -150,7 +153,7 @@ bool taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) { for (int i = 0, blank = 0; line[i] != 0; ++i) { if (line[i] == ' ') blank++; if (blank == PROCESS_ITEM) { - sscanf(line + i + 1, "%ld %ld %ld %ld", &cpuInfo->utime, &cpuInfo->stime, &cpuInfo->cutime, &cpuInfo->cstime); + sscanf(line + i + 1, "%" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64, &cpuInfo->utime, &cpuInfo->stime, &cpuInfo->cutime, &cpuInfo->cstime); break; } } @@ -381,8 +384,8 @@ bool taosGetCardName(char *ip, char *name) { bool taosGetCardInfo(int64_t *bytes) { static char tsPublicCard[1000] = {0}; if (tsPublicCard[0] == 0) { - if (!taosGetCardName(tsInternalIp, tsPublicCard)) { - pError("can't get card name from ip:%s", tsInternalIp); + if (!taosGetCardName(tsPrivateIp, tsPublicCard)) { + pError("can't get card name from ip:%s", tsPrivateIp); return false; } int cardNameLen = (int)strlen(tsPublicCard); @@ -420,7 +423,7 @@ bool taosGetCardInfo(int64_t *bytes) { } } if (line != NULL) { - sscanf(line, "%s %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld", nouse0, &rbytes, &rpackts, &nouse1, &nouse2, &nouse3, + sscanf(line, "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 
" %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64, nouse0, &rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &tbytes, &tpackets); *bytes = rbytes + tbytes; tfree(line); @@ -488,10 +491,10 @@ bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) { break; } if (strstr(line, "rchar:") != NULL) { - sscanf(line, "%s %ld", tmp, readbyte); + sscanf(line, "%s %" PRId64, tmp, readbyte); readIndex++; } else if (strstr(line, "wchar:") != NULL) { - sscanf(line, "%s %ld", tmp, writebyte); + sscanf(line, "%s %" PRId64, tmp, writebyte); readIndex++; } else { } @@ -514,7 +517,8 @@ bool taosGetProcIO(float *readKB, float *writeKB) { static int64_t lastReadbyte = -1; static int64_t lastWritebyte = -1; - int64_t curReadbyte, curWritebyte; + int64_t curReadbyte = 0; + int64_t curWritebyte = 0; if (!taosReadProcIO(&curReadbyte, &curWritebyte)) { return false; @@ -564,9 +568,9 @@ void taosGetSystemInfo() { } void tsPrintOsInfo() { - pPrint(" os pageSize: %ld(KB)", tsPageSize); - pPrint(" os openMax: %ld", tsOpenMax); - pPrint(" os streamMax: %ld", tsStreamMax); + pPrint(" os pageSize: %" PRId64 "(KB)", tsPageSize); + pPrint(" os openMax: %" PRId64, tsOpenMax); + pPrint(" os streamMax: %" PRId64, tsStreamMax); pPrint(" os numOfCores: %d", tsNumOfCores); pPrint(" os totalDisk: %f(GB)", tsTotalDataDirGB); pPrint(" os totalMemory: %d(MB)", tsTotalMemoryMB); diff --git a/src/os/windows/inc/os.h b/src/os/windows/inc/os.h index 9c0add2c319829e10d192de8a94dc43038c3155d..3f957e8abd489abdbe7bb009b3d4fa47aadb4467 100644 --- a/src/os/windows/inc/os.h +++ b/src/os/windows/inc/os.h @@ -17,10 +17,10 @@ #define TDENGINE_PLATFORM_WINDOWS_H #include -#include +#include #include -#include -#include +#include +#include #include #include #include @@ -35,12 +35,15 @@ #include #include #include -#include -#include +#include +#include #include +#include #include "winsock2.h" #include +#include + #ifdef __cplusplus extern "C" { #endif @@ -74,7 +77,13 @@ extern "C" { #define 
strncasecmp _strnicmp #define wcsncasecmp _wcsnicmp #define strtok_r strtok_s -#define str2int64 _atoi64 +#ifdef _TD_GO_DLL_ + int64_t str2int64(char *str); + uint64_t htonll(uint64_t val); +#else + #define str2int64 _atoi64 +#endif + #define snprintf _snprintf #define in_addr_t unsigned long #define socklen_t int @@ -135,7 +144,12 @@ extern "C" { #define atomic_exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val)) #define atomic_exchange_ptr(ptr, val) _InterlockedExchangePointer((void* volatile*)(ptr), (void*)(val)) -#define atomic_val_compare_exchange_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval)) +#ifdef _TD_GO_DLL_ + #define atomic_val_compare_exchange_8 __sync_val_compare_and_swap +#else + #define atomic_val_compare_exchange_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval)) +#endif + #define atomic_val_compare_exchange_16(ptr, oldval, newval) _InterlockedCompareExchange16((short volatile*)(ptr), (short)(newval), (short)(oldval)) #define atomic_val_compare_exchange_32(ptr, oldval, newval) _InterlockedCompareExchange((long volatile*)(ptr), (long)(newval), (long)(oldval)) #define atomic_val_compare_exchange_64(ptr, oldval, newval) _InterlockedCompareExchange64((__int64 volatile*)(ptr), (__int64)(newval), (__int64)(oldval)) @@ -155,9 +169,14 @@ __int64 interlocked_add_fetch_64(__int64 volatile *ptr, __int64 val); #else #define atomic_add_fetch_ptr atomic_add_fetch_32 #endif +#ifdef _TD_GO_DLL_ + #define atomic_fetch_add_8 __sync_fetch_and_ad + #define atomic_fetch_add_16 __sync_fetch_and_add +#else + #define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val)) + #define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val)) +#endif -#define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val)) -#define 
atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val)) #define atomic_fetch_add_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), (long)(val)) #define atomic_fetch_add_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), (__int64)(val)) #ifdef _WIN64 @@ -185,14 +204,17 @@ __int64 interlocked_add_fetch_64(__int64 volatile *ptr, __int64 val); #else #define atomic_fetch_sub_ptr atomic_fetch_sub_32 #endif - -char interlocked_and_fetch_8(char volatile* ptr, char val); -short interlocked_and_fetch_16(short volatile* ptr, short val); +#ifndef _TD_GO_DLL_ + char interlocked_and_fetch_8(char volatile* ptr, char val); + short interlocked_and_fetch_16(short volatile* ptr, short val); +#endif long interlocked_and_fetch_32(long volatile* ptr, long val); __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val); -#define atomic_and_fetch_8(ptr, val) interlocked_and_fetch_8((char volatile*)(ptr), (char)(val)) -#define atomic_and_fetch_16(ptr, val) interlocked_and_fetch_16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_and_fetch_8(ptr, val) interlocked_and_fetch_8((char volatile*)(ptr), (char)(val)) + #define atomic_and_fetch_16(ptr, val) interlocked_and_fetch_16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_and_fetch_32(ptr, val) interlocked_and_fetch_32((long volatile*)(ptr), (long)(val)) #define atomic_and_fetch_64(ptr, val) interlocked_and_fetch_64((__int64 volatile*)(ptr), (__int64)(val)) #ifdef _WIN64 @@ -200,9 +222,10 @@ __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val); #else #define atomic_and_fetch_ptr atomic_and_fetch_32 #endif - -#define atomic_fetch_and_8(ptr, val) _InterlockedAnd8((char volatile*)(ptr), (char)(val)) -#define atomic_fetch_and_16(ptr, val) _InterlockedAnd16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_fetch_and_8(ptr, val) _InterlockedAnd8((char volatile*)(ptr), (char)(val)) + #define 
atomic_fetch_and_16(ptr, val) _InterlockedAnd16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_fetch_and_32(ptr, val) _InterlockedAnd((long volatile*)(ptr), (long)(val)) #ifdef _M_IX86 @@ -217,14 +240,17 @@ __int64 interlocked_and_fetch_64(__int64 volatile* ptr, __int64 val); #else #define atomic_fetch_and_ptr atomic_fetch_and_32 #endif - -char interlocked_or_fetch_8(char volatile* ptr, char val); -short interlocked_or_fetch_16(short volatile* ptr, short val); +#ifndef _TD_GO_DLL_ + char interlocked_or_fetch_8(char volatile* ptr, char val); + short interlocked_or_fetch_16(short volatile* ptr, short val); +#endif long interlocked_or_fetch_32(long volatile* ptr, long val); __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val); -#define atomic_or_fetch_8(ptr, val) interlocked_or_fetch_8((char volatile*)(ptr), (char)(val)) -#define atomic_or_fetch_16(ptr, val) interlocked_or_fetch_16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_or_fetch_8(ptr, val) interlocked_or_fetch_8((char volatile*)(ptr), (char)(val)) + #define atomic_or_fetch_16(ptr, val) interlocked_or_fetch_16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_or_fetch_32(ptr, val) interlocked_or_fetch_32((long volatile*)(ptr), (long)(val)) #define atomic_or_fetch_64(ptr, val) interlocked_or_fetch_64((__int64 volatile*)(ptr), (__int64)(val)) #ifdef _WIN64 @@ -232,9 +258,10 @@ __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val); #else #define atomic_or_fetch_ptr atomic_or_fetch_32 #endif - -#define atomic_fetch_or_8(ptr, val) _InterlockedOr8((char volatile*)(ptr), (char)(val)) -#define atomic_fetch_or_16(ptr, val) _InterlockedOr16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_fetch_or_8(ptr, val) _InterlockedOr8((char volatile*)(ptr), (char)(val)) + #define atomic_fetch_or_16(ptr, val) _InterlockedOr16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_fetch_or_32(ptr, val) 
_InterlockedOr((long volatile*)(ptr), (long)(val)) #ifdef _M_IX86 @@ -250,13 +277,17 @@ __int64 interlocked_or_fetch_64(__int64 volatile* ptr, __int64 val); #define atomic_fetch_or_ptr atomic_fetch_or_32 #endif -char interlocked_xor_fetch_8(char volatile* ptr, char val); -short interlocked_xor_fetch_16(short volatile* ptr, short val); +#ifndef _TD_GO_DLL_ + char interlocked_xor_fetch_8(char volatile* ptr, char val); + short interlocked_xor_fetch_16(short volatile* ptr, short val); +#endif long interlocked_xor_fetch_32(long volatile* ptr, long val); __int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val); -#define atomic_xor_fetch_8(ptr, val) interlocked_xor_fetch_8((char volatile*)(ptr), (char)(val)) -#define atomic_xor_fetch_16(ptr, val) interlocked_xor_fetch_16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_xor_fetch_8(ptr, val) interlocked_xor_fetch_8((char volatile*)(ptr), (char)(val)) + #define atomic_xor_fetch_16(ptr, val) interlocked_xor_fetch_16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_xor_fetch_32(ptr, val) interlocked_xor_fetch_32((long volatile*)(ptr), (long)(val)) #define atomic_xor_fetch_64(ptr, val) interlocked_xor_fetch_64((__int64 volatile*)(ptr), (__int64)(val)) #ifdef _WIN64 @@ -265,8 +296,10 @@ __int64 interlocked_xor_fetch_64(__int64 volatile* ptr, __int64 val); #define atomic_xor_fetch_ptr atomic_xor_fetch_32 #endif -#define atomic_fetch_xor_8(ptr, val) _InterlockedXor8((char volatile*)(ptr), (char)(val)) -#define atomic_fetch_xor_16(ptr, val) _InterlockedXor16((short volatile*)(ptr), (short)(val)) +#ifndef _TD_GO_DLL_ + #define atomic_fetch_xor_8(ptr, val) _InterlockedXor8((char volatile*)(ptr), (char)(val)) + #define atomic_fetch_xor_16(ptr, val) _InterlockedXor16((short volatile*)(ptr), (short)(val)) +#endif #define atomic_fetch_xor_32(ptr, val) _InterlockedXor((long volatile*)(ptr), (long)(val)) #ifdef _M_IX86 @@ -292,7 +325,11 @@ __int64 interlocked_xor_fetch_64(__int64 
volatile* ptr, __int64 val); #define MAX(a,b) (((a)>(b))?(a):(b)) #define MIN(a,b) (((a)<(b))?(a):(b)) -#define MILLISECOND_PER_SECOND (1000i64) +#ifdef _TD_GO_DLL_ + #define MILLISECOND_PER_SECOND (1000LL) +#else + #define MILLISECOND_PER_SECOND (1000i64) +#endif #define tsem_t sem_t #define tsem_init sem_init diff --git a/src/os/windows/src/twindows.c b/src/os/windows/src/twindows.c index 98be6b60ba16e52b2177971d95930f7f717785aa..30973165dfd03f6179f4fc36a83c4b4a32ed3e55 100644 --- a/src/os/windows/src/twindows.c +++ b/src/os/windows/src/twindows.c @@ -28,11 +28,16 @@ #include "tsdb.h" #include "tglobalcfg.h" +#include +#include +#include + char configDir[TSDB_FILENAME_LEN] = "C:/TDengine/cfg"; char tsDirectory[TSDB_FILENAME_LEN] = "C:/TDengine/data"; char logDir[TSDB_FILENAME_LEN] = "C:/TDengine/log"; char dataDir[TSDB_FILENAME_LEN] = "C:/TDengine/data"; char scriptDir[TSDB_FILENAME_LEN] = "C:/TDengine/script"; +char osName[] = "Windows"; bool taosCheckPthreadValid(pthread_t thread) { return thread.p != NULL; @@ -68,11 +73,19 @@ int taosSetSockOpt(int socketfd, int level, int optname, void *optval, int optle // add char interlocked_add_fetch_8(char volatile* ptr, char val) { - return _InterlockedExchangeAdd8(ptr, val) + val; + #ifdef _TD_GO_DLL_ + return __sync_fetch_and_add(ptr, val) + val; + #else + return _InterlockedExchangeAdd8(ptr, val) + val; + #endif } short interlocked_add_fetch_16(short volatile* ptr, short val) { - return _InterlockedExchangeAdd16(ptr, val) + val; + #ifdef _TD_GO_DLL_ + return __sync_fetch_and_add(ptr, val) + val; + #else + return _InterlockedExchangeAdd16(ptr, val) + val; + #endif } long interlocked_add_fetch_32(long volatile* ptr, long val) { @@ -84,6 +97,7 @@ __int64 interlocked_add_fetch_64(__int64 volatile* ptr, __int64 val) { } // and +#ifndef _TD_GO_DLL_ char interlocked_and_fetch_8(char volatile* ptr, char val) { return _InterlockedAnd8(ptr, val) & val; } @@ -91,6 +105,7 @@ char interlocked_and_fetch_8(char volatile* ptr, char 
val) { short interlocked_and_fetch_16(short volatile* ptr, short val) { return _InterlockedAnd16(ptr, val) & val; } +#endif long interlocked_and_fetch_32(long volatile* ptr, long val) { return _InterlockedAnd(ptr, val) & val; @@ -124,6 +139,7 @@ __int64 interlocked_fetch_and_64(__int64 volatile* ptr, __int64 val) { #endif // or +#ifndef _TD_GO_DLL_ char interlocked_or_fetch_8(char volatile* ptr, char val) { return _InterlockedOr8(ptr, val) | val; } @@ -131,7 +147,7 @@ char interlocked_or_fetch_8(char volatile* ptr, char val) { short interlocked_or_fetch_16(short volatile* ptr, short val) { return _InterlockedOr16(ptr, val) | val; } - +#endif long interlocked_or_fetch_32(long volatile* ptr, long val) { return _InterlockedOr(ptr, val) | val; } @@ -164,6 +180,7 @@ __int64 interlocked_fetch_or_64(__int64 volatile* ptr, __int64 val) { #endif // xor +#ifndef _TD_GO_DLL_ char interlocked_xor_fetch_8(char volatile* ptr, char val) { return _InterlockedXor8(ptr, val) ^ val; } @@ -171,7 +188,7 @@ char interlocked_xor_fetch_8(char volatile* ptr, char val) { short interlocked_xor_fetch_16(short volatile* ptr, short val) { return _InterlockedXor16(ptr, val) ^ val; } - +#endif long interlocked_xor_fetch_32(long volatile* ptr, long val) { return _InterlockedXor(ptr, val) ^ val; } @@ -396,4 +413,16 @@ char *strndup(const char *s, size_t n) { return r; } -void taosSetCoreDump() {} \ No newline at end of file +void taosSetCoreDump() {} + +#ifdef _TD_GO_DLL_ +int64_t str2int64(char *str) { + char *endptr = NULL; + return strtoll(str, &endptr, 10); +} + +uint64_t htonll(uint64_t val) +{ + return (((uint64_t) htonl(val)) << 32) + htonl(val >> 32); +} +#endif \ No newline at end of file diff --git a/src/rpc/src/trpc.c b/src/rpc/src/trpc.c index 6f32c5488851a0682cc0c8b74c91ec161dc4c397..87506861b1a660fa7745012a45120942a2772ec4 100755 --- a/src/rpc/src/trpc.c +++ b/src/rpc/src/trpc.c @@ -31,8 +31,6 @@ #include "tutil.h" #include "lz4.h" -#pragma GCC diagnostic ignored 
"-Wpointer-to-int-cast" - typedef struct _msg_node { struct _msg_node *next; void * ahandle; @@ -58,7 +56,7 @@ typedef struct { uint16_t tranId; // outgoing transcation ID, for build message uint16_t outTranId; // outgoing transcation ID uint16_t inTranId; - char outType; + uint8_t outType; char inType; char closing; char rspReceived; @@ -171,7 +169,7 @@ static int32_t taosCompressRpcMsg(char* pCont, int32_t contLen) { memcpy(pCont + overhead, buf, compLen); pHeader->comp = 1; - tTrace("compress rpc msg, before:%lld, after:%lld", contLen, compLen); + tTrace("compress rpc msg, before:%d, after:%d", contLen, compLen); finalLen = compLen + overhead; //tDump(pCont, contLen); @@ -203,7 +201,7 @@ static STaosHeader* taosDecompressRpcMsg(STaosHeader* pHeader, SSchedMsg* pSched //tDump(pHeader->content, msgLen); if (buf) { - int32_t originalLen = LZ4_decompress_safe(pHeader->content + overhead, buf + sizeof(STaosHeader), + int32_t originalLen = LZ4_decompress_safe((const char*)(pHeader->content + overhead), buf + sizeof(STaosHeader), msgLen - overhead, contLen); memcpy(buf, pHeader, sizeof(STaosHeader)); @@ -220,6 +218,8 @@ static STaosHeader* taosDecompressRpcMsg(STaosHeader* pHeader, SSchedMsg* pSched tError("failed to allocate memory to decompress msg, contLen:%d, reason:%s", contLen, strerror(errno)); pSchedMsg->msg = NULL; } + + return NULL; } char *taosBuildReqHeader(void *param, char type, char *msg) { @@ -239,13 +239,14 @@ char *taosBuildReqHeader(void *param, char type, char *msg) { pHeader->spi = 0; pHeader->tcp = 0; pHeader->encrypt = 0; - pHeader->tranId = atomic_add_fetch_32(&pConn->tranId, 1); - if (pHeader->tranId == 0) pHeader->tranId = atomic_add_fetch_32(&pConn->tranId, 1); + pHeader->tranId = atomic_add_fetch_16(&pConn->tranId, 1); + if (pHeader->tranId == 0) pHeader->tranId = atomic_add_fetch_16(&pConn->tranId, 1); pHeader->sourceId = pConn->ownId; pHeader->destId = pConn->peerId; pHeader->port = 0; - pHeader->uid = (uint32_t)pConn + (uint32_t)getpid(); 
+ + pHeader->uid = (uint32_t)((int64_t)pConn + (int64_t)getpid()); memcpy(pHeader->meterId, pConn->meterId, tListLen(pHeader->meterId)); @@ -276,7 +277,9 @@ char *taosBuildReqMsgWithSize(void *param, char type, int size) { pHeader->sourceId = pConn->ownId; pHeader->destId = pConn->peerId; - pHeader->uid = (uint32_t)pConn + (uint32_t)getpid(); + + pHeader->uid = (uint32_t)((int64_t)pConn + (int64_t)getpid()); + memcpy(pHeader->meterId, pConn->meterId, tListLen(pHeader->meterId)); return (char *)pHeader->content; @@ -325,6 +328,10 @@ int taosSendSimpleRsp(void *thandle, char rsptype, char code) { } pStart = taosBuildRspMsgWithSize(thandle, rsptype, 32); + if (pStart == NULL) { + tError("build rsp msg error, return null prt"); + return -1; + } pMsg = pStart; *pMsg = code; @@ -1222,6 +1229,7 @@ int taosSendMsgToPeerH(void *thandle, char *pCont, int contLen, void *ahandle) { pServer = pConn->pServer; pChann = pServer->channList + pConn->chann; pHeader = (STaosHeader *)(pCont - sizeof(STaosHeader)); + pHeader->destIp = pConn->peerIp; msg = (char *)pHeader; if ((pHeader->msgType & 1U) == 0 && pConn->localPort) pHeader->port = pConn->localPort; diff --git a/src/rpc/src/tstring.c b/src/rpc/src/tstring.c index cf40fb50580d50aa5c7d26197dc3248feb2724e3..a254ceecfd1f6ce13b1cc30f8c0c87b6b8edfca9 100644 --- a/src/rpc/src/tstring.c +++ b/src/rpc/src/tstring.c @@ -231,15 +231,17 @@ char *tsError[] = {"success", "batch size too big", "timestamp out of range", //105 "invalid query message", - "timestamp disordered in cache block", + "too many results from vnodes for sort", "timestamp disordered in file block", "invalid commit log", "no disk space on server", //110 "only super table has metric meta info", "tags value not unique for join", "invalid submit message", - "not active table(not created yet or deleted already)", //114 - "invalid table id", - "invalid vnode status", //116 + "not active table(not created yet or dropped already)", + "invalid table id", // 115 + "invalid vnode 
status", "failed to lock resources", + "table id/uid mismatch", + "client query cache erased", // 119 }; diff --git a/src/rpc/src/ttcpserver.c b/src/rpc/src/ttcpserver.c index 29ada20bc427455edf8c5b771178a33aa8214ba3..663bfcdf8ee86008b0cfa4803725af1866a950bf 100644 --- a/src/rpc/src/ttcpserver.c +++ b/src/rpc/src/ttcpserver.c @@ -447,6 +447,7 @@ void *taosInitTcpServer(char *ip, uint16_t port, char *label, int numOfThreads, return (void *)pServerObj; } +#if 0 void taosListTcpConnection(void *handle, char *buffer) { SServerObj *pServerObj; SThreadObj *pThreadObj; @@ -468,7 +469,7 @@ void taosListTcpConnection(void *handle, char *buffer) { msg = msg + strlen(msg); pFdObj = pThreadObj->pHead; while (pFdObj) { - sprintf(" ip:%s port:%hu\n", pFdObj->ipstr, pFdObj->port); + sprintf(msg, " ip:%s port:%hu\n", pFdObj->ipstr, pFdObj->port); msg = msg + strlen(msg); numOfFds++; numOfConns++; @@ -486,6 +487,7 @@ void taosListTcpConnection(void *handle, char *buffer) { return; } +#endif int taosSendTcpServerData(uint32_t ip, uint16_t port, char *data, int len, void *chandle) { SFdObj *pFdObj = (SFdObj *)chandle; diff --git a/src/rpc/src/tudp.c b/src/rpc/src/tudp.c index fb0b37d93baaf98a6caed35ec980de72090819f2..db3e5e81c43754abe46cd907463737ceeb2b1116 100644 --- a/src/rpc/src/tudp.c +++ b/src/rpc/src/tudp.c @@ -296,7 +296,7 @@ void *taosTransferDataViaTcp(void *argv) { } if (!taosCheckHandleViaTcpValid(&handleViaTcp)) { - tError("%s UDP server read handle via tcp invalid, handle:%ld, hash:%ld", pSet->label, handleViaTcp.handle, + tError("%s UDP server read handle via tcp invalid, handle:%" PRIu64 ", hash:%" PRIu64, pSet->label, handleViaTcp.handle, handleViaTcp.hash); taosCloseSocket(connFd); free(pTransfer); @@ -698,12 +698,17 @@ int taosSendPacketViaTcp(uint32_t ip, uint16_t port, char *data, int dataLen, vo // send a UDP header first to set up the connection pHead = (STaosHeader *)buffer; memcpy(pHead, data, sizeof(STaosHeader)); + +#pragma GCC diagnostic push +#pragma GCC 
diagnostic ignored "-Wbitfield-constant-conversion" pHead->tcp = 2; +#pragma GCC diagnostic pop + msgLen = sizeof(STaosHeader); pHead->msgLen = (int32_t)htonl(msgLen); code = taosSendUdpData(ip, port, buffer, msgLen, chandle); - pHead = (STaosHeader *)data; + //pHead = (STaosHeader *)data; tinet_ntoa(ipstr, ip); int fd = taosOpenTcpClientSocket(ipstr, pConn->port, tsLocalIp); diff --git a/src/sdb/inc/sdbint.h b/src/sdb/inc/sdbint.h index 3327c1f7317cc6483346a7a7a7259b88a69ef6f7..c5b4f4e4aeccf6bd8664e067e7d74146e023b10b 100644 --- a/src/sdb/inc/sdbint.h +++ b/src/sdb/inc/sdbint.h @@ -127,7 +127,7 @@ typedef struct { } SMnodeStatus; typedef struct { - char dbId; + uint8_t dbId; char type; uint64_t version; short dataLen; diff --git a/src/sdb/src/sdbEngine.c b/src/sdb/src/sdbEngine.c index 024360501f908a1a4e9a68f789969f88c136d098..77b6f6d958bce36163adac74550060862f5eb1ec 100644 --- a/src/sdb/src/sdbEngine.c +++ b/src/sdb/src/sdbEngine.c @@ -24,6 +24,8 @@ extern char version[]; const int16_t sdbFileVersion = 0; int sdbExtConns = 0; +SIpList *pSdbIpList = NULL; +SIpList *pSdbPublicIpList = NULL; #ifdef CLUSTER int sdbMaster = 0; @@ -287,7 +289,7 @@ sdb_exit1: return -1; } -void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, char keyType, char *directory, +void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, uint8_t keyType, char *directory, void *(*appTool)(char, void *, char *, int, int *)) { SSdbTable *pTable = (SSdbTable *)malloc(sizeof(SSdbTable)); if (pTable == NULL) return NULL; @@ -373,22 +375,22 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { */ pTable->id++; sdbVersion++; - sdbPrint("table:%s, record:%s already exist, think it successed, sdbVersion:%ld id:%d", + sdbPrint("table:%s, record:%s already exist, think it successed, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); return 0; } else { switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbError("table:%s, 
failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, (char *)row, sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record:%s sdbVersion:%" PRId64 " id:%" PRId64 , pTable->name, (char *)row, sdbVersion, pTable->id); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbError("table:%s, failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record:%s sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); break; case SDB_KEYTYPE_AUTO: - sdbError("table:%s, failed to insert record:%d sdbVersion:%ld id:%d", pTable->name, *(int32_t *)row, sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record:%d sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, *(int32_t *)row, sdbVersion, pTable->id); break; default: - sdbError("table:%s, failed to insert record sdbVersion:%ld id:%d", pTable->name, sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, sdbVersion, pTable->id); break; } return -1; @@ -452,19 +454,19 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { pTable->numOfRows++; switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, (char *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, 
rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; case SDB_KEYTYPE_AUTO: - sdbTrace("table:%s, a record is inserted:%d, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted:%d, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, *(int32_t *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; default: - sdbTrace("table:%s, a record is inserted, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; } @@ -555,19 +557,19 @@ int sdbDeleteRow(void *handle, void *row) { sdbAddIntoUpdateList(pTable, SDB_TYPE_DELETE, pMetaRow); switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_AUTO: - sdbTrace("table:%s, a record is deleted:%d, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted:%d, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); break; default: - sdbTrace("table:%s, a record is deleted, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted, sdbVersion:%" PRId64 " id:%" PRId64 " 
numOfRows:%d", pTable->name, sdbVersion, pTable->id, pTable->numOfRows); break; } @@ -602,19 +604,19 @@ int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { if (pMeta == NULL) { switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, (char *) row, sdbVersion, pTable->id); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, taosIpStr(*(int32_t *) row), sdbVersion, pTable->id); break; case SDB_KEYTYPE_AUTO: - sdbError("table:%s, failed to update record:%d, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%d, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, *(int32_t *) row, sdbVersion, pTable->id); break; default: - sdbError("table:%s, failed to update record, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, sdbVersion, pTable->id); break; } @@ -674,19 +676,19 @@ int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace("table:%s, a record is updated:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is updated:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbTrace("table:%s, a record is updated:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is updated:%s, sdbVersion:%" PRId64 " id:%" 
PRId64 " numOfRows:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_AUTO: - sdbTrace("table:%s, a record is updated:%d, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is updated:%d, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); break; default: - sdbTrace("table:%s, a record is updated, sdbVersion:%ld id:%ld numOfRows:%d", pTable->name, sdbVersion, + sdbTrace("table:%s, a record is updated, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, sdbVersion, pTable->id, pTable->numOfRows); break; } @@ -795,7 +797,7 @@ void sdbCloseTable(void *handle) { pthread_mutex_destroy(&pTable->mutex); sdbNumOfTables--; - sdbTrace("table:%s is closed, id:%ld numOfTables:%d", pTable->name, pTable->id, sdbNumOfTables); + sdbTrace("table:%s is closed, id:%" PRId64 " numOfTables:%d", pTable->name, pTable->id, sdbNumOfTables); tfree(pTable->update); tfree(pTable); @@ -899,7 +901,7 @@ void sdbResetTable(SSdbTable *pTable) { tfree(rowHead); - sdbPrint("table:%s is updated, sdbVerion:%ld id:%ld", pTable->name, sdbVersion, pTable->id); + sdbPrint("table:%s is updated, sdbVerion:%" PRId64 " id:%" PRId64, pTable->name, sdbVersion, pTable->id); } // TODO:A problem here :use snapshot file to sync another node will cause diff --git a/src/system/CMakeLists.txt b/src/system/CMakeLists.txt index 5c4ab62d24e99adcda504b346046dd62bbe7c335..516b9e85e23a56bdb6c191b16c0077afe63f4863 100644 --- a/src/system/CMakeLists.txt +++ b/src/system/CMakeLists.txt @@ -3,6 +3,6 @@ PROJECT(TDengine) ADD_SUBDIRECTORY(detail) -IF (TD_LITE) +IF (TD_EDGE) ADD_SUBDIRECTORY(lite) ENDIF () \ No newline at end of file diff --git a/src/system/detail/CMakeLists.txt b/src/system/detail/CMakeLists.txt index 95cce3dfe617464cb0cc442686a2802f2178cbbc..6268b97f91a359ed924a25cb8c97c5afd495d558 100644 --- 
a/src/system/detail/CMakeLists.txt +++ b/src/system/detail/CMakeLists.txt @@ -16,10 +16,14 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) ADD_EXECUTABLE(taosd ${SRC}) - TARGET_LINK_LIBRARIES(taosd taos_static trpc tutil sdb monitor pthread http) + IF (TD_PAGMODE_LITE) + TARGET_LINK_LIBRARIES(taosd taos trpc tutil sdb monitor pthread http) + ELSE () + TARGET_LINK_LIBRARIES(taosd taos_static trpc tutil sdb monitor pthread http) + ENDIF () - IF (TD_LITE) - TARGET_LINK_LIBRARIES(taosd taosd_lite) + IF (TD_EDGE) + TARGET_LINK_LIBRARIES(taosd taosd_edge) ELSE () TARGET_LINK_LIBRARIES(taosd taosd_cluster) ENDIF () diff --git a/src/system/detail/inc/dnodeSystem.h b/src/system/detail/inc/dnodeSystem.h index 632b723ac0741a4cfe36533e29da2b3f0971f873..96e5699464af1095ae92e74b4e96dd67ae11c81d 100644 --- a/src/system/detail/inc/dnodeSystem.h +++ b/src/system/detail/inc/dnodeSystem.h @@ -21,6 +21,7 @@ extern "C" { #endif #include +#include #define tsetModuleStatus(mod) \ { tsModuleStatus |= (1 << mod); } diff --git a/src/system/detail/inc/mgmt.h b/src/system/detail/inc/mgmt.h index d7e5a1b8a989f739727da1dc7e8fcccf0b304c06..24f9822f1616529801ef2eee8ce3ec3dc5fc6f5d 100644 --- a/src/system/detail/inc/mgmt.h +++ b/src/system/detail/inc/mgmt.h @@ -222,6 +222,8 @@ typedef struct _connObj { char superAuth : 1; // super user flag char writeAuth : 1; // write flag char killConnection : 1; // kill the connection flag + uint8_t usePublicIp : 1; // if the connection request is publicIp + uint8_t reserved : 4; uint32_t queryId; // query ID to be killed uint32_t streamId; // stream ID to be killed uint32_t ip; // shell IP @@ -343,7 +345,7 @@ void mgmtCleanUpVgroups(); int mgmtInitMeters(); STabObj *mgmtGetMeter(char *meterId); STabObj *mgmtGetMeterInfo(char *src, char *tags[]); -int mgmtRetrieveMetricMeta(void *thandle, char **pStart, SMetricMetaMsg *pInfo); +int mgmtRetrieveMetricMeta(SConnObj *pConn, char **pStart, SMetricMetaMsg *pInfo); int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg 
*pCreate); int mgmtDropMeter(SDbObj *pDb, char *meterId, int ignore); int mgmtAlterMeter(SDbObj *pDb, SAlterTableMsg *pAlter); @@ -417,9 +419,9 @@ int mgmtRetrieveVnodes(SShowObj *pShow, char *data, int rows, SConnObj *pConn); int mgmtInitBalance(); void mgmtCleanupBalance(); int mgmtAllocVnodes(SVgObj *pVgroup); -void mgmtSetDnodeShellRemoving(SDnodeObj *pDnode); +int mgmtSetDnodeShellRemoving(SDnodeObj *pDnode); void mgmtSetDnodeUnRemove(SDnodeObj *pDnode); -void mgmtStartBalanceTimer(int mseconds); +void mgmtStartBalanceTimer(int64_t mseconds); void mgmtSetDnodeOfflineOnSdbChanged(); void mgmtUpdateVgroupState(SVgObj *pVgroup, int lbStatus, int srcIp); bool mgmtAddVnode(SVgObj *pVgroup, SDnodeObj *pSrcDnode, SDnodeObj *pDestDnode); diff --git a/src/system/detail/inc/mgmtBalance.h b/src/system/detail/inc/mgmtBalance.h index 67bfd55db2ae54ab8dc4a49f7d3dc79f30b1b524..a97e7948940f7c5c9ddb2dae8370af84a82c0f34 100644 --- a/src/system/detail/inc/mgmtBalance.h +++ b/src/system/detail/inc/mgmtBalance.h @@ -25,7 +25,7 @@ extern "C" { #include "dnodeSystem.h" #include "mgmt.h" #include "tglobalcfg.h" -#include "tstatus.h" +#include "vnodeStatus.h" #include "ttime.h" void mgmtCreateDnodeOrderList(); diff --git a/src/system/detail/inc/mgmtUtil.h b/src/system/detail/inc/mgmtUtil.h index 04bacbe1dbecff10f780bbeee7d01b58076e0ba5..1f70485894e7e245193b9e432b9e40b126cdb1e6 100644 --- a/src/system/detail/inc/mgmtUtil.h +++ b/src/system/detail/inc/mgmtUtil.h @@ -37,6 +37,6 @@ int32_t mgmtRetrieveMetersFromMetric(SMetricMetaMsg* pInfo, int32_t tableIndex, int32_t mgmtDoJoin(SMetricMetaMsg* pMetricMetaMsg, tQueryResultset* pRes); void mgmtReorganizeMetersInMetricMeta(SMetricMetaMsg* pInfo, int32_t index, tQueryResultset* pRes); -bool tSkipListNodeFilterCallback(struct tSkipListNode *pNode, void *param); +bool tSkipListNodeFilterCallback(const void *pNode, void *param); #endif //TBASE_MGMTUTIL_H diff --git a/src/system/detail/inc/vnode.h b/src/system/detail/inc/vnode.h index 
6e366b9b7fbfb0f5b6ad4d9c64d520c358a85e39..60449de9f5467ed6341215733156d1d8de3fb4d1 100644 --- a/src/system/detail/inc/vnode.h +++ b/src/system/detail/inc/vnode.h @@ -64,15 +64,6 @@ enum _sync_cmd { TSDB_SYNC_CMD_REMOVE, }; -enum _meter_state { - TSDB_METER_STATE_READY = 0x00, - TSDB_METER_STATE_INSERT = 0x01, - TSDB_METER_STATE_IMPORTING = 0x02, - TSDB_METER_STATE_UPDATING = 0x04, - TSDB_METER_STATE_DELETING = 0x10, - TSDB_METER_STATE_DELETED = 0x18, -}; - typedef struct { int64_t offset : 48; int64_t length : 16; @@ -206,26 +197,6 @@ typedef struct { char cont[]; } SVMsgHeader; -/* - * The value of QInfo.signature is used to denote that a query is executing, it isn't safe to release QInfo yet. - * The release operations will be blocked in a busy-waiting until the query operation reach a safepoint. - * Then it will reset the signature in a atomic operation, followed by release operation. - * Only the QInfo.signature == QInfo, this structure can be released safely. - */ -#define TSDB_QINFO_QUERY_FLAG 0x1 -#define TSDB_QINFO_RESET_SIG(x) atomic_store_64(&((x)->signature), (uint64_t)(x)) -#define TSDB_QINFO_SET_QUERY_FLAG(x) \ - atomic_val_compare_exchange_64(&((x)->signature), (uint64_t)(x), TSDB_QINFO_QUERY_FLAG); - -// live lock: wait for query reaching a safe-point, release all resources -// belongs to this query -#define TSDB_WAIT_TO_SAFE_DROP_QINFO(x) \ - { \ - while (atomic_val_compare_exchange_64(&((x)->signature), (x), 0) == TSDB_QINFO_QUERY_FLAG) { \ - taosMsleep(1); \ - } \ - } - struct tSQLBinaryExpr; typedef struct SColumnInfoEx { @@ -268,10 +239,19 @@ typedef struct SQuery { int lfd; // only for query in file, last file handle SCompBlock *pBlock; // only for query in file SField ** pFields; + int numOfBlocks; // only for query in file int blockBufferSize; // length of pBlock buffer int currentSlot; int firstSlot; + + /* + * the two parameters are utilized to handle the data missing situation, caused by import operation. 
+ * When the commit slot is the first slot, and commitPoints != 0 + */ + int32_t commitSlot; // which slot is committed, + int32_t commitPoint; // starting point for next commit + int slot; int pos; TSKEY key; @@ -280,6 +260,7 @@ typedef struct SQuery { TSKEY skey; TSKEY ekey; int64_t nAggTimeInterval; + int64_t slidingTime; // sliding time for sliding window query char intervalTimeUnit; // interval data type, used for daytime revise int8_t precision; int16_t numOfOutputCols; @@ -287,9 +268,7 @@ typedef struct SQuery { int16_t checkBufferInLoop; // check if the buffer is full during scan each block SLimitVal limit; int32_t rowSize; - int32_t dataRowSize; // row size of each loaded data from disk, the value is - // used for prepare buffer SSqlGroupbyExpr * pGroupbyExpr; SSqlFunctionExpr * pSelectExpr; SColumnInfoEx * colList; @@ -337,7 +316,7 @@ extern void * vnodeTmrCtrl; // read API extern int (*vnodeSearchKeyFunc[])(char *pValue, int num, TSKEY key, int order); -void *vnodeQueryInTimeRange(SMeterObj **pMeterObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *sqlExprs, +void *vnodeQueryOnSingleTable(SMeterObj **pMeterObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *sqlExprs, SQueryMeterMsg *pQueryMsg, int *code); void *vnodeQueryOnMultiMeters(SMeterObj **pMeterObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *pSqlExprs, @@ -370,6 +349,8 @@ void vnodeFreeQInfo(void *, bool); void vnodeFreeQInfoInQueue(void *param); bool vnodeIsQInfoValid(void *param); +void vnodeDecRefCount(void *param); +void vnodeAddRefCount(void *param); int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQuery); diff --git a/src/system/detail/inc/vnodeQueryImpl.h b/src/system/detail/inc/vnodeQueryImpl.h index 35279ca011f1a8aeec012e1d2311862dde2f95ca..cce66786fd993b1b300a5a2abbe27e5f4eff37de 100644 --- a/src/system/detail/inc/vnodeQueryImpl.h +++ b/src/system/detail/inc/vnodeQueryImpl.h @@ -13,8 +13,8 @@ * along with this program. If not, see . 
*/ -#ifndef TDENGINE_VNODEQUERYUTIL_H -#define TDENGINE_VNODEQUERYUTIL_H +#ifndef TDENGINE_VNODEQUERYIMPL_H +#define TDENGINE_VNODEQUERYIMPL_H #ifdef __cplusplus extern "C" { @@ -22,12 +22,19 @@ extern "C" { #include "os.h" -#include "ihash.h" +#include "hash.h" +#include "hashutil.h" #define GET_QINFO_ADDR(x) ((char*)(x)-offsetof(SQInfo, query)) #define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0) +/* + * set the output buffer page size is 16k + * The page size should be sufficient for at least one output result or intermediate result. + * Some intermediate results may be extremely large, such as top/bottom(100) query. + */ #define DEFAULT_INTERN_BUF_SIZE 16384L + #define INIT_ALLOCATE_DISK_PAGES 60L #define DEFAULT_DATA_FILE_MAPPING_PAGES 2L #define DEFAULT_DATA_FILE_MMAP_WINDOW_SIZE (DEFAULT_DATA_FILE_MAPPING_PAGES * DEFAULT_INTERN_BUF_SIZE) @@ -57,7 +64,7 @@ typedef enum { * the next query. * * this status is only exist in group-by clause and - * diff/add/division/mulitply/ query. + * diff/add/division/multiply/ query. 
*/ QUERY_RESBUF_FULL = 0x2, @@ -111,19 +118,19 @@ typedef enum { #define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN) typedef int (*__block_search_fn_t)(char* data, int num, int64_t key, int order); -typedef int32_t (*__read_data_fn_t)(int fd, SQInfo* pQInfo, SQueryFilesInfo* pQueryFile, char* buf, uint64_t offset, - int32_t size); static FORCE_INLINE SMeterObj* getMeterObj(void* hashHandle, int32_t sid) { - return *(SMeterObj**)taosGetIntHashData(hashHandle, sid); + return *(SMeterObj**)taosGetDataFromHashTable(hashHandle, (const char*) &sid, sizeof(sid)); } bool isQueryKilled(SQuery* pQuery); bool isFixedOutputQuery(SQuery* pQuery); bool isPointInterpoQuery(SQuery* pQuery); +bool isSumAvgRateQuery(SQuery *pQuery); bool isTopBottomQuery(SQuery* pQuery); bool isFirstLastRowQuery(SQuery* pQuery); bool isTSCompQuery(SQuery* pQuery); +bool notHasQueryTimeRange(SQuery *pQuery); bool needSupplementaryScan(SQuery* pQuery); bool onDemandLoadDatablock(SQuery* pQuery, int16_t queryRangeSet); @@ -143,7 +150,6 @@ void vnodeScanAllData(SQueryRuntimeEnv* pRuntimeEnv); int32_t vnodeQueryResultInterpolate(SQInfo* pQInfo, tFilePage** pDst, tFilePage** pDataSrc, int32_t numOfRows, int32_t* numOfInterpo); void copyResToQueryResultBuf(SMeterQuerySupportObj* pSupporter, SQuery* pQuery); -void moveDescOrderResultsToFront(SQueryRuntimeEnv* pRuntimeEnv); void doSkipResults(SQueryRuntimeEnv* pRuntimeEnv); void doFinalizeResult(SQueryRuntimeEnv* pRuntimeEnv); @@ -153,29 +159,29 @@ void forwardIntervalQueryRange(SMeterQuerySupportObj* pSupporter, SQueryRuntimeE void forwardQueryStartPosition(SQueryRuntimeEnv* pRuntimeEnv); bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySupportObj* pSupporter, - SPointInterpoSupporter* pPointInterpSupporter); + SPointInterpoSupporter* pPointInterpSupporter, int64_t* key); void pointInterpSupporterInit(SQuery* pQuery, SPointInterpoSupporter* pInterpoSupport); void 
pointInterpSupporterDestroy(SPointInterpoSupporter* pPointInterpSupport); void pointInterpSupporterSetData(SQInfo* pQInfo, SPointInterpoSupporter* pPointInterpSupport); int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv* pRuntimeEnv, SPositionInfo* position); -void doCloseAllOpenedResults(SMeterQuerySupportObj* pSupporter); +int32_t doCloseAllOpenedResults(SMeterQuerySupportObj* pSupporter); void disableFunctForSuppleScan(SQueryRuntimeEnv* pRuntimeEnv, int32_t order); void enableFunctForMasterScan(SQueryRuntimeEnv* pRuntimeEnv, int32_t order); int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj* pSupporter); void copyFromGroupBuf(SQInfo* pQInfo, SOutputRes* result); -SBlockInfo getBlockBasicInfo(void* pBlock, int32_t blockType); -SCacheBlock* getCacheDataBlock(SMeterObj* pMeterObj, SQuery* pQuery, int32_t slot); +SBlockInfo getBlockBasicInfo(SQueryRuntimeEnv* pRuntimeEnv, void* pBlock, int32_t blockType); +SCacheBlock* getCacheDataBlock(SMeterObj* pMeterObj, SQueryRuntimeEnv* pRuntimeEnv, int32_t slot); -void queryOnBlock(SMeterQuerySupportObj* pSupporter, int64_t* primaryKeys, int32_t blockStatus, char* data, +void queryOnBlock(SMeterQuerySupportObj* pSupporter, int64_t* primaryKeys, int32_t blockStatus, SBlockInfo* pBlockBasicInfo, SMeterDataInfo* pDataHeadInfoEx, SField* pFields, __block_search_fn_t searchFn); -SMeterDataInfo** vnodeFilterQualifiedMeters(SQInfo* pQInfo, int32_t vid, int32_t fileIndex, - tSidSet* pSidSet, SMeterDataInfo* pMeterDataInfo, int32_t* numOfMeters); +int32_t vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, tSidSet *pSidSet, SMeterDataInfo *pMeterDataInfo, + int32_t *numOfMeters, SMeterDataInfo ***pReqMeterDataInfo); int32_t vnodeGetVnodeHeaderFileIdx(int32_t* fid, SQueryRuntimeEnv* pRuntimeEnv, int32_t order); int32_t createDataBlocksInfoEx(SMeterDataInfo** pMeterDataInfo, int32_t numOfMeters, @@ -185,16 +191,16 @@ void freeMeterBlockInfoEx(SMeterDataBlockInfoEx* pDataBlockInfoEx, int32_t len); void 
setExecutionContext(SMeterQuerySupportObj* pSupporter, SOutputRes* outputRes, int32_t meterIdx, int32_t groupIdx, SMeterQueryInfo* sqinfo); -void setIntervalQueryExecutionContext(SMeterQuerySupportObj* pSupporter, int32_t meterIdx, SMeterQueryInfo* sqinfo); +int32_t setIntervalQueryExecutionContext(SMeterQuerySupportObj* pSupporter, int32_t meterIdx, SMeterQueryInfo* sqinfo); int64_t getQueryStartPositionInCache(SQueryRuntimeEnv* pRuntimeEnv, int32_t* slot, int32_t* pos, bool ignoreQueryRange); int64_t getNextAccessedKeyInData(SQuery* pQuery, int64_t* pPrimaryCol, SBlockInfo* pBlockInfo, int32_t blockStatus); -uint32_t getDataBlocksForMeters(SMeterQuerySupportObj* pSupporter, SQuery* pQuery, char* pHeaderData, - int32_t numOfMeters, const char* filePath, SMeterDataInfo** pMeterDataInfo); +int32_t getDataBlocksForMeters(SMeterQuerySupportObj* pSupporter, SQuery* pQuery, int32_t numOfMeters, + const char* filePath, SMeterDataInfo** pMeterDataInfo, uint32_t* numOfBlocks); int32_t LoadDatablockOnDemand(SCompBlock* pBlock, SField** pFields, uint8_t* blkStatus, SQueryRuntimeEnv* pRuntimeEnv, int32_t fileIdx, int32_t slotIdx, __block_search_fn_t searchFn, bool onDemand); -char *vnodeGetHeaderFileData(SQueryRuntimeEnv *pRuntimeEnv, int32_t vnodeId, int32_t fileIndex); +int32_t vnodeGetHeaderFile(SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIndex); /** * Create SMeterQueryInfo. 
@@ -204,7 +210,7 @@ char *vnodeGetHeaderFileData(SQueryRuntimeEnv *pRuntimeEnv, int32_t vnodeId, int * @param ekey * @return */ -SMeterQueryInfo* createMeterQueryInfo(SQuery* pQuery, TSKEY skey, TSKEY ekey); +SMeterQueryInfo* createMeterQueryInfo(SQuery* pQuery, int32_t sid, TSKEY skey, TSKEY ekey); /** * Destroy meter query info @@ -219,16 +225,16 @@ void destroyMeterQueryInfo(SMeterQueryInfo *pMeterQueryInfo, int32_t numOfCols); * @param skey * @param ekey */ -void changeMeterQueryInfoForSuppleQuery(SMeterQueryInfo *pMeterQueryInfo, TSKEY skey, TSKEY ekey); +void changeMeterQueryInfoForSuppleQuery(SQueryResultBuf* pResultBuf, SMeterQueryInfo *pMeterQueryInfo, TSKEY skey, TSKEY ekey); /** * add the new allocated disk page to meter query info * the new allocated disk page is used to keep the intermediate (interval) results - * + * @param pQuery * @param pMeterQueryInfo * @param pSupporter */ -tFilePage* addDataPageForMeterQueryInfo(SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportObj *pSupporter); +tFilePage* addDataPageForMeterQueryInfo(SQuery* pQuery, SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportObj *pSupporter); /** * save the query range data into SMeterQueryInfo @@ -271,10 +277,17 @@ void displayInterResult(SData** pdata, SQuery* pQuery, int32_t numOfRows); void vnodePrintQueryStatistics(SMeterQuerySupportObj* pSupporter); -void clearGroupResultBuf(SOutputRes* pOneOutputRes, int32_t nOutputCols); +void clearGroupResultBuf(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes *pOneOutputRes); +void copyGroupResultBuf(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes* dst, const SOutputRes* src); + +void resetSlidingWindowInfo(SQueryRuntimeEnv *pRuntimeEnv, SSlidingWindowInfo* pSlidingWindowInfo); +void clearCompletedSlidingWindows(SQueryRuntimeEnv* pRuntimeEnv); +int32_t numOfClosedSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo); +void closeSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo, int32_t slot); +void closeAllSlidingWindow(SSlidingWindowInfo* 
pSlidingWindowInfo); #ifdef __cplusplus } #endif -#endif // TDENGINE_VNODEQUERYUTIL_H +#endif // TDENGINE_VNODEQUERYIMPL_H diff --git a/src/system/detail/inc/vnodeRead.h b/src/system/detail/inc/vnodeRead.h index d059075142126545a84c9cd60c0b14e1b79c5d7a..bda53cd3d86161b172a97b03b81f19dc3438f6b5 100644 --- a/src/system/detail/inc/vnodeRead.h +++ b/src/system/detail/inc/vnodeRead.h @@ -21,6 +21,7 @@ extern "C" { #endif #include "os.h" +#include "tresultBuf.h" #include "tinterpolation.h" #include "vnodeTagMgmt.h" @@ -35,24 +36,25 @@ typedef struct { int32_t fileId; } SPositionInfo; -typedef struct SQueryLoadBlockInfo { +typedef struct SLoadDataBlockInfo { int32_t fileListIndex; /* index of this file in files list of this vnode */ int32_t fileId; int32_t slotIdx; int32_t sid; -} SQueryLoadBlockInfo; + bool tsLoaded; // if timestamp column of current block is loaded or not +} SLoadDataBlockInfo; -typedef struct SQueryLoadCompBlockInfo { +typedef struct SLoadCompBlockInfo { int32_t sid; /* meter sid */ int32_t fileId; int32_t fileListIndex; -} SQueryLoadCompBlockInfo; +} SLoadCompBlockInfo; /* * the header file info for one vnode */ typedef struct SHeaderFileInfo { - int32_t fileID; // file id + int32_t fileID; // file id } SHeaderFileInfo; typedef struct SQueryCostSummary { @@ -83,10 +85,15 @@ typedef struct SQueryCostSummary { int64_t tmpBufferInDisk; // size of buffer for intermediate result } SQueryCostSummary; +typedef struct SPosInfo { + int64_t pageId; + int32_t rowId; +} SPosInfo; + typedef struct SOutputRes { uint16_t numOfRows; int32_t nAlloc; - tFilePage** result; + SPosInfo pos; SResultInfo* resultInfo; } SOutputRes; @@ -100,19 +107,42 @@ typedef struct SQueryFilesInfo { int32_t current; // the memory mapped header file, NOTE: only one header file can be mmap. 
int32_t vnodeId; - int32_t headerFd; // header file fd - char* pHeaderFileData; // mmap header files - int64_t headFileSize; + int32_t headerFd; // header file fd + int64_t headerFileSize; int32_t dataFd; int32_t lastFd; - - char headerFilePath[PATH_MAX]; // current opened header file name - char dataFilePath[PATH_MAX]; // current opened data file name - char lastFilePath[PATH_MAX]; // current opened last file path - char dbFilePathPrefix[PATH_MAX]; + + char headerFilePath[PATH_MAX]; // current opened header file name + char dataFilePath[PATH_MAX]; // current opened data file name + char lastFilePath[PATH_MAX]; // current opened last file path + char dbFilePathPrefix[PATH_MAX]; } SQueryFilesInfo; -typedef struct RuntimeEnvironment { +typedef struct STimeWindow { + TSKEY skey; + TSKEY ekey; +} STimeWindow; + +typedef struct SWindowStatus { + STimeWindow window; + bool closed; +} SWindowStatus; + +typedef struct SSlidingWindowInfo { + SOutputRes* pResult; // reference to SQuerySupporter->pResult + SWindowStatus* pStatus; // current query window closed or not? + void* hashList; // hash list for quick access + int16_t type; // data type for hash key + int32_t capacity; // max capacity + int32_t curIndex; // current start active index + int32_t size; + + int64_t startTime; // start time of the first time window for sliding query + int64_t prevSKey; // previous (not completed) sliding window start key + int64_t threshold; // threshold for return completed results. 
+} SSlidingWindowInfo; + +typedef struct SQueryRuntimeEnv { SPositionInfo startPos; /* the start position, used for secondary/third iteration */ SPositionInfo endPos; /* the last access position in query, served as the start pos of reversed order query */ SPositionInfo nextPos; /* start position of the next scan */ @@ -126,30 +156,41 @@ typedef struct RuntimeEnvironment { SQuery* pQuery; SMeterObj* pMeterObj; SQLFunctionCtx* pCtx; - SQueryLoadBlockInfo loadBlockInfo; /* record current block load information */ - SQueryLoadCompBlockInfo loadCompBlockInfo; /* record current compblock information in SQuery */ - SQueryFilesInfo vnodeFileInfo; - int16_t numOfRowsPerPage; - int16_t offset[TSDB_MAX_COLUMNS]; - int16_t scanFlag; // denotes reversed scan of data or not - SInterpolationInfo interpoInfo; - SData** pInterpoBuf; - SOutputRes* pResult; // reference to SQuerySupporter->pResult - void* hashList; - int32_t usedIndex; // assigned SOutputRes in list - STSBuf* pTSBuf; - STSCursor cur; - SQueryCostSummary summary; + SLoadDataBlockInfo loadBlockInfo; /* record current block load information */ + SLoadCompBlockInfo loadCompBlockInfo; /* record current compblock information in SQuery */ + SQueryFilesInfo vnodeFileInfo; + int16_t numOfRowsPerPage; + int16_t offset[TSDB_MAX_COLUMNS]; + int16_t scanFlag; // denotes reversed scan of data or not + SInterpolationInfo interpoInfo; + SData** pInterpoBuf; + + SSlidingWindowInfo swindowResInfo; + + STSBuf* pTSBuf; + STSCursor cur; + SQueryCostSummary summary; + + STimeWindow intervalWindow; // the complete time window, not affected by the actual data distribution + + /* + * Temporarily hold the in-memory cache block info during scan cache blocks + * Here we do not use the cacheblock info from pMeterObj, simple because it may change anytime + * during the query by the subumit/insert handling threads. + * So we keep a copy of the support structure as well as the cache block data itself. 
+ */ + SCacheBlock cacheBlock; + + SQueryResultBuf* pResultBuf; + bool stableQuery; // is super table query or not } SQueryRuntimeEnv; -/* intermediate result during multimeter query involves interval */ +/* intermediate pos during multimeter query involves interval */ typedef struct SMeterQueryInfo { int64_t lastKey; int64_t skey; int64_t ekey; int32_t numOfRes; - uint32_t numOfPages; - uint32_t numOfAlloc; int32_t reverseIndex; // reversed output indicator, start from (numOfRes-1) int16_t reverseFillRes; // denote if reverse fill the results in supplementary scan required or not int16_t queryRangeSet; // denote if the query range is set, only available for interval query @@ -157,23 +198,22 @@ typedef struct SMeterQueryInfo { int64_t tag; STSCursor cur; SResultInfo* resultInfo; - uint32_t* pageList; + int32_t sid; // for retrieve the page id list } SMeterQueryInfo; typedef struct SMeterDataInfo { uint64_t offsetInHeaderFile; int32_t numOfBlocks; int32_t start; // start block index - SCompBlock** pBlock; + SCompBlock* pBlock; int32_t meterOrderIdx; SMeterObj* pMeterObj; - int32_t groupIdx; // group id in meter list - + int32_t groupIdx; // group id in meter list SMeterQueryInfo* pMeterQInfo; } SMeterDataInfo; typedef struct SMeterQuerySupportObj { - void* pMeterObj; + void* pMetersHashTable; // meter table hash list SMeterSidExtInfo** pMeterSidExtInfo; int32_t numOfMeters; @@ -202,26 +242,16 @@ typedef struct SMeterQuerySupportObj { */ int32_t meterIdx; - int32_t meterOutputFd; - int32_t lastPageId; - int32_t numOfPages; int32_t numOfGroupResultPages; int32_t groupResultSize; - - char* meterOutputMMapBuf; - int64_t bufSize; - char extBufFile[256]; // external file name - SMeterDataInfo* pMeterDataInfo; - TSKEY* tsList; - int32_t tsNum; - + TSKEY* tsList; } SMeterQuerySupportObj; typedef struct _qinfo { - uint64_t signature; - + uint64_t signature; + int32_t refCount; // QInfo reference count, when the value is 0, it can be released safely char 
user[TSDB_METER_ID_LEN + 1]; char sql[TSDB_SHOW_SQL_LEN]; uint8_t stream; @@ -231,24 +261,20 @@ typedef struct _qinfo { int64_t useconds; int killed; struct _qinfo *prev, *next; + SQuery query; + int totalPoints; + int pointsRead; + int pointsReturned; + int pointsInterpo; + int code; + char bufIndex; + char changed; + char over; + SMeterObj* pObj; + sem_t dataReady; - SQuery query; - int num; - int totalPoints; - int pointsRead; - int pointsReturned; - int pointsInterpo; - int code; - char bufIndex; - char changed; - char over; - SMeterObj* pObj; - - int (*fp)(SMeterObj*, SQuery*); - - sem_t dataReady; SMeterQuerySupportObj* pMeterQuerySupporter; - + int (*fp)(SMeterObj*, SQuery*); } SQInfo; int32_t vnodeQuerySingleMeterPrepare(SQInfo* pQInfo, SMeterObj* pMeterObj, SMeterQuerySupportObj* pSMultiMeterObj, @@ -268,7 +294,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo* pQInfo, SQuery* pQuery, void* param) void vnodeDecMeterRefcnt(SQInfo* pQInfo); /* sql query handle in dnode */ -void vnodeSingleMeterQuery(SSchedMsg* pMsg); +void vnodeSingleTableQuery(SSchedMsg* pMsg); /* * handle multi-meter query process diff --git a/src/inc/tstatus.h b/src/system/detail/inc/vnodeStatus.h similarity index 70% rename from src/inc/tstatus.h rename to src/system/detail/inc/vnodeStatus.h index 58b98c59f8835d0a85b60cdd416a6835c4d139ec..456304370d34e6f4101293e197962b069085689f 100644 --- a/src/inc/tstatus.h +++ b/src/system/detail/inc/vnodeStatus.h @@ -26,7 +26,7 @@ enum _TSDB_VG_STATUS { TSDB_VG_STATUS_READY = TSDB_CODE_SUCCESS, TSDB_VG_STATUS_IN_PROGRESS = TSDB_CODE_ACTION_IN_PROGRESS, TSDB_VG_STATUS_NO_DISK_PERMISSIONS = TSDB_CODE_NO_DISK_PERMISSIONS, - TSDB_VG_STATUS_SERVER_NO_PACE = TSDB_CODE_SERVER_NO_SPACE, + TSDB_VG_STATUS_SERVER_NO_PACE = TSDB_CODE_SERV_NO_DISKSPACE, TSDB_VG_STATUS_SERV_OUT_OF_MEMORY = TSDB_CODE_SERV_OUT_OF_MEMORY, TSDB_VG_STATUS_INIT_FAILED = TSDB_CODE_VG_INIT_FAILED, TSDB_VG_STATUS_FULL = TSDB_CODE_NO_ENOUGH_DNODES, @@ -82,15 +82,26 @@ enum 
_TSDB_VN_STREAM_STATUS { TSDB_VN_STREAM_STATUS_START }; -const char* taosGetVgroupStatusStr(int vgroupStatus); -const char* taosGetDbStatusStr(int dbStatus); -const char* taosGetVnodeStatusStr(int vnodeStatus); -const char* taosGetVnodeSyncStatusStr(int vnodeSyncStatus); -const char* taosGetVnodeDropStatusStr(int dropping); -const char* taosGetDnodeStatusStr(int dnodeStatus); -const char* taosGetDnodeLbStatusStr(int dnodeBalanceStatus); -const char* taosGetVgroupLbStatusStr(int vglbStatus); -const char* taosGetVnodeStreamStatusStr(int vnodeStreamStatus); +enum TSDB_TABLE_STATUS { + TSDB_METER_STATE_READY = 0x00, + TSDB_METER_STATE_INSERTING = 0x01, + TSDB_METER_STATE_IMPORTING = 0x02, + TSDB_METER_STATE_UPDATING = 0x04, + TSDB_METER_STATE_DROPPING = 0x10, + TSDB_METER_STATE_DROPPED = 0x18, +}; + +const char* taosGetVgroupStatusStr(int32_t vgroupStatus); +const char* taosGetDbStatusStr(int32_t dbStatus); +const char* taosGetVnodeStatusStr(int32_t vnodeStatus); +const char* taosGetVnodeSyncStatusStr(int32_t vnodeSyncStatus); +const char* taosGetVnodeDropStatusStr(int32_t dropping); +const char* taosGetDnodeStatusStr(int32_t dnodeStatus); +const char* taosGetDnodeLbStatusStr(int32_t dnodeBalanceStatus); +const char* taosGetVgroupLbStatusStr(int32_t vglbStatus); +const char* taosGetVnodeStreamStatusStr(int32_t vnodeStreamStatus); + +const char* taosGetTableStatusStr(int32_t tableStatus); #ifdef __cplusplus } diff --git a/src/system/detail/inc/vnodeTagMgmt.h b/src/system/detail/inc/vnodeTagMgmt.h index 320ef5645395e08578d206b4f9db89809b56dafb..b801d1c5412d68ff52fc5e9c0ad33b8b23a6a469 100644 --- a/src/system/detail/inc/vnodeTagMgmt.h +++ b/src/system/detail/inc/vnodeTagMgmt.h @@ -32,21 +32,14 @@ extern "C" { * Note: * 1. we implement a quick sort algorithm, may remove it later. 
*/ - -typedef struct tTagSchema { - struct SSchema *pSchema; - int32_t numOfCols; - int32_t colOffset[]; -} tTagSchema; - typedef struct tSidSet { int32_t numOfSids; int32_t numOfSubSet; SMeterSidExtInfo **pSids; int32_t * starterPos; // position of each subgroup, generated according to - tTagSchema *pTagSchema; - tOrderIdx orderIdx; + SColumnModel *pColumnModel; + SColumnOrderInfo orderIdx; } tSidSet; typedef int32_t (*__ext_compar_fn_t)(const void *p1, const void *p2, void *param); @@ -54,8 +47,6 @@ typedef int32_t (*__ext_compar_fn_t)(const void *p1, const void *p2, void *param tSidSet *tSidSetCreate(struct SMeterSidExtInfo **pMeterSidExtInfo, int32_t numOfMeters, SSchema *pSchema, int32_t numOfTags, SColIndexEx *colList, int32_t numOfOrderCols); -tTagSchema *tCreateTagSchema(SSchema *pSchema, int32_t numOfTagCols); - int32_t *calculateSubGroup(void **pSids, int32_t numOfMeters, int32_t *numOfSubset, tOrderDescriptor *pOrderDesc, __ext_compar_fn_t compareFn); diff --git a/src/system/detail/src/dnodeMgmt.c b/src/system/detail/src/dnodeMgmt.c index 69d4dc2c309ef5d4e2be694008a9ee913ae617fd..5e2b150cbb1960daa4990bbd7aef81154be3111b 100644 --- a/src/system/detail/src/dnodeMgmt.c +++ b/src/system/detail/src/dnodeMgmt.c @@ -26,7 +26,7 @@ #include "vnodeMgmt.h" #include "vnodeSystem.h" #include "vnodeUtil.h" -#include "tstatus.h" +#include "vnodeStatus.h" SMgmtObj mgmtObj; extern uint64_t tsCreatedTime; @@ -153,7 +153,7 @@ int vnodeProcessAlterStreamRequest(char *pMsg, int msgLen, SMgmtObj *pObj) { } if (pAlter->sid >= pVnode->cfg.maxSessions || pAlter->sid < 0) { - dError("vid:%d sid:%d uid:%ld, sid is out of range", pAlter->vnode, pAlter->sid, pAlter->uid); + dError("vid:%d sid:%d uid:%" PRIu64 ", sid is out of range", pAlter->vnode, pAlter->sid, pAlter->uid); code = TSDB_CODE_INVALID_TABLE_ID; goto _over; } diff --git a/src/system/detail/src/dnodeService.c b/src/system/detail/src/dnodeService.c index 
2de60bda91f2fb172c2741cac6d15932b93e7d55..742efb7b6a72856095759d7bf8262f50103185f8 100644 --- a/src/system/detail/src/dnodeService.c +++ b/src/system/detail/src/dnodeService.c @@ -55,12 +55,8 @@ int main(int argc, char *argv[]) { exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-V") == 0) { - #ifdef CLUSTER - printf("enterprise version: %s compatible_version: %s\n", version, compatible_version); - #else - printf("community version: %s compatible_version: %s\n", version, compatible_version); - #endif - + char *versionStr = tsIsCluster ? "enterprise" : "community"; + printf("%s version: %s compatible_version: %s\n", versionStr, version, compatible_version); printf("gitinfo: %s\n", gitinfo); printf("gitinfoI: %s\n", gitinfoOfInternal); printf("buildinfo: %s\n", buildinfo); diff --git a/src/system/detail/src/dnodeSystem.c b/src/system/detail/src/dnodeSystem.c index 05a7bef48688cb765c871c06f23629589cb4ef6a..49e9a37590996921b1784c56e5edca4246fd2bd0 100644 --- a/src/system/detail/src/dnodeSystem.c +++ b/src/system/detail/src/dnodeSystem.c @@ -26,9 +26,6 @@ #include "tglobalcfg.h" #include "vnode.h" -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Woverflow" - SModule tsModule[TSDB_MOD_MAX] = {0}; uint32_t tsModuleStatus = 0; pthread_mutex_t dmutex; @@ -139,7 +136,7 @@ int dnodeInitSystem() { vnodeInitMgmtIp(); tsPrintGlobalConfig(); - dPrint("Server IP address is:%s", tsInternalIp); + dPrint("Server IP address is:%s", tsPrivateIp); taosSetCoreDump(); @@ -219,5 +216,3 @@ void dnodeCountRequest(SCountInfo *info) { info->selectReqNum = atomic_exchange_32(&vnodeSelectReqNum, 0); info->insertReqNum = atomic_exchange_32(&vnodeInsertReqNum, 0); } - -#pragma GCC diagnostic pop \ No newline at end of file diff --git a/src/system/detail/src/mgmtDb.c b/src/system/detail/src/mgmtDb.c index c8c8ed1bb8b0a205dadfe36994518ec3c213f813..b935b68425e053a1a4ed466d65c8c790c701f384 100644 --- a/src/system/detail/src/mgmtDb.c +++ b/src/system/detail/src/mgmtDb.c @@ -20,7 +20,7 @@ 
#include "mgmtBalance.h" #include "mgmtUtil.h" #include "tschemautil.h" -#include "tstatus.h" +#include "vnodeStatus.h" void *dbSdb = NULL; int tsDbUpdateSize; @@ -54,8 +54,8 @@ void mgmtDbActionInit() { } void *mgmtDbAction(char action, void *row, char *str, int size, int *ssize) { - if (mgmtDbActionFp[action] != NULL) { - return (*(mgmtDbActionFp[action]))(row, str, size, ssize); + if (mgmtDbActionFp[(uint8_t)action] != NULL) { + return (*(mgmtDbActionFp[(uint8_t)action]))(row, str, size, ssize); } return NULL; } diff --git a/src/system/detail/src/mgmtDnode.c b/src/system/detail/src/mgmtDnode.c index 0504c1174b1590fa608108865f48627e604fd945..4066dd766996b6e63f9226e4df2f27cebae2738b 100644 --- a/src/system/detail/src/mgmtDnode.c +++ b/src/system/detail/src/mgmtDnode.c @@ -20,8 +20,7 @@ #include "dnodeSystem.h" #include "mgmt.h" #include "tschemautil.h" -#include "tstatus.h" -#include "tstatus.h" +#include "vnodeStatus.h" bool mgmtCheckModuleInDnode(SDnodeObj *pDnode, int moduleType); int mgmtGetDnodesNum(); @@ -468,7 +467,6 @@ int mgmtRetrieveVnodes(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { SDnodeObj *pDnode = NULL; char * pWrite; int cols = 0; - char ipstr[20]; if (0 == rows) return 0; diff --git a/src/system/detail/src/mgmtDnodeInt.c b/src/system/detail/src/mgmtDnodeInt.c index 380cd2aff4a70b397f0729a89ce597c136eb94d6..1a6d0c9c09f6db94cb885387ee917024c2847cd2 100644 --- a/src/system/detail/src/mgmtDnodeInt.c +++ b/src/system/detail/src/mgmtDnodeInt.c @@ -210,7 +210,7 @@ char *mgmtBuildCreateMeterIe(STabObj *pMeter, char *pMsg, int vnode) { for (int i = 0; i < pMeter->numOfColumns; ++i) { pCreateMeter->schema[i].type = pSchema[i].type; - /* strcpy(pCreateMeter->schema[i].name, pSchema[i].name); */ + /* strcpy(pCreateMeter->schema[i].name, pColumnModel[i].name); */ pCreateMeter->schema[i].bytes = htons(pSchema[i].bytes); pCreateMeter->schema[i].colId = htons(pSchema[i].colId); } @@ -465,8 +465,11 @@ int mgmtCfgDynamicOptions(SDnodeObj *pDnode, char 
*msg) { } int mgmtSendCfgDnodeMsg(char *cont) { +#ifdef CLUSTER char * pMsg, *pStart; int msgLen = 0; +#endif + SDnodeObj *pDnode; SCfgMsg * pCfg = (SCfgMsg *)cont; uint32_t ip; @@ -484,6 +487,7 @@ int mgmtSendCfgDnodeMsg(char *cont) { return code; } +#ifdef CLUSTER pStart = taosBuildReqMsg(pDnode->thandle, TSDB_MSG_TYPE_CFG_PNODE); if (pStart == NULL) return TSDB_CODE_NODE_OFFLINE; pMsg = pStart; @@ -493,6 +497,8 @@ int mgmtSendCfgDnodeMsg(char *cont) { msgLen = pMsg - pStart; taosSendMsgToDnode(pDnode, pStart, msgLen); - +#else + (void)tsCfgDynamicOptions(pCfg->config); +#endif return 0; } diff --git a/src/system/detail/src/mgmtMeter.c b/src/system/detail/src/mgmtMeter.c index 05ee0762be778dca704b845b647eb92a6604c374..a2a6ed8a7d3e506ddfcd1926683a0d823906c36a 100644 --- a/src/system/detail/src/mgmtMeter.c +++ b/src/system/detail/src/mgmtMeter.c @@ -27,7 +27,7 @@ #include "tsqlfunction.h" #include "ttime.h" #include "vnodeTagMgmt.h" -#include "tstatus.h" +#include "vnodeStatus.h" extern int64_t sdbVersion; @@ -418,8 +418,8 @@ void *mgmtMeterActionAfterBatchUpdate(void *row, char *str, int size, int *ssize } void *mgmtMeterAction(char action, void *row, char *str, int size, int *ssize) { - if (mgmtMeterActionFp[action] != NULL) { - return (*(mgmtMeterActionFp[action]))(row, str, size, ssize); + if (mgmtMeterActionFp[(uint8_t)action] != NULL) { + return (*(mgmtMeterActionFp[(uint8_t)action]))(row, str, size, ssize); } return NULL; } @@ -688,7 +688,7 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { pMeter->uid = (((uint64_t)pMeter->gid.vgId) << 40) + ((((uint64_t)pMeter->gid.sid) & ((1ul << 24) - 1ul)) << 16) + ((uint64_t)sdbVersion & ((1ul << 16) - 1ul)); - mTrace("table:%s, create table in vgroup, vgId:%d sid:%d vnode:%d uid:%llu db:%s", + mTrace("table:%s, create table in vgroup, vgId:%d sid:%d vnode:%d uid:%" PRIu64 " db:%s", pMeter->meterId, pVgroup->vgId, sid, pVgroup->vnodeGid[0].vnode, pMeter->uid, pDb->name); } else { pMeter->uid = 
(((uint64_t)pMeter->createdTime) << 16) + ((uint64_t)sdbVersion & ((1ul << 16) - 1ul)); @@ -981,10 +981,32 @@ SSchema *mgmtGetMeterSchema(STabObj *pMeter) { return (SSchema *)pMetric->schema; } +static int32_t mgmtSerializeTagValue(char* pMsg, STabObj* pMeter, int16_t* tagsId, int32_t numOfTags) { + int32_t offset = 0; + + for (int32_t j = 0; j < numOfTags; ++j) { + if (tagsId[j] == TSDB_TBNAME_COLUMN_INDEX) { // handle the table name tags + char name[TSDB_METER_NAME_LEN] = {0}; + extractTableName(pMeter->meterId, name); + + memcpy(pMsg + offset, name, TSDB_METER_NAME_LEN); + offset += TSDB_METER_NAME_LEN; + } else { + SSchema s = {0}; + char * tag = mgmtMeterGetTag(pMeter, tagsId[j], &s); + + memcpy(pMsg + offset, tag, (size_t)s.bytes); + offset += s.bytes; + } + } + + return offset; +} + /* * serialize SVnodeSidList to byte array */ -static char *mgmtBuildMetricMetaMsg(STabObj *pMeter, int32_t *ovgId, SVnodeSidList **pList, SMetricMeta *pMeta, +static char *mgmtBuildMetricMetaMsg(SConnObj *pConn, STabObj *pMeter, int32_t *ovgId, SVnodeSidList **pList, SMetricMeta *pMeta, int32_t tagLen, int16_t numOfTags, int16_t *tagsId, int32_t maxNumOfMeters, char *pMsg) { if (pMeter->gid.vgId != *ovgId || ((*pList) != NULL && (*pList)->numOfSids >= maxNumOfMeters)) { @@ -993,7 +1015,6 @@ static char *mgmtBuildMetricMetaMsg(STabObj *pMeter, int32_t *ovgId, SVnodeSidLi * 1. the query msg may be larger than 64k, * 2. 
the following meters belong to different vnodes */ - (*pList) = (SVnodeSidList *)pMsg; (*pList)->numOfSids = 0; (*pList)->index = 0; @@ -1001,8 +1022,13 @@ static char *mgmtBuildMetricMetaMsg(STabObj *pMeter, int32_t *ovgId, SVnodeSidLi SVgObj *pVgroup = mgmtGetVgroup(pMeter->gid.vgId); for (int i = 0; i < TSDB_VNODES_SUPPORT; ++i) { - (*pList)->vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; - (*pList)->vpeerDesc[i].vnode = pVgroup->vnodeGid[i].vnode; + if (pConn->usePublicIp) { + (*pList)->vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; + (*pList)->vpeerDesc[i].vnode = pVgroup->vnodeGid[i].vnode; + } else { + (*pList)->vpeerDesc[i].ip = pVgroup->vnodeGid[i].ip; + (*pList)->vpeerDesc[i].vnode = pVgroup->vnodeGid[i].vnode; + } } pMsg += sizeof(SVnodeSidList); @@ -1012,29 +1038,15 @@ static char *mgmtBuildMetricMetaMsg(STabObj *pMeter, int32_t *ovgId, SVnodeSidLi (*pList)->numOfSids++; SMeterSidExtInfo *pSMeterTagInfo = (SMeterSidExtInfo *)pMsg; - pSMeterTagInfo->sid = pMeter->gid.sid; + pSMeterTagInfo->sid = htonl(pMeter->gid.sid); + pSMeterTagInfo->uid = htobe64(pMeter->uid); + pMsg += sizeof(SMeterSidExtInfo); - int32_t offset = 0; - for (int32_t j = 0; j < numOfTags; ++j) { - if (tagsId[j] == -1) { - char name[TSDB_METER_NAME_LEN] = {0}; - extractMeterName(pMeter->meterId, name); - - memcpy(pMsg + offset, name, TSDB_METER_NAME_LEN); - offset += TSDB_METER_NAME_LEN; - } else { - SSchema s = {0}; - char * tag = mgmtMeterGetTag(pMeter, tagsId[j], &s); - - memcpy(pMsg + offset, tag, (size_t)s.bytes); - offset += s.bytes; - } - } - - pMsg += offset; + int32_t offset = mgmtSerializeTagValue(pMsg, pMeter, tagsId, numOfTags); assert(offset == tagLen); - + + pMsg += offset; return pMsg; } @@ -1091,18 +1103,21 @@ static SMetricMetaElemMsg *doConvertMetricMetaMsg(SMetricMetaMsg *pMetricMetaMsg pElem->groupbyTagColumnList = htonl(pElem->groupbyTagColumnList); - int16_t *groupColIds = (int16_t*) (((char *)pMetricMetaMsg) + pElem->groupbyTagColumnList); + SColIndexEx 
*groupColIds = (SColIndexEx*) (((char *)pMetricMetaMsg) + pElem->groupbyTagColumnList); for (int32_t i = 0; i < pElem->numOfGroupCols; ++i) { - groupColIds[i] = htons(groupColIds[i]); + groupColIds[i].colId = htons(groupColIds[i].colId); + groupColIds[i].colIdx = htons(groupColIds[i].colIdx); + groupColIds[i].flag = htons(groupColIds[i].flag); + groupColIds[i].colIdxInBuf = 0; } return pElem; } -static int32_t mgmtBuildMetricMetaRspMsg(void *thandle, SMetricMetaMsg *pMetricMetaMsg, tQueryResultset *pResult, +static int32_t mgmtBuildMetricMetaRspMsg(SConnObj *pConn, SMetricMetaMsg *pMetricMetaMsg, tQueryResultset *pResult, char **pStart, int32_t *tagLen, int32_t rspMsgSize, int32_t maxTablePerVnode, int32_t code) { - *pStart = taosBuildRspMsgWithSize(thandle, TSDB_MSG_TYPE_METRIC_META_RSP, rspMsgSize); + *pStart = taosBuildRspMsgWithSize(pConn->thandle, TSDB_MSG_TYPE_METRIC_META_RSP, rspMsgSize); if (*pStart == NULL) { return 0; } @@ -1140,7 +1155,7 @@ static int32_t mgmtBuildMetricMetaRspMsg(void *thandle, SMetricMetaMsg *pMetricM for (int32_t i = 0; i < pResult[j].num; ++i) { STabObj *pMeter = pResult[j].pRes[i]; - pMsg = mgmtBuildMetricMetaMsg(pMeter, &ovgId, &pList, pMeta, tagLen[j], pElem->numOfTags, pElem->tagCols, + pMsg = mgmtBuildMetricMetaMsg(pConn, pMeter, &ovgId, &pList, pMeta, tagLen[j], pElem->numOfTags, pElem->tagCols, maxTablePerVnode, pMsg); } @@ -1156,7 +1171,7 @@ static int32_t mgmtBuildMetricMetaRspMsg(void *thandle, SMetricMetaMsg *pMetricM return msgLen; } -int mgmtRetrieveMetricMeta(void *thandle, char **pStart, SMetricMetaMsg *pMetricMetaMsg) { +int mgmtRetrieveMetricMeta(SConnObj *pConn, char **pStart, SMetricMetaMsg *pMetricMetaMsg) { /* * naive method: Do not limit the maximum number of meters in each * vnode(subquery), split the result according to vnodes @@ -1171,6 +1186,8 @@ int mgmtRetrieveMetricMeta(void *thandle, char **pStart, SMetricMetaMsg *pMetric int32_t * tagLen = calloc(1, sizeof(int32_t) * pMetricMetaMsg->numOfMeters); if 
(result == NULL || tagLen == NULL) { + tfree(result); + tfree(tagLen); return -1; } @@ -1203,12 +1220,9 @@ int mgmtRetrieveMetricMeta(void *thandle, char **pStart, SMetricMetaMsg *pMetric #endif if (ret == TSDB_CODE_SUCCESS) { + // todo opt performance for (int32_t i = 0; i < pMetricMetaMsg->numOfMeters; ++i) { ret = mgmtRetrieveMetersFromMetric(pMetricMetaMsg, i, &result[i]); - // todo opt performance - // if (result[i].num <= 0) {//no result - // } else if (result[i].num < 10) { - // } } } @@ -1230,8 +1244,7 @@ int mgmtRetrieveMetricMeta(void *thandle, char **pStart, SMetricMetaMsg *pMetric msgLen = 512; } - msgLen = mgmtBuildMetricMetaRspMsg(thandle, pMetricMetaMsg, result, pStart, tagLen, msgLen, maxMetersPerVNodeForQuery, - ret); + msgLen = mgmtBuildMetricMetaRspMsg(pConn, pMetricMetaMsg, result, pStart, tagLen, msgLen, maxMetersPerVNodeForQuery, ret); for (int32_t i = 0; i < pMetricMetaMsg->numOfMeters; ++i) { tQueryResultClean(&result[i]); @@ -1282,7 +1295,7 @@ int mgmtRetrieveMeters(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { memset(meterName, 0, tListLen(meterName)); // pattern compare for meter name - extractMeterName(pMeter->meterId, meterName); + extractTableName(pMeter->meterId, meterName); if (pShow->payloadLen > 0 && patternMatch(pShow->payload, meterName, TSDB_METER_NAME_LEN, &info) != TSDB_PATTERN_MATCH) @@ -1304,7 +1317,7 @@ int mgmtRetrieveMeters(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; if (pMeter->pTagData) { - extractMeterName(pMeter->pTagData, pWrite); + extractTableName(pMeter->pTagData, pWrite); } cols++; @@ -1398,7 +1411,7 @@ int mgmtRetrieveMetrics(SShowObj *pShow, char *data, int rows, SConnObj *pConn) pShow->pNode = (void *)pMetric->next; memset(metricName, 0, tListLen(metricName)); - extractMeterName(pMetric->meterId, metricName); + extractTableName(pMetric->meterId, metricName); if (pShow->payloadLen > 0 && 
patternMatch(pShow->payload, metricName, TSDB_METER_NAME_LEN, &info) != TSDB_PATTERN_MATCH) @@ -1407,7 +1420,7 @@ int mgmtRetrieveMetrics(SShowObj *pShow, char *data, int rows, SConnObj *pConn) cols = 0; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - extractMeterName(pMetric->meterId, pWrite); + extractTableName(pMetric->meterId, pWrite); cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; diff --git a/src/system/detail/src/mgmtProfile.c b/src/system/detail/src/mgmtProfile.c index e7dbeaaa254da098dcdac5a15b6b0feccb5f32f2..c1cd98952311f4cda6bf97cf2448de244299c8b8 100644 --- a/src/system/detail/src/mgmtProfile.c +++ b/src/system/detail/src/mgmtProfile.c @@ -499,10 +499,9 @@ int mgmtKillConnection(char *qidstr, SConnObj *pConn) { uint32_t ip = inet_addr(temp); temp = chr + 1; - short port = htons(atoi(temp)); - + uint16_t port = htons(atoi(temp)); SAcctObj *pAcct = pConn->pAcct; - + pthread_mutex_lock(&pAcct->mutex); pConn = pAcct->pConn; diff --git a/src/system/detail/src/mgmtShell.c b/src/system/detail/src/mgmtShell.c index ba45883050e2926e6a631145842ff6a63847f1d5..06556c817f0c6b1d99706893b0172ae6b732968f 100644 --- a/src/system/detail/src/mgmtShell.c +++ b/src/system/detail/src/mgmtShell.c @@ -21,13 +21,7 @@ #include "mgmtProfile.h" #include "taosmsg.h" #include "tlog.h" -#include "tstatus.h" - -#pragma GCC diagnostic push - -#pragma GCC diagnostic ignored "-Woverflow" -#pragma GCC diagnostic ignored "-Wpointer-sign" -#pragma GCC diagnostic ignored "-Wint-conversion" +#include "vnodeStatus.h" #define MAX_LEN_OF_METER_META (sizeof(SMultiMeterMeta) + sizeof(SSchema) * TSDB_MAX_COLUMNS + sizeof(SSchema) * TSDB_MAX_TAGS + TSDB_MAX_TAGS_LEN) @@ -79,11 +73,8 @@ int mgmtInitShell() { if (numOfThreads < 1) numOfThreads = 1; memset(&rpcInit, 0, sizeof(rpcInit)); -#ifdef CLUSTER - rpcInit.localIp = tsInternalIp; -#else - rpcInit.localIp = "0.0.0.0"; -#endif + + rpcInit.localIp = tsAnyIp ? 
"0.0.0.0" : tsPrivateIp;; rpcInit.localPort = tsMgmtShellPort; rpcInit.label = "MND-shell"; rpcInit.numOfThreads = numOfThreads; @@ -266,7 +257,7 @@ int mgmtProcessMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { pRsp->code = TSDB_CODE_DB_NOT_SELECTED; pMsg++; } else { - mTrace("%s, uid:%lld meter meta is retrieved", pInfo->meterId, pMeterObj->uid); + mTrace("%s, uid:%" PRIu64 " meter meta is retrieved", pInfo->meterId, pMeterObj->uid); pRsp->code = 0; pMsg += sizeof(STaosRsp); *pMsg = TSDB_IE_TYPE_META; @@ -317,8 +308,13 @@ int mgmtProcessMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { goto _exit_code; } for (int i = 0; i < TSDB_VNODES_SUPPORT; ++i) { - pMeta->vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; - pMeta->vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + if (pConn->usePublicIp) { + pMeta->vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; + pMeta->vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + } else { + pMeta->vpeerDesc[i].ip = pVgroup->vnodeGid[i].ip; + pMeta->vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + } } } } @@ -406,7 +402,7 @@ int mgmtProcessMultiMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (pMeterObj == NULL || (pDbObj == NULL)) { continue; } else { - mTrace("%s, uid:%lld sversion:%d meter meta is retrieved", tblName, pMeterObj->uid, pMeterObj->sversion); + mTrace("%s, uid:%" PRIu64 " sversion:%d meter meta is retrieved", tblName, pMeterObj->uid, pMeterObj->sversion); pMeta = (SMultiMeterMeta *)pCurMeter; memcpy(pMeta->meterId, tblName, strlen(tblName)); @@ -450,14 +446,19 @@ int mgmtProcessMultiMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (pVgroup == NULL) { pRsp->code = TSDB_CODE_INVALID_TABLE; pNewMsg++; - mError("%s, uid:%lld sversion:%d vgId:%d pVgroup is NULL", tblName, pMeterObj->uid, pMeterObj->sversion, + mError("%s, uid:%" PRIu64 " sversion:%d vgId:%d pVgroup is NULL", tblName, pMeterObj->uid, pMeterObj->sversion, pMeterObj->gid.vgId); goto _error_exit_code; } 
for (int i = 0; i < TSDB_VNODES_SUPPORT; ++i) { - pMeta->meta.vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; - pMeta->meta.vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + if (pConn->usePublicIp) { + pMeta->meta.vpeerDesc[i].ip = pVgroup->vnodeGid[i].publicIp; + pMeta->meta.vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + } else { + pMeta->meta.vpeerDesc[i].ip = pVgroup->vnodeGid[i].ip; + pMeta->meta.vpeerDesc[i].vnode = htonl(pVgroup->vnodeGid[i].vnode); + } } } } @@ -529,7 +530,7 @@ int mgmtProcessMetricMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { msgLen = pMsg - pStart; } else { - msgLen = mgmtRetrieveMetricMeta(pConn->thandle, &pStart, pMetricMetaMsg); + msgLen = mgmtRetrieveMetricMeta(pConn, &pStart, pMetricMetaMsg); if (msgLen <= 0) { taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_METRIC_META_RSP, TSDB_CODE_SERV_OUT_OF_MEMORY); return 0; @@ -720,7 +721,7 @@ int mgmtProcessAlterUserMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (hasRight) { memset(pUser->pass, 0, sizeof(pUser->pass)); - taosEncryptPass(pAlter->pass, strlen(pAlter->pass), pUser->pass); + taosEncryptPass((uint8_t*)pAlter->pass, strlen(pAlter->pass), pUser->pass); code = mgmtUpdateUser(pUser); mLPrint("user:%s password is altered by %s, code:%d", pAlter->user, pConn->pUser->user, code); } else { @@ -899,7 +900,7 @@ int mgmtProcessShowMsg(char *pMsg, int msgLen, SConnObj *pConn) { SShowRspMsg *pShowRsp; SShowObj * pShow = NULL; - if (pShowMsg->type == TSDB_MGMT_TABLE_PNODE || TSDB_MGMT_TABLE_GRANTS || TSDB_MGMT_TABLE_SCORES) { + if (pShowMsg->type == TSDB_MGMT_TABLE_DNODE || TSDB_MGMT_TABLE_GRANTS || TSDB_MGMT_TABLE_SCORES) { if (mgmtCheckRedirectMsg(pConn, TSDB_MSG_TYPE_SHOW_RSP) != 0) { return 0; } @@ -933,11 +934,11 @@ int mgmtProcessShowMsg(char *pMsg, int msgLen, SConnObj *pConn) { pShowRsp->qhandle = (uint64_t)pShow; // qhandle; pConn->qhandle = pShowRsp->qhandle; - code = (*mgmtGetMetaFp[pShowMsg->type])(&pShowRsp->meterMeta, pShow, pConn); + code = 
(*mgmtGetMetaFp[(uint8_t)pShowMsg->type])(&pShowRsp->meterMeta, pShow, pConn); if (code == 0) { pMsg += sizeof(SShowRspMsg) + sizeof(SSchema) * pShow->numOfColumns; } else { - mError("pShow:%p, type:%d %s, failed to get Meta, code:%d", pShow, pShowMsg->type, taosMsg[pShowMsg->type], code); + mError("pShow:%p, type:%d %s, failed to get Meta, code:%d", pShow, pShowMsg->type, taosMsg[(uint8_t)pShowMsg->type], code); free(pShow); } } @@ -1008,7 +1009,7 @@ int mgmtProcessRetrieveMsg(char *pMsg, int msgLen, SConnObj *pConn) { // if free flag is set, client wants to clean the resources if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) - rowsRead = (*mgmtRetrieveFp[pShow->type])(pShow, pRsp->data, rowsToRead, pConn); + rowsRead = (*mgmtRetrieveFp[(uint8_t)pShow->type])(pShow, pRsp->data, rowsToRead, pConn); if (rowsRead < 0) { rowsRead = 0; @@ -1024,7 +1025,7 @@ int mgmtProcessRetrieveMsg(char *pMsg, int msgLen, SConnObj *pConn) { taosSendMsgToPeer(pConn->thandle, pStart, msgLen); if (rowsToRead == 0) { - uintptr_t oldSign = atomic_val_compare_exchange_ptr(&pShow->signature, pShow, 0); + uintptr_t oldSign = (uintptr_t)atomic_val_compare_exchange_ptr(&pShow->signature, pShow, 0); if (oldSign != (uintptr_t)pShow) { return msgLen; } @@ -1071,12 +1072,19 @@ int mgmtProcessCreateTableMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (code == 1) { //mTrace("table:%s, wait vgroup create finish", pCreate->meterId, code); - } - else if (code != 0) { - mError("table:%s, failed to create table, code:%d", pCreate->meterId, code); + } else if (code != TSDB_CODE_SUCCESS) { + if (code == TSDB_CODE_TABLE_ALREADY_EXIST) { // table already created when the second attempt to create table + + STabObj* pMeter = mgmtGetMeter(pCreate->meterId); + assert(pMeter != NULL); + + mWarn("table:%s, table already created, failed to create table, ts:%" PRId64 ", code:%d", pCreate->meterId, + pMeter->createdTime, code); + } else { // other errors + mError("table:%s, 
failed to create table, code:%d", pCreate->meterId, code); + } } else { mTrace("table:%s, table is created by %s", pCreate->meterId, pConn->pUser->user); - //mLPrint("meter:%s is created by %s", pCreate->meterId, pConn->pUser->user); } taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_TABLE_RSP, code); @@ -1194,14 +1202,28 @@ int mgmtProcessHeartBeatMsg(char *cont, int contLen, SConnObj *pConn) { pConn->streamId = 0; pHBRsp->killConnection = pConn->killConnection; -#ifdef CLUSTER - int size = pSdbPublicIpList->numOfIps * 4; - pHBRsp->ipList.numOfIps = pSdbPublicIpList->numOfIps; - memcpy(pHBRsp->ipList.ip, pSdbPublicIpList->ip, size); - pMsg += sizeof(SHeartBeatRsp) + size; -#else - pMsg += sizeof(SHeartBeatRsp); -#endif + if (pConn->usePublicIp) { + if (pSdbPublicIpList != NULL) { + int size = pSdbPublicIpList->numOfIps * 4; + pHBRsp->ipList.numOfIps = pSdbPublicIpList->numOfIps; + memcpy(pHBRsp->ipList.ip, pSdbPublicIpList->ip, size); + pMsg += sizeof(SHeartBeatRsp) + size; + } else { + pHBRsp->ipList.numOfIps = 0; + pMsg += sizeof(SHeartBeatRsp); + } + + } else { + if (pSdbIpList != NULL) { + int size = pSdbIpList->numOfIps * 4; + pHBRsp->ipList.numOfIps = pSdbIpList->numOfIps; + memcpy(pHBRsp->ipList.ip, pSdbIpList->ip, size); + pMsg += sizeof(SHeartBeatRsp) + size; + } else { + pHBRsp->ipList.numOfIps = 0; + pMsg += sizeof(SHeartBeatRsp); + } + } msgLen = pMsg - pStart; taosSendMsgToPeer(pConn->thandle, pStart, msgLen); @@ -1225,8 +1247,9 @@ void mgmtEstablishConn(SConnObj *pConn) { } } - uint32_t temp; - taosGetRpcConnInfo(pConn->thandle, &temp, &pConn->ip, &pConn->port, &temp, &temp); + int32_t tempint32; + uint32_t tempuint32; + taosGetRpcConnInfo(pConn->thandle, &tempuint32, &pConn->ip, &pConn->port, &tempint32, &tempint32); mgmtAddConnIntoAcct(pConn); } @@ -1274,6 +1297,12 @@ int mgmtProcessConnectMsg(char *pMsg, int msgLen, SConnObj *pConn) { pAcct = mgmtGetAcct(pUser->acct); + code = taosCheckVersion(pConnectMsg->clientVersion, version, 3); + if 
(code != 0) { + mError("invalid client version:%s", pConnectMsg->clientVersion); + goto _rsp; + } + if (pConnectMsg->db[0]) { sprintf(dbName, "%x%s%s", pAcct->acctId, TS_PATH_DELIMITER, pConnectMsg->db); pDb = mgmtGetDb(dbName); @@ -1294,7 +1323,7 @@ int mgmtProcessConnectMsg(char *pMsg, int msgLen, SConnObj *pConn) { pConn->pDb = pDb; pConn->pUser = pUser; mgmtEstablishConn(pConn); - + _rsp: pStart = taosBuildRspMsgWithSize(pConn->thandle, TSDB_MSG_TYPE_CONNECT_RSP, 128); if (pStart == NULL) return 0; @@ -1312,11 +1341,22 @@ _rsp: pConnectRsp->superAuth = pConn->superAuth; pMsg += sizeof(SConnectRsp); -#ifdef CLUSTER - int size = pSdbPublicIpList->numOfIps * 4 + sizeof(SIpList); - memcpy(pMsg, pSdbPublicIpList, size); + int size; + if (pSdbPublicIpList != NULL && pSdbIpList != NULL) { + size = pSdbPublicIpList->numOfIps * 4 + sizeof(SIpList); + if (pConn->usePublicIp) { + memcpy(pMsg, pSdbPublicIpList, size); + } else { + memcpy(pMsg, pSdbIpList, size); + } + } else { + SIpList tmpIpList; + tmpIpList.numOfIps = 0; + size = tmpIpList.numOfIps * 4 + sizeof(SIpList); + memcpy(pMsg, &tmpIpList, size); + } + pMsg += size; -#endif // set the time resolution: millisecond or microsecond *((uint32_t *)pMsg) = tsTimePrecision; @@ -1364,6 +1404,9 @@ void *mgmtProcessMsgFromShell(char *msg, void *ahandle, void *thandle) { pConn = connList + pMsg->destId; pConn->thandle = thandle; strcpy(pConn->user, pMsg->meterId); + pConn->usePublicIp = (pMsg->destIp == tsPublicIpInt ? 
1 : 0); + mTrace("pConn:%p is rebuild, destIp:%s publicIp:%s usePublicIp:%u", + pConn, taosIpStr(pMsg->destIp), taosIpStr(tsPublicIpInt), pConn->usePublicIp); } if (pMsg->msgType == TSDB_MSG_TYPE_CONNECT) { @@ -1444,8 +1487,8 @@ void mgmtInitProcessShellMsg() { mgmtProcessShellMsg[TSDB_MSG_TYPE_SHOW] = mgmtProcessShowMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_CONNECT] = mgmtProcessConnectMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_HEARTBEAT] = mgmtProcessHeartBeatMsg; - mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_PNODE] = mgmtProcessCreateDnodeMsg; - mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_PNODE] = mgmtProcessDropDnodeMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_DNODE] = mgmtProcessCreateDnodeMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_DNODE] = mgmtProcessDropDnodeMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_MNODE] = mgmtProcessCreateMnodeMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_MNODE] = mgmtProcessDropMnodeMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_CFG_MNODE] = mgmtProcessCfgMnodeMsg; @@ -1454,5 +1497,3 @@ void mgmtInitProcessShellMsg() { mgmtProcessShellMsg[TSDB_MSG_TYPE_KILL_STREAM] = mgmtProcessKillStreamMsg; mgmtProcessShellMsg[TSDB_MSG_TYPE_KILL_CONNECTION] = mgmtProcessKillConnectionMsg; } - -#pragma GCC diagnostic pop diff --git a/src/system/detail/src/mgmtSupertableQuery.c b/src/system/detail/src/mgmtSupertableQuery.c index 4dc7760d897145d5f1af202b79ff84720ab4f3b8..347b54595eea5ab786d622e68c2befd122f76e07 100644 --- a/src/system/detail/src/mgmtSupertableQuery.c +++ b/src/system/detail/src/mgmtSupertableQuery.c @@ -70,7 +70,7 @@ static int32_t tabObjResultComparator(const void* p1, const void* p2, void* para STabObj* pNode1 = (STabObj*)p1; STabObj* pNode2 = (STabObj*)p2; - for (int32_t i = 0; i < pOrderDesc->orderIdx.numOfOrderedCols; ++i) { + for (int32_t i = 0; i < pOrderDesc->orderIdx.numOfCols; ++i) { int32_t colIdx = pOrderDesc->orderIdx.pData[i]; char* f1 = NULL; @@ -86,7 +86,9 @@ static int32_t tabObjResultComparator(const void* p1, const void* p2, void* 
para } else { f1 = mgmtMeterGetTag(pNode1, colIdx, NULL); f2 = mgmtMeterGetTag(pNode2, colIdx, &schema); - assert(schema.type == pOrderDesc->pTagSchema->pSchema[colIdx].type); + + SSchema* pSchema = getColumnModelSchema(pOrderDesc->pColumnModel, colIdx); + assert(schema.type == pSchema->type); } int32_t ret = doCompare(f1, f2, schema.type, schema.bytes); @@ -100,6 +102,32 @@ static int32_t tabObjResultComparator(const void* p1, const void* p2, void* para return 0; } +/** + * update the tag order index according to the tags column index. The tags column index needs to be checked one-by-one, + * since the normal columns may be passed to server for handling the group by on status column. + * + * @param pMetricMetaMsg + * @param tableIndex + * @param pOrderIndexInfo + * @param numOfTags + */ +static void mgmtUpdateOrderTagColIndex(SMetricMetaMsg* pMetricMetaMsg, int32_t tableIndex, SColumnOrderInfo* pOrderIndexInfo, + int32_t numOfTags) { + SMetricMetaElemMsg* pElem = (SMetricMetaElemMsg*)((char*)pMetricMetaMsg + pMetricMetaMsg->metaElem[tableIndex]); + SColIndexEx* groupColumnList = (SColIndexEx*)((char*)pMetricMetaMsg + pElem->groupbyTagColumnList); + + int32_t numOfGroupbyTags = 0; + for (int32_t i = 0; i < pElem->numOfGroupCols; ++i) { + if (groupColumnList[i].flag == TSDB_COL_TAG) { // ignore this column if it is not a tag column. 
+ pOrderIndexInfo->pData[numOfGroupbyTags++] = groupColumnList[i].colIdx; + + assert(groupColumnList[i].colIdx < numOfTags); + } + } + + pOrderIndexInfo->numOfCols = numOfGroupbyTags; +} + // todo merge sort function with losertree used void mgmtReorganizeMetersInMetricMeta(SMetricMetaMsg* pMetricMetaMsg, int32_t tableIndex, tQueryResultset* pRes) { if (pRes->num <= 0) { // no result, no need to pagination @@ -117,18 +145,14 @@ void mgmtReorganizeMetersInMetricMeta(SMetricMetaMsg* pMetricMetaMsg, int32_t ta */ tOrderDescriptor* descriptor = (tOrderDescriptor*)calloc(1, sizeof(tOrderDescriptor) + sizeof(int32_t) * pElem->numOfGroupCols); - descriptor->pTagSchema = tCreateTagSchema(pTagSchema, pMetric->numOfTags); - descriptor->orderIdx.numOfOrderedCols = pElem->numOfGroupCols; + descriptor->pColumnModel = createColumnModel(pTagSchema, pMetric->numOfTags, 1); + descriptor->orderIdx.numOfCols = pElem->numOfGroupCols; int32_t* startPos = NULL; int32_t numOfSubset = 1; - - if (pElem->numOfGroupCols > 0) { - SColIndexEx* groupColumnList = (SColIndexEx*)((char*)pMetricMetaMsg + pElem->groupbyTagColumnList); - for (int32_t i = 0; i < pElem->numOfGroupCols; ++i) { - descriptor->orderIdx.pData[i] = groupColumnList[i].colIdx; - } - + + mgmtUpdateOrderTagColIndex(pMetricMetaMsg, tableIndex, &descriptor->orderIdx, pMetric->numOfTags); + if (descriptor->orderIdx.numOfCols > 0) { tQSortEx(pRes->pRes, POINTER_BYTES, 0, pRes->num - 1, descriptor, tabObjResultComparator); startPos = calculateSubGroup(pRes->pRes, pRes->num, &numOfSubset, descriptor, tabObjResultComparator); } else { @@ -144,7 +168,7 @@ void mgmtReorganizeMetersInMetricMeta(SMetricMetaMsg* pMetricMetaMsg, int32_t ta */ qsort(pRes->pRes, (size_t)pRes->num, POINTER_BYTES, tabObjVGIDComparator); - free(descriptor->pTagSchema); + free(descriptor->pColumnModel); free(descriptor); free(startPos); } @@ -196,14 +220,14 @@ static bool mgmtTablenameFilterCallback(tSkipListNode* pNode, void* param) { // pattern compare for meter 
name STabObj* pMeterObj = (STabObj*)pNode->pData; - extractMeterName(pMeterObj->meterId, name); + extractTableName(pMeterObj->meterId, name); return patternMatch(pSupporter->pattern, name, TSDB_METER_ID_LEN, &pSupporter->info) == TSDB_PATTERN_MATCH; } static void mgmtRetrieveFromLikeOptr(tQueryResultset* pRes, const char* str, STabObj* pMetric) { SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; - SMeterNameFilterSupporter supporter = {info, str}; + SMeterNameFilterSupporter supporter = {info, (char*) str}; pRes->num = tSkipListIterateList(pMetric->pSkipList, (tSkipListNode***)&pRes->pRes, mgmtTablenameFilterCallback, &supporter); @@ -230,7 +254,7 @@ static void mgmtFilterByTableNameCond(tQueryResultset* pRes, char* condStr, int3 free(str); } -static bool mgmtJoinFilterCallback(tSkipListNode* pNode, void* param) { +UNUSED_FUNC static bool mgmtJoinFilterCallback(tSkipListNode* pNode, void* param) { SJoinSupporter* pSupporter = (SJoinSupporter*)param; SSchema s = {0}; @@ -269,15 +293,15 @@ static void orderResult(SMetricMetaMsg* pMetricMetaMsg, tQueryResultset* pRes, i STabObj* pMetric = mgmtGetMeter(pElem->meterId); SSchema* pTagSchema = (SSchema*)(pMetric->schema + pMetric->numOfColumns * sizeof(SSchema)); - descriptor->pTagSchema = tCreateTagSchema(pTagSchema, pMetric->numOfTags); + descriptor->pColumnModel = createColumnModel(pTagSchema, pMetric->numOfTags, 1); descriptor->orderIdx.pData[0] = colIndex; - descriptor->orderIdx.numOfOrderedCols = 1; + descriptor->orderIdx.numOfCols = 1; // sort results list tQSortEx(pRes->pRes, POINTER_BYTES, 0, pRes->num - 1, descriptor, tabObjResultComparator); - free(descriptor->pTagSchema); + free(descriptor->pColumnModel); free(descriptor); } @@ -307,7 +331,7 @@ int32_t mgmtDoJoin(SMetricMetaMsg* pMetricMetaMsg, tQueryResultset* pRes) { bool allEmpty = false; for (int32_t i = 0; i < pMetricMetaMsg->numOfMeters; ++i) { - if (pRes->num == 0) { // all results are empty if one of them is empty + if (pRes[i].num == 0) { 
// all results are empty if one of them is empty allEmpty = true; break; } @@ -639,7 +663,8 @@ static void getTagColumnInfo(SSyntaxTreeFilterSupporter* pSupporter, SSchema* pS } } -void filterPrepare(tSQLBinaryExpr* pExpr, void* param) { +void filterPrepare(void* expr, void* param) { + tSQLBinaryExpr *pExpr = (tSQLBinaryExpr*) expr; if (pExpr->info != NULL) { return; } @@ -691,7 +716,9 @@ static int32_t mgmtFilterMeterByIndex(STabObj* pMetric, tQueryResultset* pRes, c return TSDB_CODE_OPS_NOT_SUPPORT; } else { // query according to the binary expression SSyntaxTreeFilterSupporter s = {.pTagSchema = pTagSchema, .numOfTags = pMetric->numOfTags}; - SBinaryFilterSupp supp = {.fp = tSkipListNodeFilterCallback, .setupInfoFn = filterPrepare, .pExtInfo = &s}; + SBinaryFilterSupp supp = {.fp = (__result_filter_fn_t)tSkipListNodeFilterCallback, + .setupInfoFn = (__do_filter_suppl_fn_t)filterPrepare, + .pExtInfo = &s}; tSQLBinaryExprTraverse(pExpr, pMetric->pSkipList, pRes, &supp); tSQLBinaryExprDestroy(&pExpr, tSQLListTraverseDestroyInfo); @@ -781,22 +808,25 @@ int mgmtRetrieveMetersFromMetric(SMetricMetaMsg* pMsg, int32_t tableIndex, tQuer } // todo refactor!!!!! 
-static char* getTagValueFromMeter(STabObj* pMeter, int32_t offset, void* param) { +static char* getTagValueFromMeter(STabObj* pMeter, int32_t offset, int32_t len, char* param) { if (offset == TSDB_TBNAME_COLUMN_INDEX) { - extractMeterName(pMeter->meterId, param); - return param; + extractTableName(pMeter->meterId, param); } else { - char* tags = pMeter->pTagData + TSDB_METER_ID_LEN; // tag start position - return (tags + offset); + char* tags = pMeter->pTagData + offset + TSDB_METER_ID_LEN; // tag start position + memcpy(param, tags, len); // make sure the value is null-terminated string } + + return param; } -bool tSkipListNodeFilterCallback(tSkipListNode* pNode, void* param) { +bool tSkipListNodeFilterCallback(const void* pNode, void* param) { + tQueryInfo* pInfo = (tQueryInfo*)param; - STabObj* pMeter = (STabObj*)pNode->pData; + STabObj* pMeter = (STabObj*)(((tSkipListNode*)pNode)->pData); - char name[TSDB_METER_NAME_LEN + 1] = {0}; - char* val = getTagValueFromMeter(pMeter, pInfo->offset, name); + char buf[TSDB_MAX_TAGS_LEN] = {0}; + + char* val = getTagValueFromMeter(pMeter, pInfo->offset, pInfo->sch.bytes, buf); int8_t type = pInfo->sch.type; int32_t ret = 0; diff --git a/src/system/detail/src/mgmtUser.c b/src/system/detail/src/mgmtUser.c index 1acceb1ade7c7cecc66d972adf6e295719e9357c..89b83e3553f26a3d2cc755709086d8f5c4fa7f3f 100644 --- a/src/system/detail/src/mgmtUser.c +++ b/src/system/detail/src/mgmtUser.c @@ -54,8 +54,8 @@ void mgmtUserActionInit() { } void *mgmtUserAction(char action, void *row, char *str, int size, int *ssize) { - if (mgmtUserActionFp[action] != NULL) { - return (*(mgmtUserActionFp[action]))(row, str, size, ssize); + if (mgmtUserActionFp[(uint8_t)action] != NULL) { + return (*(mgmtUserActionFp[(uint8_t)action]))(row, str, size, ssize); } return NULL; } diff --git a/src/system/detail/src/mgmtVgroup.c b/src/system/detail/src/mgmtVgroup.c index 6efe9d666044a0ef2675d27ac741632ff821561f..15a5fe808b44c5cc3f94f06bb691a11aa8998122 100644 --- 
a/src/system/detail/src/mgmtVgroup.c +++ b/src/system/detail/src/mgmtVgroup.c @@ -19,7 +19,7 @@ #include "mgmt.h" #include "tschemautil.h" #include "tlog.h" -#include "tstatus.h" +#include "vnodeStatus.h" void * vgSdb = NULL; int tsVgUpdateSize; @@ -56,8 +56,8 @@ void mgmtVgroupActionInit() { } void *mgmtVgroupAction(char action, void *row, char *str, int size, int *ssize) { - if (mgmtVgroupActionFp[action] != NULL) { - return (*(mgmtVgroupActionFp[action]))(row, str, size, ssize); + if (mgmtVgroupActionFp[(uint8_t)action] != NULL) { + return (*(mgmtVgroupActionFp[(uint8_t)action]))(row, str, size, ssize); } return NULL; } @@ -103,13 +103,17 @@ int mgmtInitVgroups() { } taosIdPoolReinit(pVgroup->idPool); -#ifdef CLUSTER - if (pVgroup->vnodeGid[0].publicIp == 0) { - pVgroup->vnodeGid[0].publicIp = inet_addr(tsPublicIp); - pVgroup->vnodeGid[0].ip = inet_addr(tsPrivateIp); - sdbUpdateRow(vgSdb, pVgroup, tsVgUpdateSize, 1); + + if (tsIsCluster) { + /* + * Upgrade from open source version to cluster version for the first time + */ + if (pVgroup->vnodeGid[0].publicIp == 0) { + pVgroup->vnodeGid[0].publicIp = inet_addr(tsPublicIp); + pVgroup->vnodeGid[0].ip = inet_addr(tsPrivateIp); + sdbUpdateRow(vgSdb, pVgroup, tsVgUpdateSize, 1); + } } -#endif mgmtSetDnodeVgid(pVgroup->vnodeGid, pVgroup->numOfVnodes, pVgroup->vgId); } @@ -233,11 +237,25 @@ int mgmtGetVgroupMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; - int maxReplica = 0; - SVgObj *pVgroup = pDb->pHead; - while (pVgroup != NULL) { + int maxReplica = 0; + SVgObj *pVgroup = NULL; + STabObj *pMeter = NULL; + if (pShow->payloadLen > 0 ) { + pMeter = mgmtGetMeter(pShow->payload); + if (NULL == pMeter) { + return TSDB_CODE_INVALID_METER_ID; + } + + pVgroup = mgmtGetVgroup(pMeter->gid.vgId); + if (NULL == pVgroup) return TSDB_CODE_INVALID_METER_ID; + maxReplica = pVgroup->numOfVnodes > maxReplica ? 
pVgroup->numOfVnodes : maxReplica; - pVgroup = pVgroup->next; + } else { + SVgObj *pVgroup = pDb->pHead; + while (pVgroup != NULL) { + maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica; + pVgroup = pVgroup->next; + } } for (int i = 0; i < maxReplica; ++i) { @@ -272,10 +290,16 @@ int mgmtGetVgroupMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { pShow->offset[0] = 0; for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; - pShow->numOfRows = pDb->numOfVgroups; - pShow->pNode = pDb->pHead; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + if (NULL == pMeter) { + pShow->numOfRows = pDb->numOfVgroups; + pShow->pNode = pDb->pHead; + } else { + pShow->numOfRows = 1; + pShow->pNode = pVgroup; + } + return 0; } @@ -290,6 +314,7 @@ int mgmtRetrieveVgroups(SShowObj *pShow, char *data, int rows, SConnObj *pConn) SDbObj *pDb = NULL; if (pConn->pDb != NULL) pDb = mgmtGetDb(pConn->pDb->name); + assert(pDb != NULL); pVgroup = pDb->pHead; while (pVgroup != NULL) { diff --git a/src/system/detail/src/vnodeCache.c b/src/system/detail/src/vnodeCache.c index 30dd8f7375f9e6f4ed7883f39ba037af54d7896b..9f078b09ffd4e15e2221dc818e22d03fc8396ddd 100644 --- a/src/system/detail/src/vnodeCache.c +++ b/src/system/detail/src/vnodeCache.c @@ -20,7 +20,7 @@ #include "vnode.h" #include "vnodeCache.h" #include "vnodeUtil.h" -#include "tstatus.h" +#include "vnodeStatus.h" void vnodeSearchPointInCache(SMeterObj *pObj, SQuery *pQuery); void vnodeProcessCommitTimer(void *param, void *tmrId); @@ -630,7 +630,14 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { setNullN(pData, type, bytes, pCacheBlock->numOfPoints); } else { pRead = pCacheBlock->offset[colIdx] + startPos * bytes; - memcpy(pData, pRead, numOfReads * bytes); + + if (QUERY_IS_ASC_QUERY(pQuery)) { + memcpy(pData, pRead, numOfReads * bytes); + } else { + for(int32_t j = 0; j < numOfReads; ++j) { + memcpy(pData + bytes * j, pRead + 
(numOfReads - 1 - j) * bytes, bytes); + } + } } } numOfQualifiedPoints = numOfReads; @@ -653,8 +660,8 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = startPos; j < pCacheBlock->numOfPoints; ++j) { TSKEY key = vnodeGetTSInCacheBlock(pCacheBlock, j); if (key < startkey || key > endkey) { - dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); + dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -668,8 +675,7 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { } ids[numOfQualifiedPoints] = j; - if (++numOfQualifiedPoints == numOfReads) { - // qualified data are enough + if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough break; } } @@ -678,8 +684,8 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = startPos; j >= 0; --j) { TSKEY key = vnodeGetTSInCacheBlock(pCacheBlock, j); if (key < startkey || key > endkey) { - dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); + dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. 
slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -691,23 +697,22 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { if (!vnodeFilterData(pQuery, &numOfActualRead, j)) { continue; } - - ids[numOfReads - numOfQualifiedPoints - 1] = j; - if (++numOfQualifiedPoints == numOfReads) { - // qualified data are enough + + ids[numOfQualifiedPoints] = j; + if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough break; } } } - int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 0 : numOfReads - numOfQualifiedPoints; +// int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 0 : numOfReads - numOfQualifiedPoints; for (int32_t j = 0; j < numOfQualifiedPoints; ++j) { for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { int16_t colIndex = pQuery->pSelectExpr[col].pBase.colInfo.colIdx; int32_t bytes = pObj->schema[colIndex].bytes; pData = pQuery->sdata[col]->data + (pQuery->pointsOffset + j) * bytes; - pRead = pCacheBlock->offset[colIndex] + ids[j + start] * bytes; + pRead = pCacheBlock->offset[colIndex] + ids[j/* + start*/] * bytes; memcpy(pData, pRead, bytes); } @@ -962,10 +967,11 @@ void vnodeSetCommitQuery(SMeterObj *pObj, SQuery *pQuery) { if (firstKey < pQuery->skey) { pQuery->over = 1; - dTrace("vid:%d sid:%d id:%s, first key is small, keyFirst:%ld commitFirstKey:%ld", + dTrace("vid:%d sid:%d id:%s, first key is small, keyFirst:%" PRId64 " commitFirstKey:%" PRId64 "", pObj->vnode, pObj->sid, pObj->meterId, firstKey, pQuery->skey); pthread_mutex_lock(&(pVnode->vmutex)); if (firstKey < pVnode->firstKey) pVnode->firstKey = firstKey; + assert(pVnode->firstKey > 0); pthread_mutex_unlock(&(pVnode->vmutex)); } } @@ -1013,7 +1019,7 @@ int vnodeSyncRetrieveCache(int vnode, int fd) { if (taosWriteMsg(fd, &(pObj->lastKeyOnFile), sizeof(pObj->lastKeyOnFile)) <= 0) return -1; if (taosWriteMsg(fd, 
&(pInfo->commitPoint), sizeof(pInfo->commitPoint)) <= 0) return -1; - dTrace("vid:%d sid:%d id:%s, send lastKey:%lld lastKeyOnFile:%lld", vnode, sid, pObj->meterId, pObj->lastKey, + dTrace("vid:%d sid:%d id:%s, send lastKey:%" PRId64 " lastKeyOnFile:%" PRId64, vnode, sid, pObj->meterId, pObj->lastKey, pObj->lastKeyOnFile); slot = pInfo->commitSlot; @@ -1033,7 +1039,7 @@ int vnodeSyncRetrieveCache(int vnode, int fd) { if (taosWriteMsg(fd, pBlock->offset[col], pObj->schema[col].bytes * points) <= 0) return -1; TSKEY lastKey = *((TSKEY *)(pBlock->offset[0] + pObj->schema[0].bytes * (points - 1))); - dTrace("vid:%d sid:%d id:%s, cache block is sent, points:%d lastKey:%ld", vnode, sid, pObj->meterId, points, + dTrace("vid:%d sid:%d id:%s, cache block is sent, points:%d lastKey:%" PRId64, vnode, sid, pObj->meterId, points, lastKey); blocksSent++; @@ -1097,7 +1103,7 @@ int vnodeSyncRestoreCache(int vnode, int fd) { if (taosReadMsg(fd, &(pObj->lastKeyOnFile), sizeof(pObj->lastKeyOnFile)) <= 0) return -1; if (taosReadMsg(fd, &(pInfo->commitPoint), sizeof(pInfo->commitPoint)) <= 0) return -1; - dTrace("vid:%d sid:%d id:%s, commitPoint:%d lastKeyOnFile:%ld", vnode, sid, pObj->meterId, pInfo->commitPoint, + dTrace("vid:%d sid:%d id:%s, commitPoint:%d lastKeyOnFile:%" PRId64, vnode, sid, pObj->meterId, pInfo->commitPoint, pObj->lastKeyOnFile); if (vnodeList[pObj->vnode].lastKey < pObj->lastKey) vnodeList[pObj->vnode].lastKey = pObj->lastKey; @@ -1135,7 +1141,7 @@ int vnodeSyncRestoreCache(int vnode, int fd) { if (vnodeList[pObj->vnode].firstKey > *(TSKEY *)(pBlock->offset[0])) vnodeList[pObj->vnode].firstKey = *(TSKEY *)(pBlock->offset[0]); - dTrace("vid:%d sid:%d id:%s, cache block is received, points:%d lastKey:%ld", vnode, sid, pObj->meterId, points, + dTrace("vid:%d sid:%d id:%s, cache block is received, points:%d lastKey:%" PRId64, vnode, sid, pObj->meterId, points, pObj->lastKey); } } diff --git a/src/system/detail/src/vnodeCommit.c b/src/system/detail/src/vnodeCommit.c 
index 57bb52eb23d1a2d2604712709965c3b7bdd21f17..a650376afac0615c1966e6c0181adfdf41910329 100644 --- a/src/system/detail/src/vnodeCommit.c +++ b/src/system/detail/src/vnodeCommit.c @@ -19,6 +19,7 @@ #include "tsdb.h" #include "vnode.h" #include "vnodeUtil.h" +#include "vnodeStatus.h" typedef struct { int sversion; @@ -50,7 +51,7 @@ int vnodeOpenCommitLog(int vnode, uint64_t firstV) { int64_t length = statbuf.st_size; if (length != pVnode->mappingSize) { - dError("vid:%d, logfd:%d, alloc file size:%ld not equal to mapping size:%ld", vnode, pVnode->logFd, length, + dError("vid:%d, logfd:%d, alloc file size:%" PRId64 " not equal to mapping size:%" PRId64, vnode, pVnode->logFd, length, pVnode->mappingSize); goto _err_log_open; } @@ -165,7 +166,7 @@ size_t vnodeRestoreDataFromLog(int vnode, char *fileName, uint64_t *firstV) { continue; } - if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETING)) { + if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) { dWarn("vid:%d sid:%d id:%s, meter is dropped, ignore data in commit log, contLen:%d action:%d", vnode, head.sid, head.contLen, head.action); continue; diff --git a/src/system/detail/src/vnodeFile.c b/src/system/detail/src/vnodeFile.c index 860b7624dafde0dfa95ab304feec819f5f5a0cc0..8ac2f212112f38b84dd8998e0b7353e091a46d57 100644 --- a/src/system/detail/src/vnodeFile.c +++ b/src/system/detail/src/vnodeFile.c @@ -21,6 +21,7 @@ #include "vnode.h" #include "vnodeFile.h" #include "vnodeUtil.h" +#include "vnodeStatus.h" #define FILE_QUERY_NEW_BLOCK -5 // a special negative number @@ -126,7 +127,7 @@ int vnodeCreateHeadDataFile(int vnode, int fileId, char *headName, char *dataNam if (symlink(dDataName, dataName) != 0) return -1; if (symlink(dLastName, lastName) != 0) return -1; - dPrint("vid:%d, fileId:%d, empty header file:%s dataFile:%s lastFile:%s on disk:%s is created ", + dPrint("vid:%d, fileId:%d, empty header file:%s file:%s lastFile:%s on disk:%s is created ", vnode, fileId, headName, dataName, lastName, path); return 
0; @@ -187,16 +188,16 @@ int vnodeCreateNeccessaryFiles(SVnodeObj *pVnode) { if (pVnode->lastKeyOnFile == 0) { if (pCfg->daysPerFile == 0) pCfg->daysPerFile = 10; - pVnode->fileId = pVnode->firstKey / tsMsPerDay[pVnode->cfg.precision] / pCfg->daysPerFile; - pVnode->lastKeyOnFile = (int64_t)(pVnode->fileId + 1) * pCfg->daysPerFile * tsMsPerDay[pVnode->cfg.precision] - 1; + pVnode->fileId = pVnode->firstKey / tsMsPerDay[(uint8_t)pVnode->cfg.precision] / pCfg->daysPerFile; + pVnode->lastKeyOnFile = (int64_t)(pVnode->fileId + 1) * pCfg->daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision] - 1; pVnode->numOfFiles = 1; if (vnodeCreateEmptyCompFile(vnode, pVnode->fileId) < 0) return -1; } - numOfFiles = (pVnode->lastKeyOnFile - pVnode->commitFirstKey) / tsMsPerDay[pVnode->cfg.precision] / pCfg->daysPerFile; + numOfFiles = (pVnode->lastKeyOnFile - pVnode->commitFirstKey) / tsMsPerDay[(uint8_t)pVnode->cfg.precision] / pCfg->daysPerFile; if (pVnode->commitFirstKey > pVnode->lastKeyOnFile) numOfFiles = -1; - dTrace("vid:%d, commitFirstKey:%ld lastKeyOnFile:%ld numOfFiles:%d fileId:%d vnodeNumOfFiles:%d", pVnode->vnode, + dTrace("vid:%d, commitFirstKey:%" PRId64 " lastKeyOnFile:%" PRId64 " numOfFiles:%d fileId:%d vnodeNumOfFiles:%d", pVnode->vnode, pVnode->commitFirstKey, pVnode->lastKeyOnFile, numOfFiles, pVnode->fileId, pVnode->numOfFiles); if (numOfFiles >= pVnode->numOfFiles) { @@ -221,15 +222,15 @@ int vnodeCreateNeccessaryFiles(SVnodeObj *pVnode) { #else return -1; #endif - pVnode->lastKeyOnFile += (int64_t)tsMsPerDay[pVnode->cfg.precision] * pCfg->daysPerFile; + pVnode->lastKeyOnFile += (int64_t)tsMsPerDay[(uint8_t)pVnode->cfg.precision] * pCfg->daysPerFile; filesAdded = 1; numOfFiles = 0; // hacker way } fileId = pVnode->fileId - numOfFiles; pVnode->commitLastKey = - pVnode->lastKeyOnFile - (int64_t)numOfFiles * tsMsPerDay[pVnode->cfg.precision] * pCfg->daysPerFile; - pVnode->commitFirstKey = pVnode->commitLastKey - (int64_t)tsMsPerDay[pVnode->cfg.precision] * 
pCfg->daysPerFile + 1; + pVnode->lastKeyOnFile - (int64_t)numOfFiles * tsMsPerDay[(uint8_t)pVnode->cfg.precision] * pCfg->daysPerFile; + pVnode->commitFirstKey = pVnode->commitLastKey - (int64_t)tsMsPerDay[(uint8_t)pVnode->cfg.precision] * pCfg->daysPerFile + 1; pVnode->commitFileId = fileId; pVnode->numOfFiles = pVnode->numOfFiles + filesAdded; @@ -244,14 +245,13 @@ int vnodeOpenCommitFiles(SVnodeObj *pVnode, int noTempLast) { int len = 0; struct stat filestat; int vnode = pVnode->vnode; - int fileId, numOfFiles, filesAdded = 0; - SVnodeCfg * pCfg = &pVnode->cfg; + int fileId; if (vnodeCreateNeccessaryFiles(pVnode) < 0) return -1; fileId = pVnode->commitFileId; - dTrace("vid:%d, commit fileId:%d, commitLastKey:%ld, vnodeLastKey:%ld, lastKeyOnFile:%ld numOfFiles:%d", + dTrace("vid:%d, commit fileId:%d, commitLastKey:%" PRId64 ", vnodeLastKey:%" PRId64 ", lastKeyOnFile:%" PRId64 " numOfFiles:%d", vnode, fileId, pVnode->commitLastKey, pVnode->lastKey, pVnode->lastKeyOnFile, pVnode->numOfFiles); int minSize = sizeof(SCompHeader) * pVnode->cfg.maxSessions + sizeof(TSCKSUM) + TSDB_FILE_HEADER_LEN; @@ -506,7 +506,7 @@ void *vnodeCommitMultiToFile(SVnodeObj *pVnode, int ssid, int esid) { SVnodeHeadInfo headInfo; uint8_t * pOldCompBlocks; - dPrint("vid:%d, committing to file, firstKey:%ld lastKey:%ld ssid:%d esid:%d", vnode, pVnode->firstKey, + dPrint("vid:%d, committing to file, firstKey:%" PRId64 " lastKey:%" PRId64 " ssid:%d esid:%d", vnode, pVnode->firstKey, pVnode->lastKey, ssid, esid); if (pVnode->lastKey == 0) goto _over; @@ -573,7 +573,7 @@ _again: memset(&query, 0, sizeof(query)); if (vnodeOpenCommitFiles(pVnode, ssid) < 0) goto _over; - dTrace("vid:%d, start to commit, commitFirstKey:%ld commitLastKey:%ld", vnode, pVnode->commitFirstKey, + dTrace("vid:%d, start to commit, commitFirstKey:%" PRId64 " commitLastKey:%" PRId64, vnode, pVnode->commitFirstKey, pVnode->commitLastKey); headLen = 0; @@ -612,7 +612,7 @@ _again: } // meter is going to be deleted, abort - if 
(vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETING)) { + if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) { dWarn("vid:%d sid:%d is dropped, ignore this meter", vnode, sid); continue; } @@ -642,7 +642,7 @@ _again: read(pVnode->hfd, &pMeter->lastBlock, sizeof(SCompBlock)); } } else { - dTrace("vid:%d sid:%d id:%s, uid:%ld is not matched w/ old:%ld, old data will be thrown away", + dTrace("vid:%d sid:%d id:%s, uid:%" PRIu64 " is not matched with old:%" PRIu64 ", old data will be thrown away", vnode, sid, pObj->meterId, pObj->uid, compInfo.uid); pMeter->oldNumOfBlocks = 0; } @@ -683,7 +683,7 @@ _again: query.sdata = data; vnodeSetCommitQuery(pObj, &query); - dTrace("vid:%d sid:%d id:%s, start to commit, startKey:%lld slot:%d pos:%d", pObj->vnode, pObj->sid, pObj->meterId, + dTrace("vid:%d sid:%d id:%s, start to commit, startKey:%" PRId64 " slot:%d pos:%d", pObj->vnode, pObj->sid, pObj->meterId, pObj->lastKeyOnFile, query.slot, query.pos); pointsRead = 0; @@ -760,7 +760,7 @@ _again: pMeter->newNumOfBlocks++; pMeter->committedPoints += (pointsRead - pointsReadLast); - dTrace("vid:%d sid:%d id:%s, pointsRead:%d, pointsReadLast:%d lastKey:%lld, " + dTrace("vid:%d sid:%d id:%s, pointsRead:%d, pointsReadLast:%d lastKey:%" PRId64 ", " "slot:%d pos:%d newNumOfBlocks:%d headLen:%d", pObj->vnode, pObj->sid, pObj->meterId, pointsRead, pointsReadLast, pObj->lastKeyOnFile, query.slot, query.pos, pMeter->newNumOfBlocks, headLen); @@ -771,7 +771,7 @@ _again: pointsReadLast = 0; } - dTrace("vid:%d sid:%d id:%s, %d points are committed, lastKey:%lld slot:%d pos:%d newNumOfBlocks:%d", + dTrace("vid:%d sid:%d id:%s, %d points are committed, lastKey:%" PRId64 " slot:%d pos:%d newNumOfBlocks:%d", pObj->vnode, pObj->sid, pObj->meterId, pMeter->committedPoints, pObj->lastKeyOnFile, query.slot, query.pos, pMeter->newNumOfBlocks); @@ -1093,7 +1093,7 @@ int vnodeReadColumnToMem(int fd, SCompBlock *pBlock, SField **fields, int col, c } if (len <= 0) { - dError("failed to read col:%d, 
offset:%ld, reason:%s", col, tfields[col].offset, strerror(errno)); + dError("failed to read col:%d, offset:%d, reason:%s", col, (int32_t)(tfields[col].offset), strerror(errno)); return -1; } @@ -1218,7 +1218,7 @@ int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[] int dfd = pVnode->dfd; if (pCompBlock->last && (points < pObj->pointsPerFileBlock * tsFileBlockMinPercent)) { - dTrace("vid:%d sid:%d id:%s, points:%d are written to last block, block stime: %ld, block etime: %ld", + dTrace("vid:%d sid:%d id:%s, points:%d are written to last block, block stime: %" PRId64 ", block etime: %" PRId64, pObj->vnode, pObj->sid, pObj->meterId, points, *((TSKEY *)(data[0]->data)), *((TSKEY * )(data[0]->data + (points - 1) * pObj->schema[0].bytes))); pCompBlock->last = 1; @@ -1246,7 +1246,7 @@ int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[] // assert(data[i]->len == points*pObj->schema[i].bytes); if (pCfg->compression) { - cdata[i]->len = (*pCompFunc[pObj->schema[i].type])(data[i]->data, points * pObj->schema[i].bytes, points, + cdata[i]->len = (*pCompFunc[(uint8_t)pObj->schema[i].type])(data[i]->data, points * pObj->schema[i].bytes, points, cdata[i]->data, pObj->schema[i].bytes*pObj->pointsPerFileBlock+EXTRA_BYTES, pCfg->compression, buffer, bufferSize); fields[i].len = cdata[i]->len; @@ -1303,7 +1303,7 @@ int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[] pCompBlock->len += wlen; } - dTrace("vid:%d, vnode compStorage size is: %ld", pObj->vnode, pVnode->vnodeStatistic.compStorage); + dTrace("vid:%d, vnode compStorage size is: %" PRId64, pObj->vnode, pVnode->vnodeStatistic.compStorage); pCompBlock->algorithm = pCfg->compression; pCompBlock->numOfPoints = points; @@ -1339,7 +1339,7 @@ int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery) { if (pVnode->numOfFiles <= 0) return 0; SVnodeCfg *pCfg = &pVnode->cfg; - delta = (int64_t)pCfg->daysPerFile * tsMsPerDay[pVnode->cfg.precision]; + 
delta = (int64_t)pCfg->daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision]; latest = pObj->lastKeyOnFile; oldest = (pVnode->fileId - pVnode->numOfFiles + 1) * delta; @@ -1355,7 +1355,7 @@ int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery) { if (pQuery->skey < oldest) pQuery->skey = oldest; } - dTrace("vid:%d sid:%d id:%s, skey:%ld ekey:%ld oldest:%ld latest:%ld fileId:%d numOfFiles:%d", + dTrace("vid:%d sid:%d id:%s, skey:%" PRId64 " ekey:%" PRId64 " oldest:%" PRId64 " latest:%" PRId64 " fileId:%d numOfFiles:%d", pObj->vnode, pObj->sid, pObj->meterId, pQuery->skey, pQuery->ekey, oldest, latest, pVnode->fileId, pVnode->numOfFiles); @@ -1383,7 +1383,7 @@ int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery) { firstSlot = 0; lastSlot = pQuery->numOfBlocks - 1; - numOfBlocks = pQuery->numOfBlocks; + //numOfBlocks = pQuery->numOfBlocks; if (QUERY_IS_ASC_QUERY(pQuery) && pBlock[lastSlot].keyLast < pQuery->skey) continue; if (!QUERY_IS_ASC_QUERY(pQuery) && pBlock[firstSlot].keyFirst > pQuery->skey) continue; @@ -1640,11 +1640,15 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { pData = pQuery->sdata[i]->data + pQuery->pointsOffset * bytes; pRead = sdata[colBufferIndex]->data + startPos * bytes; - memcpy(pData, pRead, numOfReads * bytes); + if (QUERY_IS_ASC_QUERY(pQuery)) { + memcpy(pData, pRead, numOfReads * bytes); + } else { //reversed copy to output buffer + for(int32_t j = 0; j < numOfReads; ++j) { + memcpy(pData + bytes * j, pRead + (numOfReads - 1 - j) * bytes, bytes); + } + } } - numOfQualifiedPoints = numOfReads; - } else { // check each data one by one set the input column data for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { @@ -1659,8 +1663,8 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = startPos; j < pBlock->numOfPoints; j -= step) { TSKEY key = vnodeGetTSInDataBlock(pQuery, j, startPositionFactor); if (key < startKey || key > endKey) { - dError("vid:%d sid:%d id:%s, timestamp in file block 
disordered. slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); + dError("vid:%d sid:%d id:%s, timestamp in file block disordered. slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -1675,8 +1679,7 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { } ids[numOfQualifiedPoints] = j; - if (++numOfQualifiedPoints == numOfReads) { - // qualified data are enough + if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough break; } } @@ -1684,8 +1687,8 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = pQuery->pos; j >= 0; --j) { TSKEY key = vnodeGetTSInDataBlock(pQuery, j, startPositionFactor); if (key < startKey || key > endKey) { - dError("vid:%d sid:%d id:%s, timestamp in file block disordered. slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); + dError("vid:%d sid:%d id:%s, timestamp in file block disordered. slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -1698,22 +1701,21 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { if (!vnodeFilterData(pQuery, &numOfActualRead, j)) { continue; } - - ids[numOfReads - numOfQualifiedPoints - 1] = j; - if (++numOfQualifiedPoints == numOfReads) { - // qualified data are enough + + ids[numOfQualifiedPoints] = j; + if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough break; } } } - int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 0 : numOfReads - numOfQualifiedPoints; +// int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : numOfReads - numOfQualifiedPoints; for (int32_t j = 0; j < numOfQualifiedPoints; ++j) { for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { int16_t colIndexInBuffer = pQuery->pSelectExpr[col].pBase.colInfo.colIdxInBuf; int32_t bytes = GET_COLUMN_BYTES(pQuery, col); pData = pQuery->sdata[col]->data + (pQuery->pointsOffset + j) * bytes; - pRead = sdata[colIndexInBuffer]->data + ids[j + start] * bytes; + pRead = sdata[colIndexInBuffer]->data + ids[j/* + start*/] * bytes; memcpy(pData, pRead, bytes); } diff --git a/src/system/detail/src/vnodeImport.c b/src/system/detail/src/vnodeImport.c index f0019a92ee4bb9d2fd9b3372be2daec1b59be9c6..7ebab90f0baed0b79936fced88c3a129cff6f170 100644 --- a/src/system/detail/src/vnodeImport.c +++ b/src/system/detail/src/vnodeImport.c @@ -18,6 +18,7 @@ #include "vnode.h" #include "vnodeUtil.h" +#include "vnodeStatus.h" extern void vnodeGetHeadTname(char *nHeadName, char *nLastName, int vnode, int fileId); extern int vnodeReadColumnToMem(int fd, SCompBlock *pBlock, SField **fields, int col, char *data, int dataSize, @@ -118,7 +119,7 @@ int vnodeFindKeyInCache(SImportInfo *pImport, int order) { if (pInfo->commitPoint >= pObj->pointsPerBlock) pImport->slot = (pImport->slot + 1) % pInfo->maxBlocks; pImport->pos = 0; pImport->key = 0; - dTrace("vid:%d sid:%d id:%s, key:%ld, import to head of cache", pObj->vnode, pObj->sid, pObj->meterId, key); + dTrace("vid:%d sid:%d id:%s, key:%" PRId64 ", import to head of cache", pObj->vnode, pObj->sid, pObj->meterId, key); code = 0; } else { pImport->slot = query.slot; @@ -146,7 +147,7 @@ int vnodeFindKeyInCache(SImportInfo *pImport, int order) { void vnodeGetValidDataRange(int vnode, TSKEY now, TSKEY *minKey, TSKEY *maxKey) { SVnodeObj *pVnode = vnodeList + vnode; - int64_t delta = pVnode->cfg.daysPerFile * tsMsPerDay[pVnode->cfg.precision]; + int64_t delta = pVnode->cfg.daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision]; int fid = now / delta; *minKey = (fid - pVnode->maxFiles + 1) * 
delta; *maxKey = (fid + 2) * delta - 1; @@ -183,8 +184,8 @@ int vnodeImportPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi vnodeGetValidDataRange(pObj->vnode, now, &minKey, &maxKey); if (firstKey < minKey || firstKey > maxKey || lastKey < minKey || lastKey > maxKey) { dError( - "vid:%d sid:%d id:%s, invalid timestamp to import, rows:%d firstKey: %ld lastKey: %ld minAllowedKey:%ld " - "maxAllowedKey:%ld", + "vid:%d sid:%d id:%s, invalid timestamp to import, rows:%d firstKey: %" PRId64 " lastKey: %" PRId64 " minAllowedKey:%" PRId64 " " + "maxAllowedKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey, minKey, maxKey); return TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; } @@ -220,7 +221,7 @@ int vnodeImportPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi SImportInfo import = {0}; - dTrace("vid:%d sid:%d id:%s, try to import %d rows data, firstKey:%ld, lastKey:%ld, object lastKey:%ld", + dTrace("vid:%d sid:%d id:%s, try to import %d rows data, firstKey:%" PRId64 ", lastKey:%" PRId64 ", object lastKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey, pObj->lastKey); import.firstKey = firstKey; @@ -490,7 +491,7 @@ static int vnodeLoadNeededBlockData(SMeterObj *pObj, SImportHandle *pHandle, int lseek(dfd, pBlock->offset, SEEK_SET); if (read(dfd, (void *)(pHandle->pField), pHandle->pFieldSize) < 0) { - dError("vid:%d sid:%d meterId:%s, failed to read data file, size:%ld reason:%s", pVnode->vnode, pObj->sid, + dError("vid:%d sid:%d meterId:%s, failed to read data file, size:%zu reason:%s", pVnode->vnode, pObj->sid, pObj->meterId, pHandle->pFieldSize, strerror(errno)); *code = TSDB_CODE_FILE_CORRUPTED; return -1; @@ -578,7 +579,12 @@ static int vnodeCloseImportFiles(SMeterObj *pObj, SImportHandle *pHandle) { SVnodeObj *pVnode = vnodeList + pObj->vnode; char dpath[TSDB_FILENAME_LEN] = "\0"; SCompInfo compInfo; - __off_t offset = 0; + +#ifdef _ALPINE + off_t offset = 0; +#else + __off_t offset = 0; 
+#endif if (pVnode->nfd > 0) { offset = lseek(pVnode->nfd, 0, SEEK_CUR); @@ -604,7 +610,7 @@ static int vnodeCloseImportFiles(SMeterObj *pObj, SImportHandle *pHandle) { lseek(pVnode->nfd, 0, SEEK_END); lseek(pVnode->hfd, pHandle->nextNo0Offset, SEEK_SET); if (tsendfile(pVnode->nfd, pVnode->hfd, NULL, pHandle->hfSize - pHandle->nextNo0Offset) < 0) { - dError("vid:%d sid:%d meterId:%s, failed to sendfile, size:%ld, reason:%s", pObj->vnode, pObj->sid, + dError("vid:%d sid:%d meterId:%s, failed to sendfile, size:%" PRId64 ", reason:%s", pObj->vnode, pObj->sid, pObj->meterId, pHandle->hfSize - pHandle->nextNo0Offset, strerror(errno)); return -1; } @@ -621,7 +627,7 @@ static int vnodeCloseImportFiles(SMeterObj *pObj, SImportHandle *pHandle) { taosCalcChecksumAppend(0, (uint8_t *)(pHandle->pHeader), pHandle->pHeaderSize); lseek(pVnode->nfd, TSDB_FILE_HEADER_LEN, SEEK_SET); if (twrite(pVnode->nfd, (void *)(pHandle->pHeader), pHandle->pHeaderSize) < 0) { - dError("vid:%d sid:%d meterId:%s, failed to wirte SCompHeader part, size:%ld, reason:%s", pObj->vnode, pObj->sid, + dError("vid:%d sid:%d meterId:%s, failed to wirte SCompHeader part, size:%zu, reason:%s", pObj->vnode, pObj->sid, pObj->meterId, pHandle->pHeaderSize, strerror(errno)); return -1; } @@ -682,7 +688,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int SCacheInfo * pInfo = (SCacheInfo *)(pObj->pCache); TSKEY lastKeyImported = 0; - TSKEY delta = pVnode->cfg.daysPerFile * tsMsPerDay[pVnode->cfg.precision]; + TSKEY delta = pVnode->cfg.daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision]; TSKEY minFileKey = fid * delta; TSKEY maxFileKey = minFileKey + delta - 1; TSKEY firstKey = KEY_AT_INDEX(payload, pObj->bytesPerPoint, 0); @@ -905,6 +911,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int blockIter.nextKey = maxFileKey + 1; } else { // Case 3. 
need to search the block for slot and pos if (key == minKey || key == maxKey) { + if (tsAffectedRowsMod) pointsImported++; payloadIter++; continue; } @@ -933,6 +940,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int } while (left < right); if (key == blockMinKey || key == blockMaxKey) { // duplicate key + if (tsAffectedRowsMod) pointsImported++; payloadIter++; continue; } @@ -949,6 +957,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int if (key == importHandle.pBlocks[blockIter.slot].keyFirst || key == importHandle.pBlocks[blockIter.slot].keyLast) { + if (tsAffectedRowsMod) pointsImported++; payloadIter++; continue; } @@ -970,6 +979,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int importHandle.data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, pBlock->numOfPoints, key, TSQL_SO_ASC); assert(pos != 0); if (KEY_AT_INDEX(importHandle.data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, sizeof(TSKEY), pos) == key) { + if (tsAffectedRowsMod) pointsImported++; payloadIter++; continue; } @@ -1100,6 +1110,7 @@ static int vnodeMergeDataIntoFile(SImportInfo *pImport, const char *payload, int if (KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) == KEY_AT_INDEX(importHandle.data[PRIMARYKEY_TIMESTAMP_COL_INDEX]->data, sizeof(TSKEY), blockIter.pos)) { // duplicate key + if (tsAffectedRowsMod) pointsImported++; payloadIter++; continue; } else if (KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter) < @@ -1314,7 +1325,10 @@ int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int pImport->lastKey = lastKey; for (payloadIter = 0; payloadIter < rows; payloadIter++) { TSKEY key = KEY_AT_INDEX(payload, pObj->bytesPerPoint, payloadIter); - if (key == pObj->lastKey) continue; + if (key == pObj->lastKey) { + if (tsAffectedRowsMod) rowsImported++; + continue; + } if (key > pObj->lastKey) { // Just as insert pImport->slot = pInfo->currentSlot; pImport->pos = 
pInfo->cacheBlocks[pImport->slot]->numOfPoints; @@ -1327,11 +1341,12 @@ int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int } if (pImport->firstKey != pImport->key) break; + if (tsAffectedRowsMod) rowsImported++; } } if (payloadIter == rows) { - pImport->importedRows = 0; + pImport->importedRows += rowsImported; code = 0; goto _exit; } @@ -1464,6 +1479,7 @@ int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int payloadIter++; } else { + if (tsAffectedRowsMod) rowsImported++; payloadIter++; continue; } @@ -1499,7 +1515,7 @@ int vnodeImportDataToFiles(SImportInfo *pImport, char *payload, const int rows) SMeterObj *pObj = (SMeterObj *)(pImport->pObj); SVnodeObj *pVnode = vnodeList + pObj->vnode; - int64_t delta = pVnode->cfg.daysPerFile * tsMsPerDay[pVnode->cfg.precision]; + int64_t delta = pVnode->cfg.daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision]; int sfid = KEY_AT_INDEX(payload, pObj->bytesPerPoint, 0) / delta; int efid = KEY_AT_INDEX(payload, pObj->bytesPerPoint, rows - 1) / delta; @@ -1512,7 +1528,7 @@ int vnodeImportDataToFiles(SImportInfo *pImport, char *payload, const int rows) assert(nrows > 0); - dTrace("vid:%d sid:%d meterId:%s, %d rows of data will be imported to file %d, srow:%d firstKey:%ld lastKey:%ld", + dTrace("vid:%d sid:%d meterId:%s, %d rows of data will be imported to file %d, srow:%d firstKey:%" PRId64 " lastKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, nrows, fid, srow, KEY_AT_INDEX(payload, pObj->bytesPerPoint, srow), KEY_AT_INDEX(payload, pObj->bytesPerPoint, (srow + nrows - 1))); diff --git a/src/system/detail/src/vnodeMeter.c b/src/system/detail/src/vnodeMeter.c index 190ddb1b096040421a999fa9b364a46b9a72a7fe..79610a73ad1b368def881f3f80979cb790f76bfe 100644 --- a/src/system/detail/src/vnodeMeter.c +++ b/src/system/detail/src/vnodeMeter.c @@ -24,9 +24,7 @@ #include "vnodeMgmt.h" #include "vnodeShell.h" #include "vnodeUtil.h" -#include "tstatus.h" - -#pragma GCC diagnostic 
ignored "-Wpointer-sign" +#include "vnodeStatus.h" #define VALID_TIMESTAMP(key, curKey, prec) (((key) >= 0) && ((key) <= ((curKey) + 36500 * tsMsPerDay[prec]))) @@ -83,7 +81,7 @@ int vnodeCreateMeterObjFile(int vnode) { if (errno == EACCES) { return TSDB_CODE_NO_DISK_PERMISSIONS; } else if (errno == ENOSPC) { - return TSDB_CODE_SERVER_NO_SPACE; + return TSDB_CODE_SERV_NO_DISKSPACE; } else { return TSDB_CODE_VG_INIT_FAILED; } @@ -355,7 +353,7 @@ int vnodeRestoreMeterObj(char *buffer, int64_t length) { // taosSetSecurityInfo(pObj->vnode, pObj->sid, pObj->meterId, pObj->spi, pObj->encrypt, pObj->secret, pObj->cipheringKey); - dTrace("vid:%d sid:%d id:%s, meter is restored, uid:%ld", pObj->vnode, pObj->sid, pObj->meterId, pObj->uid); + dTrace("vid:%d sid:%d id:%s, meter is restored, uid:%" PRIu64 "", pObj->vnode, pObj->sid, pObj->meterId, pObj->uid); return TSDB_CODE_SUCCESS; } @@ -495,7 +493,7 @@ int vnodeCreateMeterObj(SMeterObj *pNew, SConnSec *pSec) { vnodeSaveMeterObjToFile(pNew); // vnodeCreateMeterMgmt(pNew, pSec); vnodeCreateStream(pNew); - dTrace("vid:%d, sid:%d id:%s, meterObj is created, uid:%ld", pNew->vnode, pNew->sid, pNew->meterId, pNew->uid); + dTrace("vid:%d, sid:%d id:%s, meterObj is created, uid:%" PRIu64 "", pNew->vnode, pNew->sid, pNew->meterId, pNew->uid); } return code; @@ -528,7 +526,7 @@ int vnodeRemoveMeterObj(int vnode, int sid) { } // after remove this meter, change its state to DELETED - pObj->state = TSDB_METER_STATE_DELETED; + pObj->state = TSDB_METER_STATE_DROPPED; pObj->timeStamp = taosGetTimestampMs(); vnodeList[vnode].lastRemove = pObj->timeStamp; @@ -571,8 +569,8 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi if (numOfPoints >= (pVnode->cfg.blocksPerMeter - 2) * pObj->pointsPerBlock) { code = TSDB_CODE_BATCH_SIZE_TOO_BIG; - dError("vid:%d sid:%d id:%s, batch size too big, it shall be smaller than:%d", pObj->vnode, pObj->sid, - pObj->meterId, (pVnode->cfg.blocksPerMeter - 2) * 
pObj->pointsPerBlock); + dError("vid:%d sid:%d id:%s, batch size too big, insert points:%d, it shall be smaller than:%d", pObj->vnode, pObj->sid, + pObj->meterId, numOfPoints, (pVnode->cfg.blocksPerMeter - 2) * pObj->pointsPerBlock); return code; } @@ -607,28 +605,33 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi vnodeSendMeterCfgMsg(pObj->vnode, pObj->sid); code = TSDB_CODE_ACTION_IN_PROGRESS; return code; + } else if (pObj->sversion > sversion) { + dTrace("vid:%d sid:%d id:%s, client schema out of date, sql is invalid. client sversion:%d vnode sversion:%d", + pObj->vnode, pObj->sid, pObj->meterId, pObj->sversion, sversion); + code = TSDB_CODE_INVALID_SQL; + return code; } pData = pSubmit->payLoad; TSKEY firstKey = *((TSKEY *)pData); TSKEY lastKey = *((TSKEY *)(pData + pObj->bytesPerPoint * (numOfPoints - 1))); - int cfid = now/pVnode->cfg.daysPerFile/tsMsPerDay[pVnode->cfg.precision]; + int cfid = now/pVnode->cfg.daysPerFile/tsMsPerDay[(uint8_t)pVnode->cfg.precision]; - TSKEY minAllowedKey = (cfid - pVnode->maxFiles + 1)*pVnode->cfg.daysPerFile*tsMsPerDay[pVnode->cfg.precision]; - TSKEY maxAllowedKey = (cfid + 2)*pVnode->cfg.daysPerFile*tsMsPerDay[pVnode->cfg.precision] - 2; + TSKEY minAllowedKey = (cfid - pVnode->maxFiles + 1)*pVnode->cfg.daysPerFile*tsMsPerDay[(uint8_t)pVnode->cfg.precision]; + TSKEY maxAllowedKey = (cfid + 2)*pVnode->cfg.daysPerFile*tsMsPerDay[(uint8_t)pVnode->cfg.precision] - 2; if (firstKey < minAllowedKey || firstKey > maxAllowedKey || lastKey < minAllowedKey || lastKey > maxAllowedKey) { - dError("vid:%d sid:%d id:%s, vnode lastKeyOnFile:%lld, data is out of range, numOfPoints:%d firstKey:%lld lastKey:%lld minAllowedKey:%lld maxAllowedKey:%lld", + dError("vid:%d sid:%d id:%s, vnode lastKeyOnFile:%" PRId64 ", data is out of range, numOfPoints:%d firstKey:%" PRId64 " lastKey:%" PRId64 " minAllowedKey:%" PRId64 " maxAllowedKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pVnode->lastKeyOnFile, 
numOfPoints,firstKey, lastKey, minAllowedKey, maxAllowedKey); return TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; } - if ((code = vnodeSetMeterInsertImportStateEx(pObj, TSDB_METER_STATE_INSERT)) != TSDB_CODE_SUCCESS) { + if ((code = vnodeSetMeterInsertImportStateEx(pObj, TSDB_METER_STATE_INSERTING)) != TSDB_CODE_SUCCESS) { goto _over; } for (i = 0; i < numOfPoints; ++i) { // meter will be dropped, abort current insertion - if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETING)) { + if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) { dWarn("vid:%d sid:%d id:%s, meter is dropped, abort insert, state:%d", pObj->vnode, pObj->sid, pObj->meterId, pObj->state); @@ -637,13 +640,13 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi } if (*((TSKEY *)pData) <= pObj->lastKey) { - dWarn("vid:%d sid:%d id:%s, received key:%ld not larger than lastKey:%ld", pObj->vnode, pObj->sid, pObj->meterId, + dWarn("vid:%d sid:%d id:%s, received key:%" PRId64 " not larger than lastKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, *((TSKEY *)pData), pObj->lastKey); pData += pObj->bytesPerPoint; continue; } - if (!VALID_TIMESTAMP(*((TSKEY *)pData), tsKey, pVnode->cfg.precision)) { + if (!VALID_TIMESTAMP(*((TSKEY *)pData), tsKey, (uint8_t)pVnode->cfg.precision)) { code = TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; break; } @@ -666,15 +669,16 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi if (pObj->lastKey > pVnode->lastKey) pVnode->lastKey = pObj->lastKey; if (firstKey < pVnode->firstKey) pVnode->firstKey = firstKey; + assert(pVnode->firstKey > 0); pVnode->version++; pthread_mutex_unlock(&(pVnode->vmutex)); - vnodeClearMeterState(pObj, TSDB_METER_STATE_INSERT); + vnodeClearMeterState(pObj, TSDB_METER_STATE_INSERTING); _over: - dTrace("vid:%d sid:%d id:%s, %d out of %d points are inserted, lastKey:%ld source:%d, vnode total storage: %ld", + dTrace("vid:%d sid:%d id:%s, %d out of %d points are inserted, lastKey:%" PRId64 " source:%d, vnode 
total storage: %" PRId64 "", pObj->vnode, pObj->sid, pObj->meterId, points, numOfPoints, pObj->lastKey, source, pVnode->vnodeStatistic.totalStorage); @@ -737,7 +741,7 @@ void vnodeUpdateMeter(void *param, void *tmrId) { } SMeterObj *pObj = pVnode->meterList[pNew->sid]; - if (pObj == NULL || vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETING)) { + if (pObj == NULL || vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) { dTrace("vid:%d sid:%d id:%s, meter is deleted, abort update schema", pNew->vnode, pNew->sid, pNew->meterId); free(pNew->schema); free(pNew); @@ -745,7 +749,7 @@ void vnodeUpdateMeter(void *param, void *tmrId) { } int32_t state = vnodeSetMeterState(pObj, TSDB_METER_STATE_UPDATING); - if (state >= TSDB_METER_STATE_DELETING) { + if (state >= TSDB_METER_STATE_DROPPING) { dError("vid:%d sid:%d id:%s, meter is deleted, failed to update, state:%d", pObj->vnode, pObj->sid, pObj->meterId, state); return; diff --git a/src/system/detail/src/vnodeQueryImpl.c b/src/system/detail/src/vnodeQueryImpl.c index 7617d505827cfc53186dde5fd1ddda112fd69c2f..dd86c6a35c0f8e6f1a81c67bca824b662bb1d647 100644 --- a/src/system/detail/src/vnodeQueryImpl.c +++ b/src/system/detail/src/vnodeQueryImpl.c @@ -14,6 +14,8 @@ */ #include "os.h" +#include "hash.h" +#include "hashutil.h" #include "taosmsg.h" #include "textbuffer.h" #include "ttime.h" @@ -31,6 +33,7 @@ #include "vnodeDataFilterFunc.h" #include "vnodeFile.h" #include "vnodeQueryImpl.h" +#include "vnodeStatus.h" enum { TS_JOIN_TS_EQUAL = 0, @@ -38,23 +41,25 @@ enum { TS_JOIN_TAG_NOT_EQUALS = 2, }; +enum { + DISK_BLOCK_NO_NEED_TO_LOAD = 0, + DISK_BLOCK_LOAD_TS = 1, + DISK_BLOCK_LOAD_BLOCK = 2, +}; + #define IS_DISK_DATA_BLOCK(q) ((q)->fileId >= 0) -//static int32_t copyDataFromMMapBuffer(int fd, SQInfo *pQInfo, SQueryFilesInfo *pQueryFile, char *buf, uint64_t offset, -// int32_t size); static int32_t readDataFromDiskFile(int fd, SQInfo *pQInfo, SQueryFilesInfo *pQueryFile, char *buf, uint64_t offset, int32_t size); 
-//__read_data_fn_t readDataFunctor[2] = {copyDataFromMMapBuffer, readDataFromDiskFile}; - -static void vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadInfo); +static void vnodeInitLoadCompBlockInfo(SLoadCompBlockInfo *pCompBlockLoadInfo); static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __block_search_fn_t searchFn, bool loadData); static int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, - SQueryRuntimeEnv *pRuntimeEnv, SMeterDataInfo *pMeterHeadDataInfo, + SQueryRuntimeEnv *pRuntimeEnv, SMeterDataInfo *pMeterDataInfo, int32_t start, int32_t end); -static TSKEY getTimestampInCacheBlock(SCacheBlock *pBlock, int32_t index); +static TSKEY getTimestampInCacheBlock(SQueryRuntimeEnv *pRuntimeEnv, SCacheBlock *pBlock, int32_t index); static TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index); static void savePointPosition(SPositionInfo *position, int32_t fileId, int32_t slot, int32_t pos); @@ -62,23 +67,23 @@ static int32_t getNextDataFileCompInfo(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj static void setGroupOutputBuffer(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes *pResult); -static void getAlignedIntervalQueryRange(SQuery *pQuery, TSKEY keyInData, TSKEY skey, TSKEY ekey); -static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pInfo, - SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, char *sdata, SField *pFields, - __block_search_fn_t searchFn); +static void getAlignedIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY keyInData, TSKEY skey, TSKEY ekey); -static void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult); -static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, char *data, - int64_t *pPrimaryData, SBlockInfo *pBlockInfo, int32_t blockStatus, - SField *pFields, __block_search_fn_t searchFn); +static int32_t 
saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult); +static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pMeterDataInfo, + SBlockInfo *pBlockInfo, int32_t blockStatus, SField *pFields, + __block_search_fn_t searchFn); -static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx); -static void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, - const SQueryRuntimeEnv *pRuntimeEnv); -static void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t numOfIncrementRes); -static void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t vid); -static void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn); -static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); +static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx); +static int32_t flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, + const SQueryRuntimeEnv *pRuntimeEnv); +static void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t numOfIncrementRes); +static void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t vid); +static TSKEY getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn); +static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); +static void doGetAlignedIntervalQueryRangeImpl(SQuery *pQuery, int64_t pKey, int64_t keyFirst, int64_t keyLast, + int64_t *actualSkey, int64_t *actualEkey, int64_t *skey, int64_t *ekey); +static void getNextLogicalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow* pTimeWindow); // check the offset value integrity static FORCE_INLINE int32_t validateHeaderOffsetSegment(SQInfo *pQInfo, char *filePath, int32_t vid, char *data, @@ -100,9 +105,10 
@@ static FORCE_INLINE int32_t getCompHeaderStartPosition(SVnodeCfg *pCfg) { static FORCE_INLINE int32_t validateCompBlockOffset(SQInfo *pQInfo, SMeterObj *pMeterObj, SCompHeader *pCompHeader, SQueryFilesInfo *pQueryFileInfo, int32_t headerSize) { - if (pCompHeader->compInfoOffset < headerSize || pCompHeader->compInfoOffset > pQueryFileInfo->headFileSize) { - dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%d is not valid, size:%ld", pQInfo, pMeterObj->vnode, - pMeterObj->sid, pMeterObj->meterId, pCompHeader->compInfoOffset, pQueryFileInfo->headFileSize); + if (pCompHeader->compInfoOffset < headerSize || pCompHeader->compInfoOffset > pQueryFileInfo->headerFileSize) { + dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%" PRId64 " is not valid, size:%" PRId64, pQInfo, + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pCompHeader->compInfoOffset, + pQueryFileInfo->headerFileSize); return -1; } @@ -114,19 +120,19 @@ static FORCE_INLINE int32_t validateCompBlockOffset(SQInfo *pQInfo, SMeterObj *p static FORCE_INLINE int32_t validateCompBlockInfoSegment(SQInfo *pQInfo, const char *filePath, int32_t vid, SCompInfo *compInfo, int64_t offset) { if (!taosCheckChecksumWhole((uint8_t *)compInfo, sizeof(SCompInfo))) { - dLError("QInfo:%p vid:%d, failed to read header file:%s, file compInfo broken, offset:%lld", pQInfo, vid, filePath, - offset); + dLError("QInfo:%p vid:%d, failed to read header file:%s, file compInfo broken, offset:%" PRId64, pQInfo, vid, + filePath, offset); return -1; } return 0; } -static FORCE_INLINE int32_t validateCompBlockSegment(SQInfo *pQInfo, const char *filePath, SCompInfo *compInfo, char *pBlock, - int32_t vid, TSCKSUM checksum) { +static FORCE_INLINE int32_t validateCompBlockSegment(SQInfo *pQInfo, const char *filePath, SCompInfo *compInfo, + char *pBlock, int32_t vid, TSCKSUM checksum) { uint32_t size = compInfo->numOfBlocks * sizeof(SCompBlock); if (checksum != taosCalcChecksum(0, (uint8_t *)pBlock, size)) { - dLError("QInfo:%p 
vid:%d, failed to read header file:%s, file compblock is broken:%ld", pQInfo, vid, filePath, + dLError("QInfo:%p vid:%d, failed to read header file:%s, file compblock is broken:%zu", pQInfo, vid, filePath, (char *)compInfo + sizeof(SCompInfo)); return -1; } @@ -156,6 +162,30 @@ bool isGroupbyNormalCol(SSqlGroupbyExpr *pGroupbyExpr) { return false; } +int16_t getGroupbyColumnType(SQuery *pQuery, SSqlGroupbyExpr *pGroupbyExpr) { + assert(pGroupbyExpr != NULL); + + int32_t colId = -2; + int16_t type = TSDB_DATA_TYPE_NULL; + + for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) { + SColIndexEx *pColIndex = &pGroupbyExpr->columnInfo[i]; + if (pColIndex->flag == TSDB_COL_NORMAL) { + colId = pColIndex->colId; + break; + } + } + + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + if (colId == pQuery->colList[i].data.colId) { + type = pQuery->colList[i].data.type; + break; + } + } + + return type; +} + bool isSelectivityWithTagsQuery(SQuery *pQuery) { bool hasTags = false; int32_t numOfSelectivity = 0; @@ -190,12 +220,13 @@ static bool vnodeIsCompBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj SQuery *pQuery = pRuntimeEnv->pQuery; // check if data file header of this table has been loaded into memory, avoid to reloaded comp Block info - SQueryLoadCompBlockInfo *pLoadCompBlockInfo = &pRuntimeEnv->loadCompBlockInfo; + SLoadCompBlockInfo *pLoadCompBlockInfo = &pRuntimeEnv->loadCompBlockInfo; // if vnodeFreeFields is called, the pQuery->pFields is NULL if (pLoadCompBlockInfo->fileListIndex == fileIndex && pLoadCompBlockInfo->sid == pMeterObj->sid && pQuery->pFields != NULL && pQuery->fileId > 0) { - assert(pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex].fileID == pLoadCompBlockInfo->fileId && pQuery->numOfBlocks > 0); + assert(pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex].fileID == pLoadCompBlockInfo->fileId && + pQuery->numOfBlocks > 0); return true; } @@ -203,64 +234,87 @@ static bool vnodeIsCompBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj } 
static void vnodeSetCompBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIndex, int32_t sid) { - SQueryLoadCompBlockInfo *pLoadCompBlockInfo = &pRuntimeEnv->loadCompBlockInfo; + SLoadCompBlockInfo *pCompBlockLoadInfo = &pRuntimeEnv->loadCompBlockInfo; - pLoadCompBlockInfo->sid = sid; - pLoadCompBlockInfo->fileListIndex = fileIndex; - pLoadCompBlockInfo->fileId = pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex].fileID; + pCompBlockLoadInfo->sid = sid; + pCompBlockLoadInfo->fileListIndex = fileIndex; + pCompBlockLoadInfo->fileId = pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex].fileID; } -static void vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadInfo) { +static void vnodeInitLoadCompBlockInfo(SLoadCompBlockInfo *pCompBlockLoadInfo) { pCompBlockLoadInfo->sid = -1; pCompBlockLoadInfo->fileId = -1; pCompBlockLoadInfo->fileListIndex = -1; } -static bool vnodeIsDatablockLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SQueryLoadBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; +static int32_t vnodeIsDatablockLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex, + bool loadPrimaryTS) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SLoadDataBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; /* this block has been loaded into memory, return directly */ if (pLoadInfo->fileId == pQuery->fileId && pLoadInfo->slotIdx == pQuery->slot && pQuery->slot != -1 && - pLoadInfo->sid == pMeterObj->sid) { - assert(fileIndex == pLoadInfo->fileListIndex); - return true; + pLoadInfo->sid == pMeterObj->sid && pLoadInfo->fileListIndex == fileIndex) { + // previous load operation does not load the primary timestamp column, we only need to load the timestamp column + if (pLoadInfo->tsLoaded == false && pLoadInfo->tsLoaded != loadPrimaryTS) { + return DISK_BLOCK_LOAD_TS; + } else { + return DISK_BLOCK_NO_NEED_TO_LOAD; + } } - return false; + return DISK_BLOCK_LOAD_BLOCK; 
} -static void vnodeSetDataBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SQueryLoadBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; +static void vnodeSetDataBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex, + bool tsLoaded) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SLoadDataBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; pLoadInfo->fileId = pQuery->fileId; pLoadInfo->slotIdx = pQuery->slot; pLoadInfo->fileListIndex = fileIndex; pLoadInfo->sid = pMeterObj->sid; + pLoadInfo->tsLoaded = tsLoaded; } -static void vnodeInitDataBlockInfo(SQueryLoadBlockInfo *pBlockLoadInfo) { +static void vnodeInitDataBlockInfo(SLoadDataBlockInfo *pBlockLoadInfo) { pBlockLoadInfo->slotIdx = -1; pBlockLoadInfo->fileId = -1; pBlockLoadInfo->sid = -1; pBlockLoadInfo->fileListIndex = -1; } -static void vnodeSetOpenedFileNames(SQueryFilesInfo* pVnodeFilesInfo) { +static void vnodeSetCurrentFileNames(SQueryFilesInfo *pVnodeFilesInfo) { assert(pVnodeFilesInfo->current >= 0 && pVnodeFilesInfo->current < pVnodeFilesInfo->numOfFiles); - - SHeaderFileInfo* pCurrentFileInfo = &pVnodeFilesInfo->pFileInfo[pVnodeFilesInfo->current]; - - // set the full file path for current opened files - snprintf(pVnodeFilesInfo->headerFilePath, PATH_MAX, "%sv%df%d.head", pVnodeFilesInfo->dbFilePathPrefix, - pVnodeFilesInfo->vnodeId, pCurrentFileInfo->fileID); - - snprintf(pVnodeFilesInfo->dataFilePath, PATH_MAX, "%sv%df%d.data", pVnodeFilesInfo->dbFilePathPrefix, - pVnodeFilesInfo->vnodeId, pCurrentFileInfo->fileID); - - snprintf(pVnodeFilesInfo->lastFilePath, PATH_MAX, "%sv%df%d.last", pVnodeFilesInfo->dbFilePathPrefix, - pVnodeFilesInfo->vnodeId, pCurrentFileInfo->fileID); + + SHeaderFileInfo *pCurrentFileInfo = &pVnodeFilesInfo->pFileInfo[pVnodeFilesInfo->current]; + + /* + * set the full file path for current opened files + * the maximum allowed path string length is PATH_MAX in 
Linux, 100 bytes is used to + * suppress the compiler warnings + */ + char str[PATH_MAX + 100] = {0}; + int32_t PATH_WITH_EXTRA = PATH_MAX + 100; + + int32_t vnodeId = pVnodeFilesInfo->vnodeId; + int32_t fileId = pCurrentFileInfo->fileID; + + int32_t len = snprintf(str, PATH_WITH_EXTRA, "%sv%df%d.head", pVnodeFilesInfo->dbFilePathPrefix, vnodeId, fileId); + assert(len <= PATH_MAX); + + strncpy(pVnodeFilesInfo->headerFilePath, str, PATH_MAX); + + len = snprintf(str, PATH_WITH_EXTRA, "%sv%df%d.data", pVnodeFilesInfo->dbFilePathPrefix, vnodeId, fileId); + assert(len <= PATH_MAX); + + strncpy(pVnodeFilesInfo->dataFilePath, str, PATH_MAX); + + len = snprintf(str, PATH_WITH_EXTRA, "%sv%df%d.last", pVnodeFilesInfo->dbFilePathPrefix, vnodeId, fileId); + assert(len <= PATH_MAX); + + strncpy(pVnodeFilesInfo->lastFilePath, str, PATH_MAX); } /** @@ -271,154 +325,128 @@ static void vnodeSetOpenedFileNames(SQueryFilesInfo* pVnodeFilesInfo) { * @return */ static FORCE_INLINE bool isHeaderFileEmpty(int32_t vnodeId, size_t headerFileSize) { - SVnodeCfg* pVnodeCfg = &vnodeList[vnodeId].cfg; + SVnodeCfg *pVnodeCfg = &vnodeList[vnodeId].cfg; return headerFileSize <= getCompHeaderStartPosition(pVnodeCfg); } -static bool checkIsHeaderFileEmpty(SQueryFilesInfo* pVnodeFilesInfo, int32_t vnodeId) { +static bool checkIsHeaderFileEmpty(SQueryFilesInfo *pVnodeFilesInfo) { struct stat fstat = {0}; if (stat(pVnodeFilesInfo->headerFilePath, &fstat) < 0) { return true; } - - pVnodeFilesInfo->headFileSize = fstat.st_size; - - return isHeaderFileEmpty(vnodeId, pVnodeFilesInfo->headFileSize); + + pVnodeFilesInfo->headerFileSize = fstat.st_size; + return isHeaderFileEmpty(pVnodeFilesInfo->vnodeId, pVnodeFilesInfo->headerFileSize); } -static void doCloseQueryFileInfoFD(SQueryFilesInfo* pVnodeFilesInfo) { +static void doCloseQueryFileInfoFD(SQueryFilesInfo *pVnodeFilesInfo) { tclose(pVnodeFilesInfo->headerFd); tclose(pVnodeFilesInfo->dataFd); tclose(pVnodeFilesInfo->lastFd); + + 
pVnodeFilesInfo->current = -1; + pVnodeFilesInfo->headerFileSize = -1; } -static void doInitQueryFileInfoFD(SQueryFilesInfo* pVnodeFilesInfo) { +static void doInitQueryFileInfoFD(SQueryFilesInfo *pVnodeFilesInfo) { pVnodeFilesInfo->current = -1; - pVnodeFilesInfo->headFileSize = -1; - + pVnodeFilesInfo->headerFileSize = -1; + pVnodeFilesInfo->headerFd = FD_INITIALIZER; // set the initial value pVnodeFilesInfo->dataFd = FD_INITIALIZER; pVnodeFilesInfo->lastFd = FD_INITIALIZER; } /* - * clean memory and other corresponding resources are delegated to invoker + * close the opened fd are delegated to invoker */ -static int32_t doOpenQueryFileData(SQInfo* pQInfo, SQueryFilesInfo* pVnodeFileInfo, int32_t vnodeId) { - SHeaderFileInfo* pHeaderFileInfo = &pVnodeFileInfo->pFileInfo[pVnodeFileInfo->current]; - - pVnodeFileInfo->headerFd = open(pVnodeFileInfo->headerFilePath, O_RDONLY); - if (!FD_VALID(pVnodeFileInfo->headerFd)) { - dError("QInfo:%p failed open head file:%s reason:%s", pQInfo, pVnodeFileInfo->headerFilePath, strerror(errno)); - return -1; - } - +static int32_t doOpenQueryFile(SQInfo *pQInfo, SQueryFilesInfo *pVnodeFileInfo) { + SHeaderFileInfo *pHeaderFileInfo = &pVnodeFileInfo->pFileInfo[pVnodeFileInfo->current]; + /* * current header file is empty or broken, return directly. * - * if the header is smaller than a threshold value, this file is empty, no need to open these files - * the header file only to be opened, then the check of file size is available. Otherwise, the file may be - * replaced by new header file when opening the header file and then cause the miss check of file size + * if the header is smaller than or equals to the minimum file size value, this file is empty. No need to open this + * file and the corresponding files. 
*/ - if (checkIsHeaderFileEmpty(pVnodeFileInfo, vnodeId)) { + if (checkIsHeaderFileEmpty(pVnodeFileInfo)) { qTrace("QInfo:%p vid:%d, fileId:%d, index:%d, size:%d, ignore file, empty or broken", pQInfo, - pVnodeFileInfo->vnodeId, pHeaderFileInfo->fileID, pVnodeFileInfo->current, pVnodeFileInfo->headFileSize); - + pVnodeFileInfo->vnodeId, pHeaderFileInfo->fileID, pVnodeFileInfo->current, pVnodeFileInfo->headerFileSize); + return -1; } - + + pVnodeFileInfo->headerFd = open(pVnodeFileInfo->headerFilePath, O_RDONLY); + if (!FD_VALID(pVnodeFileInfo->headerFd)) { + dError("QInfo:%p failed open head file:%s reason:%s", pQInfo, pVnodeFileInfo->headerFilePath, strerror(errno)); + return -1; + } + pVnodeFileInfo->dataFd = open(pVnodeFileInfo->dataFilePath, O_RDONLY); if (!FD_VALID(pVnodeFileInfo->dataFd)) { dError("QInfo:%p failed open data file:%s reason:%s", pQInfo, pVnodeFileInfo->dataFilePath, strerror(errno)); return -1; } - + pVnodeFileInfo->lastFd = open(pVnodeFileInfo->lastFilePath, O_RDONLY); if (!FD_VALID(pVnodeFileInfo->lastFd)) { dError("QInfo:%p failed open last file:%s reason:%s", pQInfo, pVnodeFileInfo->lastFilePath, strerror(errno)); return -1; } - - pVnodeFileInfo->pHeaderFileData = mmap(NULL, pVnodeFileInfo->headFileSize, PROT_READ, MAP_SHARED, - pVnodeFileInfo->headerFd, 0); - - if (pVnodeFileInfo->pHeaderFileData == MAP_FAILED) { - pVnodeFileInfo->pHeaderFileData = NULL; - - doCloseQueryFileInfoFD(pVnodeFileInfo); - doInitQueryFileInfoFD(pVnodeFileInfo); - - dError("QInfo:%p failed to mmap header file:%s, size:%lld, %s", pQInfo, pVnodeFileInfo->headerFilePath, - pVnodeFileInfo->headFileSize, strerror(errno)); - - return -1; - } else { - if (madvise(pVnodeFileInfo->pHeaderFileData, pVnodeFileInfo->headFileSize, MADV_SEQUENTIAL) == -1) { - dError("QInfo:%p failed to advise kernel the usage of header file, reason:%s", pQInfo, strerror(errno)); - } - } - - return TSDB_CODE_SUCCESS; -} -static void doUnmapHeaderFile(SQueryFilesInfo* pVnodeFileInfo) { - 
munmap(pVnodeFileInfo->pHeaderFileData, pVnodeFileInfo->headFileSize); - pVnodeFileInfo->pHeaderFileData = NULL; - pVnodeFileInfo->headFileSize = -1; + return TSDB_CODE_SUCCESS; } -static void doCloseOpenedFileData(SQueryFilesInfo* pVnodeFileInfo) { +static void doCloseQueryFiles(SQueryFilesInfo *pVnodeFileInfo) { if (pVnodeFileInfo->current >= 0) { - assert(pVnodeFileInfo->current < pVnodeFileInfo->numOfFiles && pVnodeFileInfo->current >= 0); - - doUnmapHeaderFile(pVnodeFileInfo); + + pVnodeFileInfo->headerFileSize = -1; + doCloseQueryFileInfoFD(pVnodeFileInfo); - doInitQueryFileInfoFD(pVnodeFileInfo); } assert(pVnodeFileInfo->current == -1); } /** - * mmap the data file into memory. For each query, only one header file is allowed to mmap into memory, in order to - * avoid too many memory mapped files at the save time to cause OS return the message of "Cannot allocate memory", - * during query processing. + * For each query, only one header file along with corresponding files is opened, in order to + * avoid too many memory files opened at the same time. 
* * @param pRuntimeEnv * @param fileIndex - * @return the return value may be null, so any invoker needs to check the returned value + * @return -1 failed, 0 success */ -char *vnodeGetHeaderFileData(SQueryRuntimeEnv *pRuntimeEnv, int32_t vnodeId, int32_t fileIndex) { +int32_t vnodeGetHeaderFile(SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIndex) { assert(fileIndex >= 0 && fileIndex < pRuntimeEnv->vnodeFileInfo.numOfFiles); SQuery *pQuery = pRuntimeEnv->pQuery; SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); // only for log output SQueryFilesInfo *pVnodeFileInfo = &pRuntimeEnv->vnodeFileInfo; - - if (pVnodeFileInfo->current != fileIndex || pVnodeFileInfo->pHeaderFileData == NULL) { + + if (pVnodeFileInfo->current != fileIndex) { if (pVnodeFileInfo->current >= 0) { - assert(pVnodeFileInfo->pHeaderFileData != NULL); + assert(pVnodeFileInfo->headerFileSize > 0); } - + // do close the current memory mapped header file and corresponding fd - doCloseOpenedFileData(pVnodeFileInfo); - assert(pVnodeFileInfo->pHeaderFileData == NULL); - + doCloseQueryFiles(pVnodeFileInfo); + assert(pVnodeFileInfo->headerFileSize == -1); + // set current opened file Index pVnodeFileInfo->current = fileIndex; - + // set the current opened files(header, data, last) path - vnodeSetOpenedFileNames(pVnodeFileInfo); - - if (doOpenQueryFileData(pQInfo, pVnodeFileInfo, vnodeId) != TSDB_CODE_SUCCESS) { - doCloseOpenedFileData(pVnodeFileInfo); // there may be partially open fd, close it anyway. - return pVnodeFileInfo->pHeaderFileData; + vnodeSetCurrentFileNames(pVnodeFileInfo); + + if (doOpenQueryFile(pQInfo, pVnodeFileInfo) != TSDB_CODE_SUCCESS) { + doCloseQueryFiles(pVnodeFileInfo); // all the fds may be partially opened, close them anyway. 
+ return -1; } } - return pVnodeFileInfo->pHeaderFileData; + return TSDB_CODE_SUCCESS; } /* @@ -429,7 +457,7 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim SQuery *pQuery = pRuntimeEnv->pQuery; SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); - SVnodeCfg * pCfg = &vnodeList[pMeterObj->vnode].cfg; + SVnodeCfg * pCfg = &vnodeList[pMeterObj->vnode].cfg; SHeaderFileInfo *pHeadeFileInfo = &pRuntimeEnv->vnodeFileInfo.pFileInfo[fileIndex]; int64_t st = taosGetTimestampUs(); @@ -445,61 +473,67 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim pSummary->readCompInfo++; pSummary->numOfSeek++; -#if 1 - char *data = vnodeGetHeaderFileData(pRuntimeEnv, pMeterObj->vnode, fileIndex); - if (data == NULL) { + int32_t ret = vnodeGetHeaderFile(pRuntimeEnv, fileIndex); + if (ret != TSDB_CODE_SUCCESS) { return -1; // failed to load the header file data into memory } - -#else - char *data = calloc(1, tmsize + TSDB_FILE_HEADER_LEN); - read(fd, data, tmsize + TSDB_FILE_HEADER_LEN); -#endif + + char * buf = calloc(1, getCompHeaderSegSize(pCfg)); + SQueryFilesInfo *pVnodeFileInfo = &pRuntimeEnv->vnodeFileInfo; + + lseek(pVnodeFileInfo->headerFd, TSDB_FILE_HEADER_LEN, SEEK_SET); + read(pVnodeFileInfo->headerFd, buf, getCompHeaderSegSize(pCfg)); // check the offset value integrity - if (validateHeaderOffsetSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, pMeterObj->vnode, data, - getCompHeaderSegSize(pCfg)) < 0) { + if (validateHeaderOffsetSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, pMeterObj->vnode, + buf - TSDB_FILE_HEADER_LEN, getCompHeaderSegSize(pCfg)) < 0) { + free(buf); return -1; } - int64_t offset = TSDB_FILE_HEADER_LEN + sizeof(SCompHeader) * pMeterObj->sid; - SCompHeader *compHeader = (SCompHeader *)(data + offset); + SCompHeader *compHeader = (SCompHeader *)(buf + sizeof(SCompHeader) * pMeterObj->sid); // no data in this file for specified meter, abort if (compHeader->compInfoOffset 
== 0) { + free(buf); return 0; } // corrupted file may cause the invalid compInfoOffset, check needs - if (validateCompBlockOffset(pQInfo, pMeterObj, compHeader, &pRuntimeEnv->vnodeFileInfo, getCompHeaderStartPosition(pCfg)) < 0) { + if (validateCompBlockOffset(pQInfo, pMeterObj, compHeader, &pRuntimeEnv->vnodeFileInfo, + getCompHeaderStartPosition(pCfg)) < 0) { + free(buf); return -1; } -#if 1 - SCompInfo *compInfo = (SCompInfo *)(data + compHeader->compInfoOffset); -#else - lseek(fd, compHeader->compInfoOffset, SEEK_SET); - SCompInfo CompInfo = {0}; - SCompInfo *compInfo = &CompInfo; - read(fd, compInfo, sizeof(SCompInfo)); -#endif + lseek(pVnodeFileInfo->headerFd, compHeader->compInfoOffset, SEEK_SET); + + SCompInfo compInfo = {0}; + read(pVnodeFileInfo->headerFd, &compInfo, sizeof(SCompInfo)); // check compblock info integrity - if (validateCompBlockInfoSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, pMeterObj->vnode, compInfo, + if (validateCompBlockInfoSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, pMeterObj->vnode, &compInfo, compHeader->compInfoOffset) < 0) { + free(buf); return -1; } - if (compInfo->numOfBlocks <= 0 || compInfo->uid != pMeterObj->uid) { + if (compInfo.numOfBlocks <= 0 || compInfo.uid != pMeterObj->uid) { + free(buf); return 0; } // free allocated SField data vnodeFreeFieldsEx(pRuntimeEnv); - pQuery->numOfBlocks = (int32_t)compInfo->numOfBlocks; + pQuery->numOfBlocks = (int32_t)compInfo.numOfBlocks; - int32_t compBlockSize = compInfo->numOfBlocks * sizeof(SCompBlock); - size_t bufferSize = compBlockSize + POINTER_BYTES * compInfo->numOfBlocks; + /* + * +-------------+-----------+----------------+ + * | comp block | checksum | SField Pointer | + * +-------------+-----------+----------------+ + */ + int32_t compBlockSize = compInfo.numOfBlocks * sizeof(SCompBlock); + size_t bufferSize = compBlockSize + sizeof(TSCKSUM) + POINTER_BYTES * pQuery->numOfBlocks; // prepare buffer to hold compblock data if 
(pQuery->blockBufferSize != bufferSize) { @@ -507,24 +541,20 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim pQuery->blockBufferSize = (int32_t)bufferSize; } - memset(pQuery->pBlock, 0, (size_t)pQuery->blockBufferSize); + memset(pQuery->pBlock, 0, bufferSize); -#if 1 - memcpy(pQuery->pBlock, (char *)compInfo + sizeof(SCompInfo), (size_t)compBlockSize); - TSCKSUM checksum = *(TSCKSUM *)((char *)compInfo + sizeof(SCompInfo) + compBlockSize); -#else - TSCKSUM checksum; - read(fd, pQuery->pBlock, compBlockSize); - read(fd, &checksum, sizeof(TSCKSUM)); -#endif + // read data: comp block + checksum + read(pVnodeFileInfo->headerFd, pQuery->pBlock, compBlockSize + sizeof(TSCKSUM)); + TSCKSUM checksum = *(TSCKSUM *)((char *)pQuery->pBlock + compBlockSize); // check comp block integrity - if (validateCompBlockSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, compInfo, (char *)pQuery->pBlock, + if (validateCompBlockSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, &compInfo, (char *)pQuery->pBlock, pMeterObj->vnode, checksum) < 0) { + free(buf); return -1; } - pQuery->pFields = (SField **)((char *)pQuery->pBlock + compBlockSize); + pQuery->pFields = (SField **)((char *)pQuery->pBlock + compBlockSize + sizeof(TSCKSUM)); vnodeSetCompBlockInfoLoaded(pRuntimeEnv, fileIndex, pMeterObj->sid); int64_t et = taosGetTimestampUs(); @@ -535,6 +565,7 @@ static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntim pSummary->totalCompInfoSize += compBlockSize; pSummary->loadCompInfoUs += (et - st); + free(buf); return pQuery->numOfBlocks; } @@ -555,7 +586,8 @@ static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t StartQue char *primaryColumnData, int32_t size, int32_t functionId, SField *pField, bool hasNull, int32_t blockStatus, void *param, int32_t scanFlag); -void createGroupResultBuf(SQuery *pQuery, SOutputRes *pOneResult, bool isMetricQuery); +void createQueryResultBuf(SQueryRuntimeEnv 
*pRuntimeEnv, SOutputRes *pResultRow, bool isSTableQuery, SPosInfo *posInfo); + static void destroyGroupResultBuf(SOutputRes *pOneOutputRes, int32_t nOutputCols); static int32_t binarySearchForBlockImpl(SCompBlock *pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) { @@ -735,15 +767,14 @@ static int32_t loadColumnIntoMem(SQuery *pQuery, SQueryFilesInfo *pQueryFileInfo // load checksum TSCKSUM checksum = 0; - ret = readDataFromDiskFile(fd, pQInfo, pQueryFileInfo, (char *)&checksum, offset + pFields[col].len, - sizeof(TSCKSUM)); + ret = readDataFromDiskFile(fd, pQInfo, pQueryFileInfo, (char *)&checksum, offset + pFields[col].len, sizeof(TSCKSUM)); if (ret != 0) { return ret; } // check column data integrity if (checksum != taosCalcChecksum(0, (const uint8_t *)dst, pFields[col].len)) { - dLError("QInfo:%p, column data checksum error, file:%s, col: %d, offset:%ld", GET_QINFO_ADDR(pQuery), + dLError("QInfo:%p, column data checksum error, file:%s, col: %d, offset:%" PRId64, GET_QINFO_ADDR(pQuery), pQueryFileInfo->dataFilePath, col, offset); return -1; @@ -758,11 +789,11 @@ static int32_t loadColumnIntoMem(SQuery *pQuery, SQueryFilesInfo *pQueryFileInfo } static int32_t loadDataBlockFieldsInfo(SQueryRuntimeEnv *pRuntimeEnv, SCompBlock *pBlock, SField **pField) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); - SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + SQuery * pQuery = pRuntimeEnv->pQuery; + SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; SQueryFilesInfo *pVnodeFilesInfo = &pRuntimeEnv->vnodeFileInfo; - + size_t size = sizeof(SField) * (pBlock->numOfCols) + sizeof(TSCKSUM); // if *pField != NULL, this block is loaded once, in current query do nothing @@ -785,8 +816,8 @@ static int32_t loadDataBlockFieldsInfo(SQueryRuntimeEnv *pRuntimeEnv, SCompBlock // check fields integrity if (!taosCheckChecksumWhole((uint8_t *)(*pField), size)) { - dLError("QInfo:%p vid:%d 
sid:%d id:%s, slot:%d, failed to read sfields, file:%s, sfields area broken:%lld", pQInfo, - pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pVnodeFilesInfo->dataFilePath, + dLError("QInfo:%p vid:%d sid:%d id:%s, slot:%d, failed to read sfields, file:%s, sfields area broken:%" PRId64, + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pVnodeFilesInfo->dataFilePath, pBlock->offset); return -1; } @@ -806,6 +837,21 @@ static void fillWithNull(SQuery *pQuery, char *dst, int32_t col, int32_t numOfPo setNullN(dst, type, bytes, numOfPoints); } +static int32_t loadPrimaryTSColumn(SQueryRuntimeEnv *pRuntimeEnv, SCompBlock *pBlock, SField **pField, + int32_t *columnBytes) { + SQuery *pQuery = pRuntimeEnv->pQuery; + assert(PRIMARY_TSCOL_LOADED(pQuery) == false); + + if (columnBytes != NULL) { + (*columnBytes) += (*pField)[PRIMARYKEY_TIMESTAMP_COL_INDEX].len + sizeof(TSCKSUM); + } + + int32_t ret = loadColumnIntoMem(pQuery, &pRuntimeEnv->vnodeFileInfo, pBlock, *pField, PRIMARYKEY_TIMESTAMP_COL_INDEX, + pRuntimeEnv->primaryColBuffer, pRuntimeEnv->unzipBuffer, + pRuntimeEnv->secondaryUnzipBuffer, pRuntimeEnv->unzipBufSize); + return ret; +} + static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIdx, bool loadPrimaryCol, bool loadSField) { int32_t i = 0, j = 0; @@ -815,16 +861,40 @@ static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryR SData ** sdata = pRuntimeEnv->colDataBuffer; assert(fileIdx == pRuntimeEnv->vnodeFileInfo.current); - - SData ** primaryTSBuf = &pRuntimeEnv->primaryColBuffer; - void * tmpBuf = pRuntimeEnv->unzipBuffer; - if (vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, fileIdx)) { - dTrace("QInfo:%p vid:%d sid:%d id:%s, data block has been loaded, ts:%d, slot:%d, brange:%lld-%lld, rows:%d", - GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, loadPrimaryCol, pQuery->slot, - pBlock->keyFirst, 
pBlock->keyLast, pBlock->numOfPoints); + SData **primaryTSBuf = &pRuntimeEnv->primaryColBuffer; + void * tmpBuf = pRuntimeEnv->unzipBuffer; + int32_t columnBytes = 0; - return 0; + SQueryCostSummary *pSummary = &pRuntimeEnv->summary; + + int32_t status = vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, fileIdx, loadPrimaryCol); + if (status == DISK_BLOCK_NO_NEED_TO_LOAD) { + dTrace( + "QInfo:%p vid:%d sid:%d id:%s, fileId:%d, data block has been loaded, no need to load again, ts:%d, slot:%d," + " brange:%lld-%lld, rows:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, loadPrimaryCol, + pQuery->slot, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints); + + if (loadSField && (pQuery->pFields == NULL || pQuery->pFields[pQuery->slot] == NULL)) { + loadDataBlockFieldsInfo(pRuntimeEnv, pBlock, &pQuery->pFields[pQuery->slot]); + } + + return TSDB_CODE_SUCCESS; + } else if (status == DISK_BLOCK_LOAD_TS) { + dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, data block has been loaded, incrementally load ts", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId); + + assert(PRIMARY_TSCOL_LOADED(pQuery) == false && loadSField == true); + if (pQuery->pFields == NULL || pQuery->pFields[pQuery->slot] == NULL) { + loadDataBlockFieldsInfo(pRuntimeEnv, pBlock, &pQuery->pFields[pQuery->slot]); + } + + // load primary timestamp + int32_t ret = loadPrimaryTSColumn(pRuntimeEnv, pBlock, pField, &columnBytes); + + vnodeSetDataBlockInfoLoaded(pRuntimeEnv, pMeterObj, fileIdx, loadPrimaryCol); + return ret; } /* failed to load fields info, return with error info */ @@ -832,21 +902,15 @@ static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryR return -1; } - SQueryCostSummary *pSummary = &pRuntimeEnv->summary; - int32_t columnBytes = 0; - int64_t st = taosGetTimestampUs(); if (loadPrimaryCol) { if (PRIMARY_TSCOL_LOADED(pQuery)) { *primaryTSBuf = sdata[0]; } else { - 
columnBytes += (*pField)[PRIMARYKEY_TIMESTAMP_COL_INDEX].len + sizeof(TSCKSUM); - int32_t ret = - loadColumnIntoMem(pQuery, &pRuntimeEnv->vnodeFileInfo, pBlock, *pField, PRIMARYKEY_TIMESTAMP_COL_INDEX, *primaryTSBuf, - tmpBuf, pRuntimeEnv->secondaryUnzipBuffer, pRuntimeEnv->unzipBufSize); - if (ret != 0) { - return -1; + int32_t ret = loadPrimaryTSColumn(pRuntimeEnv, pBlock, pField, &columnBytes); + if (ret != TSDB_CODE_SUCCESS) { + return ret; } pSummary->numOfSeek++; @@ -920,12 +984,12 @@ static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryR pSummary->loadBlocksUs += (et - st); pSummary->readDiskBlocks++; - vnodeSetDataBlockInfoLoaded(pRuntimeEnv, pMeterObj, fileIdx); + vnodeSetDataBlockInfoLoaded(pRuntimeEnv, pMeterObj, fileIdx, loadPrimaryCol); return ret; } // todo ignore the blockType, pass the pQuery into this function -SBlockInfo getBlockBasicInfo(void *pBlock, int32_t blockType) { +SBlockInfo getBlockBasicInfo(SQueryRuntimeEnv *pRuntimeEnv, void *pBlock, int32_t blockType) { SBlockInfo blockInfo = {0}; if (IS_FILE_BLOCK(blockType)) { SCompBlock *pDiskBlock = (SCompBlock *)pBlock; @@ -937,8 +1001,8 @@ SBlockInfo getBlockBasicInfo(void *pBlock, int32_t blockType) { } else { SCacheBlock *pCacheBlock = (SCacheBlock *)pBlock; - blockInfo.keyFirst = getTimestampInCacheBlock(pCacheBlock, 0); - blockInfo.keyLast = getTimestampInCacheBlock(pCacheBlock, pCacheBlock->numOfPoints - 1); + blockInfo.keyFirst = getTimestampInCacheBlock(pRuntimeEnv, pCacheBlock, 0); + blockInfo.keyLast = getTimestampInCacheBlock(pRuntimeEnv, pCacheBlock, pCacheBlock->numOfPoints - 1); blockInfo.size = pCacheBlock->numOfPoints; blockInfo.numOfCols = pCacheBlock->pMeterObj->numOfColumns; } @@ -971,7 +1035,7 @@ static bool checkQueryRangeAgainstNextBlock(SBlockInfo *pBlockInfo, SQueryRuntim */ static bool queryCompleteInBlock(SQuery *pQuery, SBlockInfo *pBlockInfo, int32_t forwardStep) { if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)) { - 
assert(pQuery->checkBufferInLoop == 1 && pQuery->over == QUERY_RESBUF_FULL && pQuery->pointsOffset == 0); + // assert(pQuery->checkBufferInLoop == 1 && pQuery->over == QUERY_RESBUF_FULL && pQuery->pointsOffset == 0); assert((QUERY_IS_ASC_QUERY(pQuery) && forwardStep + pQuery->pos <= pBlockInfo->size) || (!QUERY_IS_ASC_QUERY(pQuery) && pQuery->pos - forwardStep + 1 >= 0)); @@ -1008,36 +1072,169 @@ void savePointPosition(SPositionInfo *position, int32_t fileId, int32_t slot, in position->pos = pos; } -static FORCE_INLINE void saveNextAccessPositionInCache(SPositionInfo *position, int32_t slotIdx, int32_t pos) { - savePointPosition(position, -1, slotIdx, pos); +bool isCacheBlockValid(SQuery *pQuery, SCacheBlock *pBlock, SMeterObj *pMeterObj) { + if (pMeterObj != pBlock->pMeterObj || pBlock->blockId > pQuery->blockId) { + SMeterObj *pNewMeterObj = pBlock->pMeterObj; + char * id = (pNewMeterObj != NULL) ? pNewMeterObj->meterId : NULL; + + dWarn( + "QInfo:%p vid:%d sid:%d id:%s, cache block is overwritten, slot:%d blockId:%d qBlockId:%d, meterObj:%p, " + "blockMeterObj:%p, blockMeter id:%s, first:%d, last:%d, numOfBlocks:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pBlock->blockId, + pQuery->blockId, pMeterObj, pNewMeterObj, id, pQuery->firstSlot, pQuery->currentSlot, pQuery->numOfBlocks); + + return false; + } + + /* + * The check for empty block: + * pBlock->numOfPoints == 0. There is a empty block, which is caused by allocate-and-write data into cache + * procedure. The block has been allocated but data has not been put into yet. If the block is the last + * block(newly allocated block), abort query. Otherwise, skip it and go on. + */ + if (pBlock->numOfPoints == 0) { + dWarn( + "QInfo:%p vid:%d sid:%d id:%s, cache block is empty. 
slot:%d first:%d, last:%d, numOfBlocks:%d," + "allocated but not write data yet.", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pQuery->firstSlot, + pQuery->currentSlot, pQuery->numOfBlocks); + + return false; + } + + return true; } // todo all functions that call this function should check the returned data blocks status -SCacheBlock *getCacheDataBlock(SMeterObj *pMeterObj, SQuery *pQuery, int32_t slot) { +SCacheBlock *getCacheDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntimeEnv, int32_t slot) { + SQuery *pQuery = pRuntimeEnv->pQuery; + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; - if (pCacheInfo == NULL || pCacheInfo->cacheBlocks == NULL || slot < 0) { + if (pCacheInfo == NULL || pCacheInfo->cacheBlocks == NULL || slot < 0 || slot >= pCacheInfo->maxBlocks) { return NULL; } - assert(slot < pCacheInfo->maxBlocks); + getBasicCacheInfoSnapshot(pQuery, pCacheInfo, pMeterObj->vnode); SCacheBlock *pBlock = pCacheInfo->cacheBlocks[slot]; - if (pBlock == NULL) { - dError("QInfo:%p NULL Block In Cache, available block:%d, last block:%d, accessed null block:%d, pBlockId:%d", - GET_QINFO_ADDR(pQuery), pCacheInfo->numOfBlocks, pCacheInfo->currentSlot, slot, pQuery->blockId); + if (pBlock == NULL) { // the cache info snapshot must be existed. 
+ int32_t curNumOfBlocks = pCacheInfo->numOfBlocks; + int32_t curSlot = pCacheInfo->currentSlot; + + dError( + "QInfo:%p NULL Block In Cache, snapshot (available blocks:%d, last block:%d), current (available blocks:%d, " + "last block:%d), accessed null block:%d, pBlockId:%d", + GET_QINFO_ADDR(pQuery), pQuery->numOfBlocks, pQuery->currentSlot, curNumOfBlocks, curSlot, slot, + pQuery->blockId); + return NULL; } - if (pMeterObj != pBlock->pMeterObj || pBlock->blockId > pQuery->blockId) { - dWarn( - "QInfo:%p vid:%d sid:%d id:%s, cache block is overwritten, slot:%d blockId:%d qBlockId:%d, meterObj:%p, " - "blockMeterObj:%p", - GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pBlock->blockId, - pQuery->blockId, pMeterObj, pBlock->pMeterObj); + // block is empty or block does not belongs to current table, return NULL value + if (!isCacheBlockValid(pQuery, pBlock, pMeterObj)) { + return NULL; + } + + // the accessed cache block has been loaded already, return directly + if (vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, -1, true) == DISK_BLOCK_NO_NEED_TO_LOAD) { + TSKEY skey = getTimestampInCacheBlock(pRuntimeEnv, pBlock, 0); + TSKEY ekey = getTimestampInCacheBlock(pRuntimeEnv, pBlock, pBlock->numOfPoints - 1); + + dTrace( + "QInfo:%p vid:%d sid:%d id:%s, fileId:%d, cache block has been loaded, no need to load again, ts:%d, " + "slot:%d, brange:%lld-%lld, rows:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, 1, pQuery->slot, + skey, ekey, pBlock->numOfPoints); + + return &pRuntimeEnv->cacheBlock; + } + + // keep the structure as well as the block data into local buffer + memcpy(&pRuntimeEnv->cacheBlock, pBlock, sizeof(SCacheBlock)); + + SCacheBlock *pNewBlock = &pRuntimeEnv->cacheBlock; + + // the commit data points will be ignored + int32_t offset = 0; + int32_t numOfPoints = pNewBlock->numOfPoints; + if (pQuery->firstSlot == pQuery->commitSlot) { + assert(pQuery->commitPoint 
>= 0 && pQuery->commitPoint <= pNewBlock->numOfPoints); + + offset = pQuery->commitPoint; + numOfPoints = pNewBlock->numOfPoints - offset; + + if (offset != 0) { + dTrace( + "%p ignore the data in cache block that are commit already, numOfblock:%d slot:%d ignore points:%d. " + "first:%d last:%d", + GET_QINFO_ADDR(pQuery), pQuery->numOfBlocks, pQuery->slot, pQuery->commitPoint, pQuery->firstSlot, + pQuery->currentSlot); + } + + pNewBlock->numOfPoints = numOfPoints; + + // current block are all commit already, ignore it + if (pNewBlock->numOfPoints == 0) { + dTrace( + "%p ignore current in cache block that are all commit already, numOfblock:%d slot:%d" + "first:%d last:%d", + GET_QINFO_ADDR(pQuery), pQuery->numOfBlocks, pQuery->slot, pQuery->firstSlot, pQuery->currentSlot); + return NULL; + } + } + + // keep the data from in cache into the temporarily allocated buffer + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + SColumnInfoEx *pColumnInfoEx = &pQuery->colList[i]; + + int16_t columnIndex = pColumnInfoEx->colIdx; + int16_t columnIndexInBuf = pColumnInfoEx->colIdxInBuf; + + SColumn *pCol = &pMeterObj->schema[columnIndex]; + + int16_t bytes = pCol->bytes; + int16_t type = pCol->type; + + char *dst = pRuntimeEnv->colDataBuffer[columnIndexInBuf]->data; + + if (pQuery->colList[i].colIdx != -1) { + assert(pCol->colId == pQuery->colList[i].data.colId && bytes == pColumnInfoEx->data.bytes && + type == pColumnInfoEx->data.type); + + memcpy(dst, pBlock->offset[columnIndex] + offset * bytes, numOfPoints * bytes); + } else { + setNullN(dst, type, bytes, numOfPoints); + } + } + + assert(numOfPoints == pNewBlock->numOfPoints); + + // if the primary timestamp are not loaded by default, always load it here into buffer + if (!PRIMARY_TSCOL_LOADED(pQuery)) { + memcpy(pRuntimeEnv->primaryColBuffer->data, pBlock->offset[0] + offset * TSDB_KEYSIZE, TSDB_KEYSIZE * numOfPoints); + } + + pQuery->fileId = -1; + pQuery->slot = slot; + + if (!isCacheBlockValid(pQuery, pNewBlock, 
pMeterObj)) { return NULL; } - return pBlock; + /* + * the accessed cache block still belongs to current meterObj, go on + * update the load data block info + */ + vnodeSetDataBlockInfoLoaded(pRuntimeEnv, pMeterObj, -1, true); + + TSKEY skey = getTimestampInCacheBlock(pRuntimeEnv, pNewBlock, 0); + TSKEY ekey = getTimestampInCacheBlock(pRuntimeEnv, pNewBlock, numOfPoints - 1); + + dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, load cache block, ts:%d, slot:%d, brange:%lld-%lld, rows:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, 1, pQuery->slot, + skey, ekey, numOfPoints); + + return pNewBlock; } static SCompBlock *getDiskDataBlock(SQuery *pQuery, int32_t slot) { @@ -1045,17 +1242,19 @@ static SCompBlock *getDiskDataBlock(SQuery *pQuery, int32_t slot) { return &pQuery->pBlock[slot]; } -static void *getGenericDataBlock(SMeterObj *pMeterObj, SQuery *pQuery, int32_t slot) { +static void *getGenericDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntimeEnv, int32_t slot) { + SQuery *pQuery = pRuntimeEnv->pQuery; + if (IS_DISK_DATA_BLOCK(pQuery)) { return getDiskDataBlock(pQuery, slot); } else { - return getCacheDataBlock(pMeterObj, pQuery, slot); + return getCacheDataBlock(pMeterObj, pRuntimeEnv, slot); } } static int32_t getFileIdFromKey(int32_t vid, TSKEY key) { SVnodeObj *pVnode = &vnodeList[vid]; - int64_t delta = (int64_t)pVnode->cfg.daysPerFile * tsMsPerDay[pVnode->cfg.precision]; + int64_t delta = (int64_t)pVnode->cfg.daysPerFile * tsMsPerDay[(uint8_t)pVnode->cfg.precision]; return (int32_t)(key / delta); // set the starting fileId } @@ -1110,7 +1309,7 @@ static bool getQualifiedDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRunti // load first data block into memory failed, caused by disk block error bool blockLoaded = false; - while (blkIdx < pQuery->numOfBlocks) { + while (blkIdx < pQuery->numOfBlocks && blkIdx >= 0) { pQuery->slot = blkIdx; if (loadDataBlockIntoMem(&pQuery->pBlock[pQuery->slot], 
&pQuery->pFields[pQuery->slot], pRuntimeEnv, fid, true, true) == 0) { @@ -1138,14 +1337,6 @@ static bool getQualifiedDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRunti return true; } -static char *doGetDataBlockImpl(const char *sdata, int32_t colIdx, bool isDiskFileBlock) { - if (isDiskFileBlock) { - return ((SData **)sdata)[colIdx]->data; - } else { - return ((SCacheBlock *)sdata)->offset[colIdx]; - } -} - static SField *getFieldInfo(SQuery *pQuery, SBlockInfo *pBlockInfo, SField *pFields, int32_t column) { // no SField info exist, or column index larger than the output column, no result. if (pFields == NULL || column >= pQuery->numOfOutputCols) { @@ -1196,30 +1387,13 @@ static bool hasNullVal(SQuery *pQuery, int32_t col, SBlockInfo *pBlockInfo, SFie return ret; } -static char *doGetDataBlocks(bool isDiskFileBlock, SQueryRuntimeEnv *pRuntimeEnv, char *data, int32_t colIdx, - int32_t colId, int16_t type, int16_t bytes, int32_t tmpBufIndex) { - char *pData = NULL; - - if (isDiskFileBlock) { - pData = doGetDataBlockImpl(data, colIdx, isDiskFileBlock); - } else { - SCacheBlock *pCacheBlock = (SCacheBlock *)data; - SMeterObj * pMeter = pRuntimeEnv->pMeterObj; - - if (colIdx < 0 || pMeter->numOfColumns <= colIdx || pMeter->schema[colIdx].colId != colId) { - // data in cache is not current available, we need fill the data block in null value - pData = pRuntimeEnv->colDataBuffer[tmpBufIndex]->data; - setNullN(pData, type, bytes, pCacheBlock->numOfPoints); - } else { - pData = doGetDataBlockImpl(data, colIdx, isDiskFileBlock); - } - } - +static char *doGetDataBlocks(SQuery *pQuery, SData **data, int32_t colIdx) { + assert(colIdx >= 0 && colIdx < pQuery->numOfCols); + char *pData = data[colIdx]->data; return pData; } -static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, char *data, SArithmeticSupport *sas, int32_t col, - int32_t size, bool isDiskFileBlock) { +static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas, int32_t col, int32_t 
size) { SQuery * pQuery = pRuntimeEnv->pQuery; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; @@ -1238,21 +1412,17 @@ static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, char *data, SArithmeti } for (int32_t i = 0; i < pQuery->numOfCols; ++i) { - int32_t colIdx = isDiskFileBlock ? pQuery->colList[i].colIdxInBuf : pQuery->colList[i].colIdx; - SColumnInfo *pColMsg = &pQuery->colList[i].data; - char * pData = doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, colIdx, pColMsg->colId, pColMsg->type, - pColMsg->bytes, pQuery->colList[i].colIdxInBuf); + char * pData = doGetDataBlocks(pQuery, pRuntimeEnv->colDataBuffer, pQuery->colList[i].colIdxInBuf); sas->elemSize[i] = pColMsg->bytes; sas->data[i] = pData + pCtx->startOffset * sas->elemSize[i]; // start from the offset } + sas->numOfCols = pQuery->numOfCols; sas->offset = 0; } else { // other type of query function SColIndexEx *pCol = &pQuery->pSelectExpr[col].pBase.colInfo; - int32_t colIdx = isDiskFileBlock ? pCol->colIdxInBuf : pCol->colIdx; - if (TSDB_COL_IS_TAG(pCol->flag)) { dataBlock = NULL; } else { @@ -1261,8 +1431,7 @@ static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, char *data, SArithmeti * the remain meter may not have the required column in cache actually. * So, the validation of required column in cache with the corresponding meter schema is reinforced. */ - dataBlock = doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, colIdx, pCol->colId, pCtx[col].inputType, - pCtx[col].inputBytes, pCol->colIdxInBuf); + dataBlock = doGetDataBlocks(pQuery, pRuntimeEnv->colDataBuffer, pCol->colIdxInBuf); } } @@ -1274,31 +1443,28 @@ static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, char *data, SArithmeti * @param pRuntimeEnv * @param forwardStep * @param primaryKeyCol - * @param data * @param pFields * @param isDiskFileBlock * @return the incremental number of output value, so it maybe 0 for fixed number of query, * such as count/min/max etc. 
*/ static int32_t blockwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t forwardStep, TSKEY *primaryKeyCol, - char *data, SField *pFields, SBlockInfo *pBlockInfo, bool isDiskFileBlock) { + SField *pFields, SBlockInfo *pBlockInfo) { SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; SQuery * pQuery = pRuntimeEnv->pQuery; + bool isDiskFileBlock = IS_FILE_BLOCK(pRuntimeEnv->blockStatus); int64_t prevNumOfRes = getNumOfResult(pRuntimeEnv); SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutputCols, sizeof(SArithmeticSupport)); for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; - // if (!functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { - // continue; - // } SField dummyField = {0}; bool hasNull = hasNullVal(pQuery, k, pBlockInfo, pFields, isDiskFileBlock); - char *dataBlock = getDataBlocks(pRuntimeEnv, data, &sasArray[k], k, forwardStep, isDiskFileBlock); + char *dataBlock = getDataBlocks(pRuntimeEnv, &sasArray[k], k, forwardStep); SField *tpField = NULL; @@ -1314,12 +1480,9 @@ static int32_t blockwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t } } - TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->skey : pQuery->ekey; - - int64_t alignedTimestamp = - taosGetIntervalStartTimestamp(ts, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); - setExecParams(pQuery, &pCtx[k], alignedTimestamp, dataBlock, (char *)primaryKeyCol, forwardStep, functionId, - tpField, hasNull, pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); + TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? 
pRuntimeEnv->intervalWindow.skey : pRuntimeEnv->intervalWindow.ekey; + setExecParams(pQuery, &pCtx[k], ts, dataBlock, (char *)primaryKeyCol, forwardStep, functionId, tpField, hasNull, + pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); } /* @@ -1372,7 +1535,7 @@ static bool needToLoadDataBlock(SQuery *pQuery, SField *pField, SQLFunctionCtx * if (!vnodeSupportPrefilter(pFilterInfo->info.data.type)) { continue; } - + // all points in current column are NULL, no need to check its boundary value if (pField[colIndex].numOfNullPoints == numOfTotalPoints) { continue; @@ -1407,104 +1570,300 @@ static bool needToLoadDataBlock(SQuery *pQuery, SField *pField, SQLFunctionCtx * return true; } -static int32_t setGroupResultForKey(SQueryRuntimeEnv *pRuntimeEnv, char *pData, int16_t type, char *columnData) { - SOutputRes *pOutputRes = NULL; +static SOutputRes *doSetSlidingWindowFromKey(SSlidingWindowInfo *pSlidingWindowInfo, char *pData, int16_t bytes, + SWindowStatus **pStatus) { + int32_t p = -1; - // ignore the null value - if (isNull(pData, type)) { - return -1; - } + int32_t *p1 = (int32_t *)taosGetDataFromHashTable(pSlidingWindowInfo->hashList, pData, bytes); + if (p1 != NULL) { + p = *p1; - int64_t t = 0; - switch (type) { - case TSDB_DATA_TYPE_TINYINT: - t = GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - t = GET_INT64_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - t = GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_INT: - default: - t = GET_INT32_VAL(pData); - break; - } + pSlidingWindowInfo->curIndex = p; + if (pStatus != NULL) { + *pStatus = &pSlidingWindowInfo->pStatus[p]; + } + } else { // more than the capacity, reallocate the resources + if (pSlidingWindowInfo->size >= pSlidingWindowInfo->capacity) { + int64_t newCap = pSlidingWindowInfo->capacity * 2; - SOutputRes **p1 = (SOutputRes **)taosGetIntHashData(pRuntimeEnv->hashList, t); - if (p1 != NULL) { - pOutputRes = *p1; - } else { - // more than the threshold number, discard 
data that are not belong to current groups - if (pRuntimeEnv->usedIndex >= 10000) { - return -1; + char *t = realloc(pSlidingWindowInfo->pStatus, newCap * sizeof(SWindowStatus)); + if (t != NULL) { + pSlidingWindowInfo->pStatus = (SWindowStatus *)t; + memset(&pSlidingWindowInfo->pStatus[pSlidingWindowInfo->capacity], 0, sizeof(SWindowStatus) * pSlidingWindowInfo->capacity); + } else { + // todo + } + + pSlidingWindowInfo->capacity = newCap; } // add a new result set for a new group - char *b = (char *)&pRuntimeEnv->pResult[pRuntimeEnv->usedIndex++]; - pOutputRes = *(SOutputRes **)taosAddIntHash(pRuntimeEnv->hashList, t, (char *)&b); - } + if (pStatus != NULL) { + *pStatus = &pSlidingWindowInfo->pStatus[pSlidingWindowInfo->size]; + } - setGroupOutputBuffer(pRuntimeEnv, pOutputRes); - initCtxOutputBuf(pRuntimeEnv); + p = pSlidingWindowInfo->size; + pSlidingWindowInfo->curIndex = pSlidingWindowInfo->size; - return TSDB_CODE_SUCCESS; + pSlidingWindowInfo->size += 1; + taosAddToHashTable(pSlidingWindowInfo->hashList, pData, bytes, (char *)&pSlidingWindowInfo->curIndex, sizeof(int32_t)); + } + + return &pSlidingWindowInfo->pResult[p]; } -static char *getGroupbyColumnData(SQueryRuntimeEnv *pRuntimeEnv, SField *pFields, SBlockInfo *pBlockInfo, char *data, - bool isDiskFileBlock, int16_t *type, int16_t *bytes) { - SQuery *pQuery = pRuntimeEnv->pQuery; - char * groupbyColumnData = NULL; +static int32_t initSlidingWindowInfo(SSlidingWindowInfo *pSlidingWindowInfo, int32_t threshold, int16_t type, int32_t rowSizes, + SOutputRes *pRes) { + pSlidingWindowInfo->capacity = threshold; + pSlidingWindowInfo->threshold = threshold; - int32_t col = 0; - int16_t colIndexInBuf = 0; + pSlidingWindowInfo->type = type; - SSqlGroupbyExpr *pGroupbyExpr = pQuery->pGroupbyExpr; + _hash_fn_t fn = taosGetDefaultHashFunction(type); + pSlidingWindowInfo->hashList = taosInitHashTable(threshold, fn, false); - for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) { - if 
(pGroupbyExpr->columnInfo[k].flag == TSDB_COL_TAG) { - continue; + pSlidingWindowInfo->curIndex = -1; + pSlidingWindowInfo->size = 0; + pSlidingWindowInfo->pResult = pRes; + +// createResultBuf(&pSlidingWindowInfo->pResultBuf, 10, rowSizes); + + pSlidingWindowInfo->pStatus = calloc(threshold, sizeof(SWindowStatus)); +// pSlidingWindowInfo->pResultInfo = calloc(threshold, POINTER_BYTES); + +// for(int32_t i = 0; i < threshold; ++i) { +// pSlidingWindowInfo->pResultInfo[i] = calloc((size_t)numOfOutput, sizeof(SResultInfo)); + + +// } + + if (pSlidingWindowInfo->pStatus == NULL || pSlidingWindowInfo->hashList == NULL) { + return -1; + } + + return TSDB_CODE_SUCCESS; +} + +static void destroySlidingWindowInfo(SSlidingWindowInfo *pSlidingWindowInfo) { + if (pSlidingWindowInfo == NULL || pSlidingWindowInfo->capacity == 0) { + assert(pSlidingWindowInfo->hashList == NULL && pSlidingWindowInfo->pResult == NULL); + return; + } + + taosCleanUpHashTable(pSlidingWindowInfo->hashList); +// destroyResultBuf(pSlidingWindowInfo->pResultBuf); + + tfree(pSlidingWindowInfo->pStatus); +} + +void resetSlidingWindowInfo(SQueryRuntimeEnv *pRuntimeEnv, SSlidingWindowInfo *pSlidingWindowInfo) { + if (pSlidingWindowInfo == NULL || pSlidingWindowInfo->capacity == 0) { + return; + } + + for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SOutputRes *pOneRes = &pSlidingWindowInfo->pResult[i]; + clearGroupResultBuf(pRuntimeEnv, pOneRes); + } + + memset(pSlidingWindowInfo->pStatus, 0, sizeof(SWindowStatus) * pSlidingWindowInfo->capacity); + + pSlidingWindowInfo->curIndex = -1; + taosCleanUpHashTable(pSlidingWindowInfo->hashList); + pSlidingWindowInfo->size = 0; + + _hash_fn_t fn = taosGetDefaultHashFunction(pSlidingWindowInfo->type); + pSlidingWindowInfo->hashList = taosInitHashTable(pSlidingWindowInfo->capacity, fn, false); + + pSlidingWindowInfo->startTime = 0; + pSlidingWindowInfo->prevSKey = 0; +} + +void clearCompletedSlidingWindows(SQueryRuntimeEnv* pRuntimeEnv) { + 
SSlidingWindowInfo* pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + if (pSlidingWindowInfo == NULL || pSlidingWindowInfo->capacity == 0 || pSlidingWindowInfo->size == 0) { + return; + } + + int32_t i = 0; + for (i = 0; i < pSlidingWindowInfo->size; ++i) { + SWindowStatus *pStatus = &pSlidingWindowInfo->pStatus[i]; + if (pStatus->closed) { // remove the window slot from hash table + taosDeleteFromHashTable(pSlidingWindowInfo->hashList, (const char *)&pStatus->window.skey, TSDB_KEYSIZE); + } else { + break; } + } - int32_t colId = pGroupbyExpr->columnInfo[k].colId; + if (i == 0) { + return; + } - if (isDiskFileBlock) { // get the required column data in file block according the column ID - for (int32_t i = 0; i < pBlockInfo->numOfCols; ++i) { - if (colId == pFields[i].colId) { - *type = pFields[i].type; - *bytes = pFields[i].bytes; - col = i; - break; - } - } + int32_t remain = pSlidingWindowInfo->size - i; + + //clear remain list + memmove(pSlidingWindowInfo->pStatus, &pSlidingWindowInfo->pStatus[i], remain * sizeof(SWindowStatus)); + memset(&pSlidingWindowInfo->pStatus[remain], 0, (pSlidingWindowInfo->capacity - remain) * sizeof(SWindowStatus)); + + for(int32_t k = 0; k < remain; ++k) { + copyGroupResultBuf(pRuntimeEnv, &pSlidingWindowInfo->pResult[k], &pSlidingWindowInfo->pResult[i + k]); + } + + for(int32_t k = remain; k < pSlidingWindowInfo->size; ++k) { + SOutputRes *pOneRes = &pSlidingWindowInfo->pResult[k]; + clearGroupResultBuf(pRuntimeEnv, pOneRes); + } - // this column may not in current data block and also not in the required columns list - for (int32_t i = 0; i < pQuery->numOfCols; ++i) { - if (colId == pQuery->colList[i].data.colId) { - colIndexInBuf = i; - break; - } + pSlidingWindowInfo->size = remain; + + for(int32_t k = 0; k < pSlidingWindowInfo->size; ++k) { + SWindowStatus* pStatus = &pSlidingWindowInfo->pStatus[k]; + int32_t *p = (int32_t*) taosGetDataFromHashTable(pSlidingWindowInfo->hashList, (const char*)&pStatus->window.skey, 
TSDB_KEYSIZE); + int32_t v = *p; + v = (v - i); + + taosDeleteFromHashTable(pSlidingWindowInfo->hashList, (const char *)&pStatus->window.skey, TSDB_KEYSIZE); + + taosAddToHashTable(pSlidingWindowInfo->hashList, (const char*)&pStatus->window.skey, TSDB_KEYSIZE, + (char *)&v, sizeof(int32_t)); + } + + pSlidingWindowInfo->curIndex = -1; +} + +int32_t numOfClosedSlidingWindow(SSlidingWindowInfo *pSlidingWindowInfo) { + int32_t i = 0; + while(i < pSlidingWindowInfo->size && pSlidingWindowInfo->pStatus[i].closed) { + ++i; + } + + return i; +} + +void closeSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo, int32_t slot) { + assert(slot >= 0 && slot < pSlidingWindowInfo->size); + SWindowStatus* pStatus = &pSlidingWindowInfo->pStatus[slot]; + pStatus->closed = true; +} + +void closeAllSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo) { + assert(pSlidingWindowInfo->size >=0 && pSlidingWindowInfo->capacity >= pSlidingWindowInfo->size); + + for(int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SWindowStatus* pStatus = &pSlidingWindowInfo->pStatus[i]; + pStatus->closed = true; + } +} + +static SWindowStatus* getSlidingWindowStatus(SSlidingWindowInfo *pSlidingWindowInfo, int32_t slot) { + return &pSlidingWindowInfo->pStatus[slot]; +} + +static bool slidingWindowClosed(SSlidingWindowInfo* pSlidingWindowInfo, int32_t slot) { + return (pSlidingWindowInfo->pStatus[slot].closed == true); +} + +static int32_t curSlidingWindow(SSlidingWindowInfo *pSlidingWindowInfo) { + assert(pSlidingWindowInfo->curIndex >= 0 && pSlidingWindowInfo->curIndex < pSlidingWindowInfo->size); + + return pSlidingWindowInfo->curIndex; +} + +// get the correct sliding window according to the handled timestamp +static STimeWindow getActiveSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo, int64_t ts, SQuery* pQuery) { + STimeWindow w = {0}; + + if (pSlidingWindowInfo->curIndex == -1) { // the first window, from the prevous stored value + w.skey = pSlidingWindowInfo->prevSKey; + w.ekey = w.skey + 
pQuery->nAggTimeInterval - 1; + + } else { + SWindowStatus* pStatus = getSlidingWindowStatus(pSlidingWindowInfo, curSlidingWindow(pSlidingWindowInfo)); + + if (pStatus->window.skey <= ts && pStatus->window.ekey >= ts) { + w = pStatus->window; + } else { + int64_t st = pStatus->window.skey; + + while (st > ts) { + st -= pQuery->slidingTime; + } + + while ((st + pQuery->nAggTimeInterval - 1) < ts) { + st += pQuery->slidingTime; } - } else { // get the required data column in cache - SColumn *pSchema = pRuntimeEnv->pMeterObj->schema; + + w.skey = st; + w.ekey = w.skey + pQuery->nAggTimeInterval - 1; + } + } + + assert(ts >= w.skey && ts <= w.ekey); + return w; +} - for (int32_t i = 0; i < pRuntimeEnv->pMeterObj->numOfColumns; ++i) { - if (colId == pSchema[i].colId) { - *type = pSchema[i].type; - *bytes = pSchema[i].bytes; +static int32_t setGroupResultFromKey(SQueryRuntimeEnv *pRuntimeEnv, char *pData, int16_t type, int16_t bytes) { + if (isNull(pData, type)) { // ignore the null value + return -1; + } - col = i; - colIndexInBuf = i; - break; - } + SOutputRes *pOutputRes = doSetSlidingWindowFromKey(&pRuntimeEnv->swindowResInfo, pData, bytes, NULL); + if (pOutputRes == NULL) { + return -1; + } + + setGroupOutputBuffer(pRuntimeEnv, pOutputRes); + initCtxOutputBuf(pRuntimeEnv); + + return TSDB_CODE_SUCCESS; +} + +static int32_t setSlidingWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow *pTimeWindow) { + assert(pTimeWindow->skey < pTimeWindow->ekey); + + int64_t st = pTimeWindow->skey; + + SWindowStatus *pStatus = NULL; + SOutputRes* pOutputRes = doSetSlidingWindowFromKey(&pRuntimeEnv->swindowResInfo, (char *)&st, TSDB_KEYSIZE, + &pStatus); + + if (pOutputRes == NULL) { + return -1; + } + + pStatus->window = *pTimeWindow; + setGroupOutputBuffer(pRuntimeEnv, pOutputRes); + initCtxOutputBuf(pRuntimeEnv); + + return TSDB_CODE_SUCCESS; +} + +static char *getGroupbyColumnData(SQuery *pQuery, SData **data, int16_t *type, int16_t *bytes) { + char *groupbyColumnData = 
NULL; + + SSqlGroupbyExpr *pGroupbyExpr = pQuery->pGroupbyExpr; + + for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) { + if (pGroupbyExpr->columnInfo[k].flag == TSDB_COL_TAG) { + continue; + } + + int16_t colIndex = -1; + int32_t colId = pGroupbyExpr->columnInfo[k].colId; + + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + if (pQuery->colList[i].data.colId == colId) { + colIndex = i; + break; } } - int32_t columnIndex = isDiskFileBlock ? colIndexInBuf : col; - groupbyColumnData = - doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, columnIndex, colId, *type, *bytes, colIndexInBuf); + assert(colIndex >= 0 && colIndex < pQuery->numOfCols); + *type = pQuery->colList[colIndex].data.type; + *bytes = pQuery->colList[colIndex].data.bytes; + + groupbyColumnData = doGetDataBlocks(pQuery, data, pQuery->colList[colIndex].colIdxInBuf); break; } @@ -1525,7 +1884,8 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { TSKEY key = *(TSKEY *)(pCtx[0].aInputElemBuf + TSDB_KEYSIZE * offset); #if defined(_DEBUG_VIEW) - printf("elem in comp ts file:%lld, key:%lld, tag:%d, id:%s, query order:%d, ts order:%d, traverse:%d, index:%d\n", + printf("elem in comp ts file:%" PRId64 ", key:%" PRId64 + ", tag:%d, id:%s, query order:%d, ts order:%d, traverse:%d, index:%d\n", elem.ts, key, elem.tag, pRuntimeEnv->pMeterObj->meterId, pQuery->order.order, pRuntimeEnv->pTSBuf->tsOrder, pRuntimeEnv->pTSBuf->cur.order, pRuntimeEnv->pTSBuf->cur.tsIndex); #endif @@ -1554,6 +1914,7 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx return false; } + // in the supplementary scan, only the following functions need to be executed if (!IS_MASTER_SCAN(pRuntimeEnv) && !(functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) { @@ -1564,10 +1925,13 @@ static bool 
functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx } static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t *forwardStep, TSKEY *primaryKeyCol, - char *data, SField *pFields, SBlockInfo *pBlockInfo, bool isDiskFileBlock) { + SField *pFields, SBlockInfo *pBlockInfo) { SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; SQuery * pQuery = pRuntimeEnv->pQuery; + bool isDiskFileBlock = IS_FILE_BLOCK(pRuntimeEnv->blockStatus); + SData **data = pRuntimeEnv->colDataBuffer; + int64_t prevNumOfRes = 0; bool groupbyStateValue = isGroupbyNormalCol(pQuery->pGroupbyExpr); @@ -1582,35 +1946,28 @@ static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t * char *groupbyColumnData = NULL; if (groupbyStateValue) { - groupbyColumnData = getGroupbyColumnData(pRuntimeEnv, pFields, pBlockInfo, data, isDiskFileBlock, &type, &bytes); + groupbyColumnData = getGroupbyColumnData(pQuery, data, &type, &bytes); } for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; bool hasNull = hasNullVal(pQuery, k, pBlockInfo, pFields, isDiskFileBlock); - char *dataBlock = getDataBlocks(pRuntimeEnv, data, &sasArray[k], k, *forwardStep, isDiskFileBlock); + char *dataBlock = getDataBlocks(pRuntimeEnv, &sasArray[k], k, *forwardStep); - TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->skey : pQuery->ekey; - int64_t alignedTimestamp = - taosGetIntervalStartTimestamp(ts, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); - - setExecParams(pQuery, &pCtx[k], alignedTimestamp, dataBlock, (char *)primaryKeyCol, (*forwardStep), functionId, - pFields, hasNull, pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); + TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? 
pRuntimeEnv->intervalWindow.skey : pRuntimeEnv->intervalWindow.ekey; + setExecParams(pQuery, &pCtx[k], ts, dataBlock, (char *)primaryKeyCol, (*forwardStep), functionId, pFields, hasNull, + pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); } // set the input column data for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k]; - int32_t colIdx = isDiskFileBlock ? pFilterInfo->info.colIdxInBuf : pFilterInfo->info.colIdx; - SColumnInfo * pColumnInfo = &pFilterInfo->info.data; - /* * NOTE: here the tbname/tags column cannot reach here, since it will never be a filter column, * so we do NOT check if is a tag or not */ - pFilterInfo->pData = doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, colIdx, pColumnInfo->colId, - pColumnInfo->type, pColumnInfo->bytes, pFilterInfo->info.colIdxInBuf); + pFilterInfo->pData = doGetDataBlocks(pQuery, data, pFilterInfo->info.colIdxInBuf); } int32_t numOfRes = 0; @@ -1624,7 +1981,10 @@ static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t * pQuery->order.order, pRuntimeEnv->pTSBuf->cur.order); } - for (int32_t j = 0; j < (*forwardStep); ++j) { + int32_t j = 0; + int64_t lastKey = 0; + + for (j = 0; j < (*forwardStep); ++j) { int32_t offset = GET_COL_DATA_POS(pQuery, j, step); if (pRuntimeEnv->pTSBuf != NULL) { @@ -1643,23 +2003,97 @@ static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t * continue; } - // decide which group this rows belongs to according to current state value - if (groupbyStateValue) { - char *stateVal = groupbyColumnData + bytes * offset; + // sliding window query + if (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0) { + // decide the time window according to the primary timestamp + int64_t ts = primaryKeyCol[offset]; + + SSlidingWindowInfo* pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + STimeWindow win = getActiveSlidingWindow(pSlidingWindowInfo, ts, pQuery); - int32_t ret = 
setGroupResultForKey(pRuntimeEnv, stateVal, type, groupbyColumnData); + int32_t ret = setSlidingWindowFromKey(pRuntimeEnv, &win); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code continue; } - } - // all startOffset are identical - offset -= pCtx[0].startOffset; + // all startOffset are identical + offset -= pCtx[0].startOffset; + + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; + pCtx[k].nStartQueryTimestamp = win.skey; + + SWindowStatus* pStatus = getSlidingWindowStatus(pSlidingWindowInfo, curSlidingWindow(pSlidingWindowInfo)); + + if (!IS_MASTER_SCAN(pRuntimeEnv) && !pStatus->closed) { +// qTrace("QInfo:%p not completed in supplementary scan, ignore funcId:%d, window:%lld-%lld", +// GET_QINFO_ADDR(pQuery), functionId, pStatus->window.skey, pStatus->window.ekey); + continue; + } + + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { + aAggs[functionId].xFunctionF(&pCtx[k], offset); + } + } - for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { - int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; - if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { - aAggs[functionId].xFunctionF(&pCtx[k], offset); + lastKey = ts; + int32_t index = pRuntimeEnv->swindowResInfo.curIndex; + + STimeWindow nextWin = win; + while (1) { + getNextLogicalQueryRange(pRuntimeEnv, &nextWin); + if (pSlidingWindowInfo->startTime > nextWin.skey || (nextWin.skey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (nextWin.skey > pQuery->skey && !QUERY_IS_ASC_QUERY(pQuery))) { + pRuntimeEnv->swindowResInfo.curIndex = index; + break; + } + + if (ts >= nextWin.skey && ts <= nextWin.ekey) { + // null data, failed to allocate more memory buffer + if (setSlidingWindowFromKey(pRuntimeEnv, &nextWin) != TSDB_CODE_SUCCESS) { + pRuntimeEnv->swindowResInfo.curIndex = index; + break; + } + + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + int32_t functionId = 
pQuery->pSelectExpr[k].pBase.functionId; + pCtx[k].nStartQueryTimestamp = nextWin.skey; + + SWindowStatus* pStatus = getSlidingWindowStatus(pSlidingWindowInfo, curSlidingWindow(pSlidingWindowInfo)); + if (!IS_MASTER_SCAN(pRuntimeEnv) && !pStatus->closed) { +// qTrace("QInfo:%p not completed in supplementary scan, ignore funcId:%d, window:%lld-%lld", +// GET_QINFO_ADDR(pQuery), functionId, pStatus->window.skey, pStatus->window.ekey); + continue; + } + + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { + aAggs[functionId].xFunctionF(&pCtx[k], offset); + } + } + } else { + pRuntimeEnv->swindowResInfo.curIndex = index; + break; + } + } + } else { // other queries + // decide which group this rows belongs to according to current state value + if (groupbyStateValue) { + char *stateVal = groupbyColumnData + bytes * offset; + + int32_t ret = setGroupResultFromKey(pRuntimeEnv, stateVal, type, bytes); + if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code + continue; + } + } + + // all startOffset are identical + offset -= pCtx[0].startOffset; + + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { + aAggs[functionId].xFunctionF(&pCtx[k], offset); + } } } @@ -1684,12 +2118,49 @@ static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t * free(sasArray); + if (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0 && IS_MASTER_SCAN(pRuntimeEnv)) { + SSlidingWindowInfo *pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + + // query completed + if ((lastKey >= pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (lastKey <= pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { + closeAllSlidingWindow(pSlidingWindowInfo); + + pSlidingWindowInfo->curIndex = pSlidingWindowInfo->size - 1; + setQueryStatus(pQuery, QUERY_COMPLETED | QUERY_RESBUF_FULL); + } else { + int32_t i = 0; + int64_t skey = 0; + + for (i = 
0; i < pSlidingWindowInfo->size; ++i) { + SWindowStatus *pStatus = &pSlidingWindowInfo->pStatus[i]; + if ((pStatus->window.ekey <= lastKey && QUERY_IS_ASC_QUERY(pQuery)) || + (pStatus->window.skey >= lastKey && !QUERY_IS_ASC_QUERY(pQuery))) { + closeSlidingWindow(pSlidingWindowInfo, i); + } else { + skey = pStatus->window.skey; + break; + } + } + + pSlidingWindowInfo->prevSKey = skey; + + // the number of completed slots are larger than the threshold, dump to client immediately. + int32_t v = numOfClosedSlidingWindow(pSlidingWindowInfo); + if (v > pSlidingWindowInfo->threshold) { + setQueryStatus(pQuery, QUERY_RESBUF_FULL); + } + + dTrace("QInfo:%p total window:%d, closed:%d", GET_QINFO_ADDR(pQuery), pSlidingWindowInfo->size, v); + } + } + /* * No need to calculate the number of output results for groupby normal columns * because the results of group by normal column is put into intermediate buffer. */ int32_t num = 0; - if (!groupbyStateValue) { + if (!groupbyStateValue && !(pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { num = getNumOfResult(pRuntimeEnv) - prevNumOfRes; } @@ -1759,7 +2230,7 @@ static void validateQueryRangeAndData(SQueryRuntimeEnv *pRuntimeEnv, const TSKEY } static int32_t applyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pBlockInfo, int64_t *pPrimaryColumn, - char *sdata, SField *pFields, __block_search_fn_t searchFn, int32_t *numOfRes) { + SField *pFields, __block_search_fn_t searchFn, int32_t *numOfRes) { int32_t forwardStep = 0; SQuery *pQuery = pRuntimeEnv->pQuery; @@ -1776,7 +2247,7 @@ static int32_t applyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo * // no qualified data in current block, do not update the lastKey value assert(pQuery->ekey < pPrimaryColumn[pQuery->pos]); } else { - pQuery->lastKey = pPrimaryColumn[pQuery->pos + (forwardStep - 1)] + step; + pQuery->lastKey = pQuery->ekey + step;//pPrimaryColumn[pQuery->pos + (forwardStep - 1)] + step; } } else { @@ -1794,7 +2265,7 @@ static int32_t 
applyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo * // no qualified data in current block, do not update the lastKey value assert(pQuery->ekey > pPrimaryColumn[pQuery->pos]); } else { - pQuery->lastKey = pPrimaryColumn[pQuery->pos - (forwardStep - 1)] + step; + pQuery->lastKey = pQuery->ekey + step;//pPrimaryColumn[pQuery->pos - (forwardStep - 1)] + step; } } else { forwardStep = pQuery->pos + 1; @@ -1812,14 +2283,11 @@ static int32_t applyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo * pQuery->lastKey = pPrimaryColumn[pQuery->pos + (newForwardStep - 1) * step] + step; } - bool isFileBlock = IS_FILE_BLOCK(pRuntimeEnv->blockStatus); - - if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - *numOfRes = - rowwiseApplyAllFunctions(pRuntimeEnv, &newForwardStep, pPrimaryColumn, sdata, pFields, pBlockInfo, isFileBlock); + if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr) || + (pQuery->slidingTime != -1 && pQuery->nAggTimeInterval > 0)) { + *numOfRes = rowwiseApplyAllFunctions(pRuntimeEnv, &newForwardStep, pPrimaryColumn, pFields, pBlockInfo); } else { - *numOfRes = blockwiseApplyAllFunctions(pRuntimeEnv, newForwardStep, pPrimaryColumn, sdata, pFields, pBlockInfo, - isFileBlock); + *numOfRes = blockwiseApplyAllFunctions(pRuntimeEnv, newForwardStep, pPrimaryColumn, pFields, pBlockInfo); } assert(*numOfRes >= 0); @@ -1841,8 +2309,8 @@ int32_t vnodeGetVnodeHeaderFileIdx(int32_t *fid, SQueryRuntimeEnv *pRuntimeEnv, return -1; } - SQueryFilesInfo* pVnodeFiles = &pRuntimeEnv->vnodeFileInfo; - + SQueryFilesInfo *pVnodeFiles = &pRuntimeEnv->vnodeFileInfo; + /* set the initial file for current query */ if (order == TSQL_SO_ASC && *fid < pVnodeFiles->pFileInfo[0].fileID) { *fid = pVnodeFiles->pFileInfo[0].fileID; @@ -1861,7 +2329,7 @@ int32_t vnodeGetVnodeHeaderFileIdx(int32_t *fid, SQueryRuntimeEnv *pRuntimeEnv, if (order == TSQL_SO_ASC) { 
int32_t i = 0; - int32_t step = 1; + int32_t step = QUERY_ASC_FORWARD_STEP; while (i pVnodeFiles->pFileInfo[i].fileID) { i += step; @@ -1875,7 +2343,7 @@ int32_t vnodeGetVnodeHeaderFileIdx(int32_t *fid, SQueryRuntimeEnv *pRuntimeEnv, } } else { int32_t i = numOfFiles - 1; - int32_t step = -1; + int32_t step = QUERY_DESC_FORWARD_STEP; while (i >= 0 && *fid < pVnodeFiles->pFileInfo[i].fileID) { i += step; @@ -1894,17 +2362,17 @@ int32_t getNextDataFileCompInfo(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeter SQuery *pQuery = pRuntimeEnv->pQuery; pQuery->fileId += step; - int32_t fid = 0; + int32_t fileIndex = 0; int32_t order = (step == QUERY_ASC_FORWARD_STEP) ? TSQL_SO_ASC : TSQL_SO_DESC; while (1) { - fid = vnodeGetVnodeHeaderFileIdx(&pQuery->fileId, pRuntimeEnv, order); + fileIndex = vnodeGetVnodeHeaderFileIdx(&pQuery->fileId, pRuntimeEnv, order); // no files left, abort - if (fid < 0) { + if (fileIndex < 0) { if (step == QUERY_ASC_FORWARD_STEP) { - dTrace("QInfo:%p no file to access, try data in cache", GET_QINFO_ADDR(pQuery)); + dTrace("QInfo:%p no more file to access, try data in cache", GET_QINFO_ADDR(pQuery)); } else { - dTrace("QInfo:%p no file to access in desc order, query completed", GET_QINFO_ADDR(pQuery)); + dTrace("QInfo:%p no more file to access in desc order, query completed", GET_QINFO_ADDR(pQuery)); } vnodeFreeFieldsEx(pRuntimeEnv); @@ -1913,7 +2381,7 @@ int32_t getNextDataFileCompInfo(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeter } // failed to mmap header file into memory will cause the retrieval of compblock info failed - if (vnodeGetCompBlockInfo(pMeterObj, pRuntimeEnv, fid) > 0) { + if (vnodeGetCompBlockInfo(pMeterObj, pRuntimeEnv, fileIndex) > 0) { break; } @@ -1926,15 +2394,15 @@ int32_t getNextDataFileCompInfo(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeter pQuery->fileId += step; /* for backwards search, if the first file is not valid, abort */ - if (step < 0 && fid == 0) { + if (step < 0 && fileIndex == 0) { 
vnodeFreeFieldsEx(pRuntimeEnv); pQuery->fileId = -1; - fid = -1; + fileIndex = -1; break; } } - return fid; + return fileIndex; } void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t startQueryTimestamp, void *inputData, @@ -1970,8 +2438,9 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t startQueryTimes // store the first&last timestamp into the intermediate buffer [1], the true // value may be null but timestamp will never be null pCtx->ptsList = (int64_t *)(primaryColumnData + startOffset * TSDB_KEYSIZE); - } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_TWA || - functionId == TSDB_FUNC_DIFF) { + } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || + functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_DIFF || + (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) { /* * leastsquares function needs two columns of input, currently, the x value of linear equation is set to * timestamp column, and the y-value is the column specified in pQuery->pSelectExpr[i].colIdxInBuffer @@ -1988,7 +2457,7 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t startQueryTimes pCtx->ptsList = (int64_t *)(primaryColumnData + startOffset * TSDB_KEYSIZE); } else if (functionId == TSDB_FUNC_ARITHM) { - pCtx->param[0].pz = param; + pCtx->param[1].pz = param; } pCtx->startOffset = startOffset; @@ -2011,20 +2480,21 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t startQueryTimes } // set the output buffer for the selectivity + tag query -static void setCtxTagColumnInfo(SQuery *pQuery, SQueryRuntimeEnv *pRuntimeEnv) { +static void setCtxTagColumnInfo(SQuery *pQuery, SQLFunctionCtx *pCtx) { if (isSelectivityWithTagsQuery(pQuery)) { int32_t num = 0; - SQLFunctionCtx *pCtx = NULL; + SQLFunctionCtx *p = NULL; + int16_t tagLen = 0; SQLFunctionCtx **pTagCtx = calloc(pQuery->numOfOutputCols, POINTER_BYTES); for (int32_t i = 0; i < 
pQuery->numOfOutputCols; ++i) { SSqlFuncExprMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].pBase; if (pSqlFuncMsg->functionId == TSDB_FUNC_TAG_DUMMY || pSqlFuncMsg->functionId == TSDB_FUNC_TS_DUMMY) { - tagLen += pRuntimeEnv->pCtx[i].outputBytes; - pTagCtx[num++] = &pRuntimeEnv->pCtx[i]; + tagLen += pCtx[i].outputBytes; + pTagCtx[num++] = &pCtx[i]; } else if ((aAggs[pSqlFuncMsg->functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { - pCtx = &pRuntimeEnv->pCtx[i]; + p = &pCtx[i]; } else if (pSqlFuncMsg->functionId == TSDB_FUNC_TS || pSqlFuncMsg->functionId == TSDB_FUNC_TAG) { // tag function may be the group by tag column // ts may be the required primary timestamp column @@ -2034,14 +2504,14 @@ static void setCtxTagColumnInfo(SQuery *pQuery, SQueryRuntimeEnv *pRuntimeEnv) { } } - pCtx->tagInfo.pTagCtxList = pTagCtx; - pCtx->tagInfo.numOfTagCols = num; - pCtx->tagInfo.tagsLen = tagLen; + p->tagInfo.pTagCtxList = pTagCtx; + p->tagInfo.numOfTagCols = num; + p->tagInfo.tagsLen = tagLen; } } static int32_t setupQueryRuntimeEnv(SMeterObj *pMeterObj, SQuery *pQuery, SQueryRuntimeEnv *pRuntimeEnv, - SSchema *pTagsSchema, int16_t order, bool isMetricQuery) { + SColumnModel *pTagsSchema, int16_t order, bool isSTableQuery) { dTrace("QInfo:%p setup runtime env", GET_QINFO_ADDR(pQuery)); pRuntimeEnv->pMeterObj = pMeterObj; @@ -2062,8 +2532,10 @@ static int32_t setupQueryRuntimeEnv(SMeterObj *pMeterObj, SQuery *pQuery, SQuery SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; if (TSDB_COL_IS_TAG(pSqlFuncMsg->colInfo.flag)) { // process tag column info - pCtx->inputType = pTagsSchema[pColIndexEx->colIdx].type; - pCtx->inputBytes = pTagsSchema[pColIndexEx->colIdx].bytes; + SSchema* pSchema = getColumnModelSchema(pTagsSchema, pColIndexEx->colIdx); + + pCtx->inputType = pSchema->type; + pCtx->inputBytes = pSchema->bytes; } else { pCtx->inputType = GET_COLUMN_TYPE(pQuery, i); pCtx->inputBytes = GET_COLUMN_BYTES(pQuery, i); @@ -2077,14 +2549,15 @@ static int32_t 
setupQueryRuntimeEnv(SMeterObj *pMeterObj, SQuery *pQuery, SQuery pCtx->order = pQuery->order.order; pCtx->functionId = pSqlFuncMsg->functionId; - /* - * tricky: in case of char array parameters, we employ the shallow copy - * method and get the ownership of the char array, it later release the allocated memory if exists - */ pCtx->numOfParams = pSqlFuncMsg->numOfParams; for (int32_t j = 0; j < pCtx->numOfParams; ++j) { - pCtx->param[j].nType = pSqlFuncMsg->arg[j].argType; - pCtx->param[j].i64Key = pSqlFuncMsg->arg[j].argValue.i64; + int16_t type = pSqlFuncMsg->arg[j].argType; + int16_t bytes = pSqlFuncMsg->arg[j].argBytes; + if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { + tVariantCreateFromBinary(&pCtx->param[j], pSqlFuncMsg->arg->argValue.pz, bytes, type); + } else { + tVariantCreateFromBinary(&pCtx->param[j], (char *)&pSqlFuncMsg->arg[j].argValue.i64, bytes, type); + } } // set the order information for top/bottom query @@ -2108,72 +2581,24 @@ static int32_t setupQueryRuntimeEnv(SMeterObj *pMeterObj, SQuery *pQuery, SQuery // set the intermediate result output buffer SResultInfo *pResInfo = &pRuntimeEnv->resultInfo[i]; - setResultInfoBuf(pResInfo, pQuery->pSelectExpr[i].interResBytes, isMetricQuery); + setResultInfoBuf(pResInfo, pQuery->pSelectExpr[i].interResBytes, isSTableQuery); } // if it is group by normal column, do not set output buffer, the output buffer is pResult - if (!isGroupbyNormalCol(pQuery->pGroupbyExpr) && !isMetricQuery) { + if (!isGroupbyNormalCol(pQuery->pGroupbyExpr) && !isSTableQuery) { resetCtxOutputBuf(pRuntimeEnv); } - setCtxTagColumnInfo(pQuery, pRuntimeEnv); + setCtxTagColumnInfo(pQuery, pRuntimeEnv->pCtx); // for loading block data in memory assert(vnodeList[pMeterObj->vnode].cfg.rowsInFileBlock == pMeterObj->pointsPerFileBlock); - - // To make sure the start position of each buffer is aligned to 4bytes in 32-bit ARM system. 
- for (int32_t i = 0; i < pQuery->numOfCols; ++i) { - int32_t bytes = pQuery->colList[i].data.bytes; - pRuntimeEnv->colDataBuffer[i] = calloc(1, sizeof(SData) + EXTRA_BYTES + pMeterObj->pointsPerFileBlock * bytes); - if (pRuntimeEnv->colDataBuffer[i] == NULL) { - goto _error_clean; - } - } - - // record the maximum column width among columns of this meter/metric - int32_t maxColWidth = pQuery->colList[0].data.bytes; - for (int32_t i = 1; i < pQuery->numOfCols; ++i) { - int32_t bytes = pQuery->colList[i].data.bytes; - if (bytes > maxColWidth) { - maxColWidth = bytes; - } - } - - pRuntimeEnv->primaryColBuffer = NULL; - if (PRIMARY_TSCOL_LOADED(pQuery)) { - pRuntimeEnv->primaryColBuffer = pRuntimeEnv->colDataBuffer[0]; - } else { - pRuntimeEnv->primaryColBuffer = - (SData *)malloc(pMeterObj->pointsPerFileBlock * TSDB_KEYSIZE + sizeof(SData) + EXTRA_BYTES); - } - - pRuntimeEnv->unzipBufSize = (size_t)(maxColWidth * pMeterObj->pointsPerFileBlock + EXTRA_BYTES); // plus extra_bytes - - pRuntimeEnv->unzipBuffer = (char *)malloc(pRuntimeEnv->unzipBufSize); - pRuntimeEnv->secondaryUnzipBuffer = (char *)calloc(1, pRuntimeEnv->unzipBufSize); - - if (pRuntimeEnv->unzipBuffer == NULL || pRuntimeEnv->secondaryUnzipBuffer == NULL || - pRuntimeEnv->primaryColBuffer == NULL) { - goto _error_clean; - } - return TSDB_CODE_SUCCESS; _error_clean: tfree(pRuntimeEnv->resultInfo); tfree(pRuntimeEnv->pCtx); - for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfCols; ++i) { - tfree(pRuntimeEnv->colDataBuffer[i]); - } - - tfree(pRuntimeEnv->unzipBuffer); - tfree(pRuntimeEnv->secondaryUnzipBuffer); - - if (!PRIMARY_TSCOL_LOADED(pQuery)) { - tfree(pRuntimeEnv->primaryColBuffer); - } - return TSDB_CODE_SERV_OUT_OF_MEMORY; } @@ -2188,8 +2613,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { } tfree(pRuntimeEnv->secondaryUnzipBuffer); - - taosCleanUpIntHash(pRuntimeEnv->hashList); + destroySlidingWindowInfo(&pRuntimeEnv->swindowResInfo); if (pRuntimeEnv->pCtx != NULL) { for 
(int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) { @@ -2200,6 +2624,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { } tVariantDestroy(&pCtx->tag); + tfree(pCtx->tagInfo.pTagCtxList); tfree(pRuntimeEnv->resultInfo[i].interResultBuf); } @@ -2213,13 +2638,15 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { tfree(pRuntimeEnv->primaryColBuffer); } - doCloseOpenedFileData(&pRuntimeEnv->vnodeFileInfo); + doCloseQueryFiles(&pRuntimeEnv->vnodeFileInfo); if (pRuntimeEnv->vnodeFileInfo.pFileInfo != NULL) { pRuntimeEnv->vnodeFileInfo.numOfFiles = 0; free(pRuntimeEnv->vnodeFileInfo.pFileInfo); } + taosDestoryInterpoInfo(&pRuntimeEnv->interpoInfo); + if (pRuntimeEnv->pInterpoBuf != NULL) { for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) { tfree(pRuntimeEnv->pInterpoBuf[i]); @@ -2228,19 +2655,17 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { tfree(pRuntimeEnv->pInterpoBuf); } - if (pRuntimeEnv->pTSBuf != NULL) { - tsBufDestory(pRuntimeEnv->pTSBuf); - pRuntimeEnv->pTSBuf = NULL; - } + pRuntimeEnv->pTSBuf = tsBufDestory(pRuntimeEnv->pTSBuf); } // get maximum time interval in each file static int64_t getOldestKey(int32_t numOfFiles, int64_t fileId, SVnodeCfg *pCfg) { - int64_t duration = pCfg->daysPerFile * tsMsPerDay[pCfg->precision]; + int64_t duration = pCfg->daysPerFile * tsMsPerDay[(uint8_t)pCfg->precision]; return (fileId - numOfFiles + 1) * duration; } bool isQueryKilled(SQuery *pQuery) { + return false; SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); /* @@ -2248,7 +2673,7 @@ bool isQueryKilled(SQuery *pQuery) { * if it will be deleted soon, stop current query ASAP. 
*/ SMeterObj *pMeterObj = pQInfo->pObj; - if (vnodeIsMeterState(pMeterObj, TSDB_METER_STATE_DELETING)) { + if (vnodeIsMeterState(pMeterObj, TSDB_METER_STATE_DROPPING)) { pQInfo->killed = 1; return true; } @@ -2279,11 +2704,6 @@ bool isFixedOutputQuery(SQuery *pQuery) { continue; } - // // ignore the group by + projection combination - // if (pExprMsg->functionId == TSDB_FUNC_PRJ && isGroupbyNormalCol(pQuery)) { - // continue; - // } - if (!IS_MULTIOUTPUT(aAggs[pExprMsg->functionId].nStatus)) { return true; } @@ -2304,6 +2724,22 @@ bool isPointInterpoQuery(SQuery *pQuery) { } // TODO REFACTOR:MERGE WITH CLIENT-SIDE FUNCTION +bool isSumAvgRateQuery(SQuery *pQuery) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + if (functionId == TSDB_FUNC_TS) { + continue; + } + + if (functionId == TSDB_FUNC_SUM_RATE || functionId == TSDB_FUNC_SUM_IRATE || + functionId == TSDB_FUNC_AVG_RATE || functionId == TSDB_FUNC_AVG_IRATE) { + return true; + } + } + + return false; +} + bool isTopBottomQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; @@ -2330,6 +2766,11 @@ bool isFirstLastRowQuery(SQuery *pQuery) { return false; } +bool notHasQueryTimeRange(SQuery *pQuery) { + return (pQuery->skey == 0 && pQuery->ekey == INT64_MAX && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->skey == INT64_MAX && pQuery->ekey == 0 && (!QUERY_IS_ASC_QUERY(pQuery))); +} + bool isTSCompQuery(SQuery *pQuery) { return pQuery->pSelectExpr[0].pBase.functionId == TSDB_FUNC_TS_COMP; } bool needSupplementaryScan(SQuery *pQuery) { @@ -2404,11 +2845,13 @@ static int32_t getFirstCacheSlot(int32_t numOfBlocks, int32_t lastSlot, SCacheIn return (lastSlot - numOfBlocks + 1 + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; } -static bool cacheBoundaryCheck(SQuery *pQuery, SMeterObj *pMeterObj) { +static bool cacheBoundaryCheck(SQueryRuntimeEnv *pRuntimeEnv, 
SMeterObj *pMeterObj) { /* * here we get the first slot from the meter cache, not from the cache snapshot from pQuery, since the * snapshot value in pQuery may have been expired now. */ + SQuery *pQuery = pRuntimeEnv->pQuery; + SCacheInfo * pCacheInfo = (SCacheInfo *)pMeterObj->pCache; SCacheBlock *pBlock = NULL; @@ -2432,8 +2875,8 @@ static bool cacheBoundaryCheck(SQuery *pQuery, SMeterObj *pMeterObj) { * pBlock may be null value since this block is flushed to disk, and re-distributes to * other meter, so go on until we get the first not flushed cache block. */ - if ((pBlock = getCacheDataBlock(pMeterObj, pQuery, first)) != NULL) { - keyFirst = getTimestampInCacheBlock(pBlock, 0); + if ((pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, first)) != NULL) { + keyFirst = getTimestampInCacheBlock(pRuntimeEnv, pBlock, 0); break; } else { /* @@ -2465,18 +2908,23 @@ void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t v // commitSlot here denotes the first uncommitted block in cache int32_t numOfBlocks = 0; int32_t lastSlot = 0; + int32_t commitSlot = 0; + int32_t commitPoint = 0; SCachePool *pPool = (SCachePool *)vnodeList[vid].pCachePool; pthread_mutex_lock(&pPool->vmutex); numOfBlocks = pCacheInfo->numOfBlocks; lastSlot = pCacheInfo->currentSlot; + commitSlot = pCacheInfo->commitSlot; + commitPoint = pCacheInfo->commitPoint; pthread_mutex_unlock(&pPool->vmutex); // make sure it is there, otherwise, return right away pQuery->currentSlot = lastSlot; pQuery->numOfBlocks = numOfBlocks; pQuery->firstSlot = getFirstCacheSlot(numOfBlocks, lastSlot, pCacheInfo); - ; + pQuery->commitSlot = commitSlot; + pQuery->commitPoint = commitPoint; /* * Note: the block id is continuous increasing, never becomes smaller. 
@@ -2510,7 +2958,7 @@ int64_t getQueryStartPositionInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t *slo assert((pQuery->lastKey >= pQuery->skey && QUERY_IS_ASC_QUERY(pQuery)) || (pQuery->lastKey <= pQuery->skey && !QUERY_IS_ASC_QUERY(pQuery))); - if (!ignoreQueryRange && !cacheBoundaryCheck(pQuery, pMeterObj)) { + if (!ignoreQueryRange && !cacheBoundaryCheck(pRuntimeEnv, pMeterObj)) { return -1; } @@ -2525,8 +2973,16 @@ int64_t getQueryStartPositionInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t *slo /* locate the first point of which time stamp is no less than pQuery->skey */ __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; - SCacheBlock *pBlock = pCacheInfo->cacheBlocks[*slot]; - (*pos) = searchFn(pBlock->offset[0], pBlock->numOfPoints, pQuery->skey, pQuery->order.order); + pQuery->slot = *slot; + + // cache block has been flushed to disk, no required data block in cache. + SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot); + if (pBlock == NULL) { + pQuery->skey = rawskey; // restore the skey + return -1; + } + + (*pos) = searchFn(pRuntimeEnv->primaryColBuffer->data, pBlock->numOfPoints, pQuery->skey, pQuery->order.order); // restore skey before return pQuery->skey = rawskey; @@ -2536,7 +2992,7 @@ int64_t getQueryStartPositionInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t *slo return -1; } - int64_t nextKey = getTimestampInCacheBlock(pBlock, *pos); + int64_t nextKey = getTimestampInCacheBlock(pRuntimeEnv, pBlock, *pos); if ((nextKey < pQuery->lastKey && QUERY_IS_ASC_QUERY(pQuery)) || (nextKey > pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery))) { // all data are less than the pQuery->lastKey(pQuery->sKey) for asc query @@ -2588,7 +3044,7 @@ bool hasDataInCache(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj) { return false; } - return cacheBoundaryCheck(pQuery, pMeterObj); + return cacheBoundaryCheck(pRuntimeEnv, pMeterObj); } /** @@ -2604,90 +3060,82 @@ void vnodeCheckIfDataExists(SQueryRuntimeEnv 
*pRuntimeEnv, SMeterObj *pMeterObj, setQueryStatus(pQuery, QUERY_NOT_COMPLETED); } -static void doGetAlignedIntervalQueryRangeImpl(SQuery *pQuery, int64_t qualifiedKey, int64_t keyFirst, int64_t keyLast, - int64_t *skey, int64_t *ekey) { - assert(qualifiedKey >= keyFirst && qualifiedKey <= keyLast); +static void doGetAlignedIntervalQueryRangeImpl(SQuery *pQuery, int64_t pKey, int64_t keyFirst, int64_t keyLast, + int64_t *actualSkey, int64_t *actualEkey, int64_t *skey, int64_t *ekey) { + assert(pKey >= keyFirst && pKey <= keyLast); + *skey = taosGetIntervalStartTimestamp(pKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); if (keyFirst > (INT64_MAX - pQuery->nAggTimeInterval)) { /* - * if the skey > INT64_MAX - pQuery->nAggTimeInterval, the query duration between - * skey and ekey must be less than one interval.Therefore, no need to adjust the query ranges. + * if the actualSkey > INT64_MAX - pQuery->nAggTimeInterval, the query duration between + * actualSkey and actualEkey must be less than one interval.Therefore, no need to adjust the query ranges. 
*/ assert(keyLast - keyFirst < pQuery->nAggTimeInterval); - *skey = keyFirst; - *ekey = keyLast; + *actualSkey = keyFirst; + *actualEkey = keyLast; + + *ekey = INT64_MAX; return; } - *skey = taosGetIntervalStartTimestamp(qualifiedKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, - pQuery->precision); - int64_t endKey = *skey + pQuery->nAggTimeInterval - 1; + *ekey = *skey + pQuery->nAggTimeInterval - 1; if (*skey < keyFirst) { - *skey = keyFirst; + *actualSkey = keyFirst; + } else { + *actualSkey = *skey; } - if (endKey < keyLast) { - *ekey = endKey; + if (*ekey < keyLast) { + *actualEkey = *ekey; } else { - *ekey = keyLast; + *actualEkey = keyLast; } } -static void doGetAlignedIntervalQueryRange(SQuery *pQuery, TSKEY key, TSKEY skey, TSKEY ekey) { - TSKEY skey1, ekey1; +static void getAlignedIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key, TSKEY skey, TSKEY ekey) { + SQuery *pQuery = pRuntimeEnv->pQuery; + if (pQuery->nAggTimeInterval == 0 || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { + return; + } + + TSKEY skey2 = MIN(skey, ekey); + TSKEY ekey2 = MAX(skey, ekey); - TSKEY skey2 = (skey < ekey) ? skey : ekey; - TSKEY ekey2 = (skey < ekey) ? 
ekey : skey; + // the actual first query range in skey1 and ekey1 + TSKEY skey1, ekey1; - doGetAlignedIntervalQueryRangeImpl(pQuery, key, skey2, ekey2, &skey1, &ekey1); + TSKEY windowSKey = 0, windowEKey = 0; + doGetAlignedIntervalQueryRangeImpl(pQuery, key, skey2, ekey2, &skey1, &ekey1, &windowSKey, &windowEKey); if (QUERY_IS_ASC_QUERY(pQuery)) { pQuery->skey = skey1; pQuery->ekey = ekey1; - assert(pQuery->skey <= pQuery->ekey); + + pRuntimeEnv->intervalWindow = (STimeWindow) {.skey = windowSKey, .ekey = windowEKey}; + + assert(pQuery->skey <= pQuery->ekey && + pRuntimeEnv->intervalWindow.skey + (pQuery->nAggTimeInterval - 1) == pRuntimeEnv->intervalWindow.ekey); } else { pQuery->skey = ekey1; pQuery->ekey = skey1; - assert(pQuery->skey >= pQuery->ekey); + + pRuntimeEnv->intervalWindow = (STimeWindow) {.skey = windowEKey, .ekey = windowSKey}; + + assert(pQuery->skey >= pQuery->ekey && + pRuntimeEnv->intervalWindow.skey - (pQuery->nAggTimeInterval - 1) == pRuntimeEnv->intervalWindow.ekey); } - pQuery->lastKey = pQuery->skey; -} - -static void getOneRowFromDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, char **dst, int32_t pos) { - SQuery *pQuery = pRuntimeEnv->pQuery; - - for (int32_t i = 0; i < pQuery->numOfCols; ++i) { - int32_t bytes = pQuery->colList[i].data.bytes; - memcpy(dst[i], pRuntimeEnv->colDataBuffer[i]->data + pos * bytes, bytes); - } + pQuery->lastKey = pQuery->skey; } -static void getOneRowFromCacheBlock(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, SCacheBlock *pBlock, - char **dst, int32_t pos) { +static void getOneRowFromDataBlock(SQueryRuntimeEnv *pRuntimeEnv, char **dst, int32_t pos) { SQuery *pQuery = pRuntimeEnv->pQuery; - /* - * in case of cache block expired, the pos may exceed the number of points in block, so check - * the range in the first place. 
- */ - if (pos > pBlock->numOfPoints) { - pos = pBlock->numOfPoints; - } - for (int32_t i = 0; i < pQuery->numOfCols; ++i) { - int32_t colIdx = pQuery->colList[i].colIdx; - int32_t colId = pQuery->colList[i].data.colId; - - SColumn *pCols = &pMeterObj->schema[colIdx]; - - if (colIdx < 0 || colIdx >= pMeterObj->numOfColumns || pCols->colId != colId) { // set null - setNull(dst[i], pCols->type, pCols->bytes); - } else { - memcpy(dst[i], pBlock->offset[colIdx] + pos * pCols->bytes, pCols->bytes); - } + int32_t bytes = pQuery->colList[i].data.bytes; + memcpy(dst[i], pRuntimeEnv->colDataBuffer[i]->data + pos * bytes, bytes); } } @@ -2716,29 +3164,7 @@ static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMet pQuery->slot, pQuery->pos); // save the point that is directly after or equals to the specified point - if (IS_DISK_DATA_BLOCK(pQuery)) { - getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pNextPoint, pQuery->pos); - } else { - pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); - __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; - - while (pBlock == NULL) { - // cache block is flushed to disk, try to find new query position again - getQueryPositionForCacheInvalid(pRuntimeEnv, searchFn); - - // new position is located in file, load data and abort - if (IS_DISK_DATA_BLOCK(pQuery)) { - getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pNextPoint, pQuery->pos); - break; - } else { - pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); - } - } - - if (!IS_DISK_DATA_BLOCK(pQuery)) { - getOneRowFromCacheBlock(pRuntimeEnv, pMeterObj, pBlock, pPointInterpSupporter->pNextPoint, pQuery->pos); - } - } + getOneRowFromDataBlock(pRuntimeEnv, pPointInterpSupporter->pNextPoint, pQuery->pos); /* * 1. for last_row query, return immediately. 
@@ -2767,12 +3193,8 @@ static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMet if (pQuery->pos > 0) { int32_t prevPos = pQuery->pos - 1; - if (IS_DISK_DATA_BLOCK(pQuery)) { - /* save the point that is directly after the specified point */ - getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, prevPos); - } else { - getOneRowFromCacheBlock(pRuntimeEnv, pMeterObj, pBlock, pPointInterpSupporter->pPrevPoint, prevPos); - } + /* save the point that is directly after the specified point */ + getOneRowFromDataBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, prevPos); } else { __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; @@ -2782,10 +3204,12 @@ static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMet moveToNextBlock(pRuntimeEnv, QUERY_DESC_FORWARD_STEP, searchFn, true); /* - * no previous data exists reset the status and load the data block that contains the qualified point + * no previous data exists. 
+ * reset the status and load the data block that contains the qualified point */ if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK)) { - dTrace("QInfo:%p no previous data block, start fileId:%d, slot:%d, pos:%d, qrange:%lld-%lld, out of range", + dTrace("QInfo:%p no previous data block, start fileId:%d, slot:%d, pos:%d, qrange:%" PRId64 "-%" PRId64 + ", out of range", GET_QINFO_ADDR(pQuery), pRuntimeEnv->startPos.fileId, pRuntimeEnv->startPos.slot, pRuntimeEnv->startPos.pos, pQuery->skey, pQuery->ekey); @@ -2795,21 +3219,20 @@ static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMet } else { // prev has been located if (pQuery->fileId >= 0) { pQuery->pos = pQuery->pBlock[pQuery->slot].numOfPoints - 1; - getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, pQuery->pos); + getOneRowFromDataBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, pQuery->pos); qTrace("QInfo:%p get prev data point, fileId:%d, slot:%d, pos:%d, pQuery->pos:%d", GET_QINFO_ADDR(pQuery), pQuery->fileId, pQuery->slot, pQuery->pos, pQuery->pos); } else { - pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); - if (pBlock == NULL) { - // todo nothing, the previous block is flushed to disk - } else { - pQuery->pos = pBlock->numOfPoints - 1; - getOneRowFromCacheBlock(pRuntimeEnv, pMeterObj, pBlock, pPointInterpSupporter->pPrevPoint, pQuery->pos); + // moveToNextBlock make sure there is a available cache block, if exists + assert(vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, -1, true) == DISK_BLOCK_NO_NEED_TO_LOAD); + pBlock = &pRuntimeEnv->cacheBlock; - qTrace("QInfo:%p get prev data point, fileId:%d, slot:%d, pos:%d, pQuery->pos:%d", GET_QINFO_ADDR(pQuery), - pQuery->fileId, pQuery->slot, pBlock->numOfPoints - 1, pQuery->pos); - } + pQuery->pos = pBlock->numOfPoints - 1; + getOneRowFromDataBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, pQuery->pos); + + qTrace("QInfo:%p get prev data point, fileId:%d, slot:%d, pos:%d, 
pQuery->pos:%d", GET_QINFO_ADDR(pQuery), + pQuery->fileId, pQuery->slot, pBlock->numOfPoints - 1, pQuery->pos); } } } @@ -2831,7 +3254,7 @@ static bool doGetQueryPos(TSKEY key, SMeterQuerySupportObj *pSupporter, SPointIn if (isPointInterpoQuery(pQuery)) { /* no qualified data in this query range */ return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter); } else { - getAlignedIntervalQueryRange(pQuery, key, pQuery->skey, pQuery->ekey); + getAlignedIntervalQueryRange(pRuntimeEnv, key, pQuery->skey, pQuery->ekey); return true; } } else { // key > pQuery->ekey, abort for normal query, continue for interp query @@ -2843,14 +3266,101 @@ static bool doGetQueryPos(TSKEY key, SMeterQuerySupportObj *pSupporter, SPointIn } } +static bool doSetDataInfo(SMeterQuerySupportObj *pSupporter, SPointInterpoSupporter *pPointInterpSupporter, + SMeterObj *pMeterObj, TSKEY nextKey) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + if (isFirstLastRowQuery(pQuery)) { + /* + * if the pQuery->skey != pQuery->ekey for last_row query, + * the query range is existed, so set them both the value of nextKey + */ + if (pQuery->skey != pQuery->ekey) { + assert(pQuery->skey >= pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery) && nextKey >= pQuery->ekey && + nextKey <= pQuery->skey); + + pQuery->skey = nextKey; + pQuery->ekey = nextKey; + } + + return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter); + } else { + getAlignedIntervalQueryRange(pRuntimeEnv, nextKey, pQuery->skey, pQuery->ekey); + return true; + } +} + +// TODO refactor code, the best way to implement the last_row is utilizing the iterator +bool normalizeUnBoundLastRowQuery(SMeterQuerySupportObj *pSupporter, SPointInterpoSupporter *pPointInterpSupporter) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + + assert(!QUERY_IS_ASC_QUERY(pQuery) && 
notHasQueryTimeRange(pQuery)); + __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; + + TSKEY lastKey = -1; + + pQuery->fileId = -1; + vnodeFreeFieldsEx(pRuntimeEnv); + + // keep in-memory cache status in local variables in case that it may be changed by write operation + getBasicCacheInfoSnapshot(pQuery, pMeterObj->pCache, pMeterObj->vnode); + + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; + if (pCacheInfo != NULL && pCacheInfo->cacheBlocks != NULL && pQuery->numOfBlocks > 0) { + pQuery->fileId = -1; + TSKEY key = pMeterObj->lastKey; + + pQuery->skey = key; + pQuery->ekey = key; + pQuery->lastKey = pQuery->skey; + + /* + * cache block may have been flushed to disk, and no data in cache anymore. + * So, copy cache block to local buffer is required. + */ + lastKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, false); + if (lastKey < 0) { // data has been flushed to disk, try again search in file + lastKey = getQueryPositionForCacheInvalid(pRuntimeEnv, searchFn); + + if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) { + return false; + } + } + } else { // no data in cache, try file + TSKEY key = pMeterObj->lastKeyOnFile; + + pQuery->skey = key; + pQuery->ekey = key; + pQuery->lastKey = pQuery->skey; + + bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn); + if (!ret) { // no data in file, return false; + return false; + } + + lastKey = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); + } + + assert(lastKey <= pQuery->skey); + + pQuery->skey = lastKey; + pQuery->ekey = lastKey; + pQuery->lastKey = pQuery->skey; + + return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter); +} + /** * determine the first query range, according to raw query range [skey, ekey] and group-by interval. 
* the time interval for aggregating is not enforced to check its validation, the minimum interval is not less than - * 10ms, - * which is guaranteed by parser at client-side + * 10ms, which is guaranteed by parser at client-side */ bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySupportObj *pSupporter, - SPointInterpoSupporter *pPointInterpSupporter) { + SPointInterpoSupporter *pPointInterpSupporter, int64_t *key) { SQueryRuntimeEnv * pRuntimeEnv = &pSupporter->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; @@ -2859,10 +3369,14 @@ bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySup if (QUERY_IS_ASC_QUERY(pQuery)) { // todo: the action return as the getQueryStartPositionInCache function if (dataInDisk && getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_GREATER_EQUAL, searchFn)) { - TSKEY key = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); - assert(key >= pQuery->skey); + TSKEY nextKey = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); + assert(nextKey >= pQuery->skey); - return doGetQueryPos(key, pSupporter, pPointInterpSupporter); + if (key != NULL) { + *key = nextKey; + } + + return doGetQueryPos(nextKey, pSupporter, pPointInterpSupporter); } // set no data in file @@ -2875,6 +3389,11 @@ bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySup } TSKEY nextKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, false); + + if (key != NULL) { + *key = nextKey; + } + return doGetQueryPos(nextKey, pSupporter, pPointInterpSupporter); } else { // descending order @@ -2882,44 +3401,40 @@ bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySup TSKEY nextKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, false); assert(nextKey == -1 || nextKey <= pQuery->skey); - // valid data in cache - if (nextKey != -1) { + if (key != NULL) { + *key = nextKey; + } + + 
if (nextKey != -1) { // find qualified data in cache if (nextKey >= pQuery->ekey) { - if (isFirstLastRowQuery(pQuery)) { - return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter); - } else { - getAlignedIntervalQueryRange(pQuery, nextKey, pQuery->skey, pQuery->ekey); - return true; - } + return doSetDataInfo(pSupporter, pPointInterpSupporter, pMeterObj, nextKey); } else { /* * nextKey < pQuery->ekey && nextKey < pQuery->lastKey, query range is - * larger than all data, abort NOTE: Interp query does not reach here, since for all interp query, + * larger than all data, abort + * + * NOTE: Interp query does not reach here, since for all interp query, * the query order is ascending order. */ return false; } - } else { // all data in cache are greater than pQuery->lastKey, try file + } else { // all data in cache are greater than pQuery->skey, try file } } if (dataInDisk && getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn)) { - TSKEY key = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); - assert(key <= pQuery->skey); + TSKEY nextKey = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); + assert(nextKey <= pQuery->skey); - /* key in query range. If not, no qualified in disk file */ - if (key >= pQuery->ekey) { - if (isFirstLastRowQuery(pQuery)) { /* no qualified data in this query range */ - return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter); - } else { - getAlignedIntervalQueryRange(pQuery, key, pQuery->skey, pQuery->ekey); - return true; - } - } else { // Goes on in case of key in file less than pMeterObj->lastKey, - // which is also the pQuery->skey - if (isFirstLastRowQuery(pQuery)) { - return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter); - } + if (key != NULL) { + *key = nextKey; + } + + // key in query range. 
If not, no qualified in disk file + if (nextKey >= pQuery->ekey) { + return doSetDataInfo(pSupporter, pPointInterpSupporter, pMeterObj, nextKey); + } else { // In case of all queries, the value of false will be returned if key < pQuery->ekey + return false; } } } @@ -2927,7 +3442,6 @@ bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySup return false; } -// todo handle the mmap relative offset value assert problem int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv *pRuntimeEnv, SPositionInfo *position) { TSKEY nextTimestamp = -1; @@ -2945,9 +3459,9 @@ int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv *pRuntimeEnv, SPositionInfo *p return -1; } - SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot); if (pBlock != NULL) { - nextTimestamp = getTimestampInCacheBlock(pBlock, position->pos); + nextTimestamp = getTimestampInCacheBlock(pRuntimeEnv, pBlock, position->pos); } else { // todo fix it } @@ -2960,7 +3474,7 @@ int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv *pRuntimeEnv, SPositionInfo *p * currently opened file is not the start file, reset to the start file */ int32_t fileIdx = vnodeGetVnodeHeaderFileIdx(&pQuery->fileId, pRuntimeEnv, pQuery->order.order); - if (fileIdx < 0) { // ignore the files on disk + if (fileIdx < 0) { // ignore the files on disk dError("QInfo:%p failed to get data file:%d", GET_QINFO_ADDR(pQuery), pQuery->fileId); position->fileId = -1; return -1; @@ -2973,8 +3487,7 @@ int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv *pRuntimeEnv, SPositionInfo *p * * If failed to load comp block into memory due some how reasons, e.g., empty header file/not enough memory */ - int32_t numOfBlocks = vnodeGetCompBlockInfo(pMeterObj, pRuntimeEnv, fileIdx); - if (numOfBlocks <= 0) { + if (vnodeGetCompBlockInfo(pMeterObj, pRuntimeEnv, fileIdx) <= 0) { position->fileId = -1; return -1; } @@ -3054,11 +3567,11 @@ static void 
vnodeRecordAllFiles(SQInfo *pQInfo, int32_t vnodeId) { SQueryFilesInfo *pVnodeFilesInfo = &(pQInfo->pMeterQuerySupporter->runtimeEnv.vnodeFileInfo); pVnodeFilesInfo->vnodeId = vnodeId; - + sprintf(pVnodeFilesInfo->dbFilePathPrefix, "%s/vnode%d/db/", tsDirectory, vnodeId); DIR *pDir = opendir(pVnodeFilesInfo->dbFilePathPrefix); if (pDir == NULL) { - dError("QInfo:%p failed to open directory:%s", pQInfo, pVnodeFilesInfo->dbFilePathPrefix); + dError("QInfo:%p failed to open directory:%s, %s", pQInfo, pVnodeFilesInfo->dbFilePathPrefix, strerror(errno)); return; } @@ -3109,10 +3622,11 @@ static void vnodeRecordAllFiles(SQInfo *pQInfo, int32_t vnodeId) { closedir(pDir); dTrace("QInfo:%p find %d data files in %s to be checked", pQInfo, pVnodeFilesInfo->numOfFiles, - pVnodeFilesInfo->dbFilePathPrefix); + pVnodeFilesInfo->dbFilePathPrefix); /* order the files information according their names */ - qsort(pVnodeFilesInfo->pFileInfo, (size_t)pVnodeFilesInfo->numOfFiles, sizeof(SHeaderFileInfo), file_order_comparator); + qsort(pVnodeFilesInfo->pFileInfo, (size_t)pVnodeFilesInfo->numOfFiles, sizeof(SHeaderFileInfo), + file_order_comparator); } static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pBlockInfo, void *pBlock) { @@ -3130,12 +3644,7 @@ static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pBlockInf } // update the pQuery->limit.offset value, and pQuery->pos value - TSKEY *keys = NULL; - if (IS_DISK_DATA_BLOCK(pQuery)) { - keys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; - } else { - keys = (TSKEY *)(((SCacheBlock *)pBlock)->offset[0]); - } + TSKEY *keys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; int32_t i = 0; if (QUERY_IS_ASC_QUERY(pQuery)) { @@ -3176,7 +3685,7 @@ static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pBlockInf if (IS_DISK_DATA_BLOCK(pQuery)) { pQuery->skey = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); } else { - pQuery->skey = getTimestampInCacheBlock(pBlock, pQuery->pos); + 
pQuery->skey = getTimestampInCacheBlock(pRuntimeEnv, pBlock, pQuery->pos); } // update the offset value @@ -3211,15 +3720,23 @@ static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB static void changeExecuteScanOrder(SQuery *pQuery, bool metricQuery) { // in case of point-interpolation query, use asc order scan char msg[] = - "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%lld-%lld, " - "new qrange:%lld-%lld"; + "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%" PRId64 "-%" PRId64 + ", " + "new qrange:%" PRId64 "-%" PRId64; - // descending order query + // descending order query for last_row query if (isFirstLastRowQuery(pQuery)) { dTrace("QInfo:%p scan order changed for last_row query, old:%d, new:%d", GET_QINFO_ADDR(pQuery), pQuery->order.order, TSQL_SO_DESC); pQuery->order.order = TSQL_SO_DESC; + + int64_t skey = MIN(pQuery->skey, pQuery->ekey); + int64_t ekey = MAX(pQuery->skey, pQuery->ekey); + + pQuery->skey = ekey; + pQuery->ekey = skey; + return; } @@ -3292,11 +3809,11 @@ static int32_t doSkipDataBlock(SQueryRuntimeEnv *pRuntimeEnv) { break; } - void *pBlock = getGenericDataBlock(pMeterObj, pQuery, pQuery->slot); + void *pBlock = getGenericDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot); assert(pBlock != NULL); int32_t blockType = IS_DISK_DATA_BLOCK(pQuery) ? BLK_FILE_BLOCK : BLK_CACHE_BLOCK; - SBlockInfo blockInfo = getBlockBasicInfo(pBlock, blockType); + SBlockInfo blockInfo = getBlockBasicInfo(pRuntimeEnv, pBlock, blockType); int32_t maxReads = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.size - pQuery->pos : pQuery->pos + 1; assert(maxReads >= 0); @@ -3310,7 +3827,7 @@ static int32_t doSkipDataBlock(SQueryRuntimeEnv *pRuntimeEnv) { pQuery->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? 
blockInfo.keyLast : blockInfo.keyFirst; pQuery->lastKey += step; - qTrace("QInfo:%p skip rows:%d, offset:%lld", GET_QINFO_ADDR(pQuery), maxReads, pQuery->limit.offset); + qTrace("QInfo:%p skip rows:%d, offset:%" PRId64 "", GET_QINFO_ADDR(pQuery), maxReads, pQuery->limit.offset); } } @@ -3325,10 +3842,10 @@ void forwardQueryStartPosition(SQueryRuntimeEnv *pRuntimeEnv) { return; } - void *pBlock = getGenericDataBlock(pMeterObj, pQuery, pQuery->slot); + void *pBlock = getGenericDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot); int32_t blockType = (IS_DISK_DATA_BLOCK(pQuery)) ? BLK_FILE_BLOCK : BLK_CACHE_BLOCK; - SBlockInfo blockInfo = getBlockBasicInfo(pBlock, blockType); + SBlockInfo blockInfo = getBlockBasicInfo(pRuntimeEnv, pBlock, blockType); // get the qualified data that can be skipped int32_t maxReads = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.size - pQuery->pos : pQuery->pos + 1; @@ -3338,6 +3855,8 @@ void forwardQueryStartPosition(SQueryRuntimeEnv *pRuntimeEnv) { updateOffsetVal(pRuntimeEnv, &blockInfo, pBlock); } else { pQuery->limit.offset -= maxReads; + // update the lastkey, since the following skip operation may traverse to another media. update the lastkey first. + pQuery->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? 
blockInfo.keyLast + 1 : blockInfo.keyFirst - 1; doSkipDataBlock(pRuntimeEnv); } } @@ -3390,7 +3909,7 @@ static bool forwardQueryStartPosIfNeeded(SQInfo *pQInfo, SMeterQuerySupportObj * pQuery->lastKey = pQuery->skey; // todo opt performance - if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, NULL) == false) { + if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, NULL, NULL) == false) { sem_post(&pQInfo->dataReady); // hack for next read for empty return pQInfo->over = 1; return false; @@ -3516,7 +4035,6 @@ void pointInterpSupporterSetData(SQInfo *pQInfo, SPointInterpoSupporter *pPointI pCtx->numOfParams = 4; SInterpInfo *pInterpInfo = (SInterpInfo *)pRuntimeEnv->pCtx[i].aOutputBuf; - pInterpInfo->pInterpDetail = calloc(1, sizeof(SInterpInfoDetail)); SInterpInfoDetail *pInterpDetail = pInterpInfo->pInterpDetail; @@ -3595,9 +4113,9 @@ void pointInterpSupporterInit(SQuery *pQuery, SPointInterpoSupporter *pInterpoSu int32_t offset = 0; - for (int32_t i = 0, j = 0; i < pQuery->numOfCols; ++i, ++j) { - pInterpoSupport->pPrevPoint[j] = prev + offset; - pInterpoSupport->pNextPoint[j] = next + offset; + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + pInterpoSupport->pPrevPoint[i] = prev + offset; + pInterpoSupport->pNextPoint[i] = next + offset; offset += pQuery->colList[i].data.bytes; } @@ -3633,54 +4151,155 @@ static void allocMemForInterpo(SMeterQuerySupportObj *pSupporter, SQuery *pQuery } } -static int32_t allocateOutputBufForGroup(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, bool isMetricQuery) { - int32_t slot = 0; +static int32_t createQueryOutputBuffer(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, bool isSTableQuery) { + SQueryRuntimeEnv* pRuntimeEnv = &pSupporter->runtimeEnv; + + int32_t numOfRows = 0; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - slot = 10000; + if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { + numOfRows = 10000; } else { - slot = 
pSupporter->pSidSet->numOfSubSet; + numOfRows = pSupporter->pSidSet->numOfSubSet; } - pSupporter->pResult = calloc(1, sizeof(SOutputRes) * slot); + createResultBuf(&pRuntimeEnv->pResultBuf, 100, pQuery->rowSize); + + // total number of initial results + pSupporter->pResult = calloc(numOfRows, sizeof(SOutputRes)); if (pSupporter->pResult == NULL) { return TSDB_CODE_SERV_OUT_OF_MEMORY; } - // create group result buffer - for (int32_t k = 0; k < slot; ++k) { + int32_t pageId = -1; + tFilePage* page = NULL; + + for (int32_t k = 0; k < numOfRows; ++k) { SOutputRes *pOneRes = &pSupporter->pResult[k]; + pOneRes->nAlloc = 1; /* - * for top/bottom query, the output for group by normal column, the output rows is equals to the - * maximum rows, instead of 1. + * for single table top/bottom query, the output for group by normal column, the output rows is + * equals to the maximum rows, instead of 1. */ - SSqlFunctionExpr *pExpr = &pQuery->pSelectExpr[1]; - if ((pExpr->pBase.functionId == TSDB_FUNC_TOP || pExpr->pBase.functionId == TSDB_FUNC_BOTTOM) && - pExpr->resType != TSDB_DATA_TYPE_BINARY) { + if (!isSTableQuery && isTopBottomQuery(pQuery)) { + assert(pQuery->numOfOutputCols > 1); + + SSqlFunctionExpr *pExpr = &pQuery->pSelectExpr[1]; pOneRes->nAlloc = pExpr->pBase.arg[0].argValue.i64; - } else { - pOneRes->nAlloc = 1; } - createGroupResultBuf(pQuery, pOneRes, isMetricQuery); + if (page == NULL || page->numOfElems >= pRuntimeEnv->numOfRowsPerPage) { + page = getNewDataBuf(pRuntimeEnv->pResultBuf, 0, &pageId); + } + + assert(pageId >= 0); + + SPosInfo posInfo = {.pageId = pageId, .rowId = page->numOfElems}; + + createQueryResultBuf(pRuntimeEnv, pOneRes, isSTableQuery, &posInfo); + page->numOfElems += 1; // next row is available + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t allocateRuntimeEnvBuf(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + // To make sure the start position of each buffer is aligned to 4bytes in 
32-bit ARM system. + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + int32_t bytes = pQuery->colList[i].data.bytes; + pRuntimeEnv->colDataBuffer[i] = calloc(1, sizeof(SData) + EXTRA_BYTES + pMeterObj->pointsPerFileBlock * bytes); + if (pRuntimeEnv->colDataBuffer[i] == NULL) { + goto _error_clean; + } + } + + // record the maximum column width among columns of this meter/metric + int32_t maxColWidth = pQuery->colList[0].data.bytes; + for (int32_t i = 1; i < pQuery->numOfCols; ++i) { + int32_t bytes = pQuery->colList[i].data.bytes; + if (bytes > maxColWidth) { + maxColWidth = bytes; + } + } + + pRuntimeEnv->primaryColBuffer = NULL; + if (PRIMARY_TSCOL_LOADED(pQuery)) { + pRuntimeEnv->primaryColBuffer = pRuntimeEnv->colDataBuffer[0]; + } else { + pRuntimeEnv->primaryColBuffer = + (SData *)malloc(pMeterObj->pointsPerFileBlock * TSDB_KEYSIZE + sizeof(SData) + EXTRA_BYTES); + } + + pRuntimeEnv->unzipBufSize = (size_t)(maxColWidth * pMeterObj->pointsPerFileBlock + EXTRA_BYTES); // plus extra_bytes + + pRuntimeEnv->unzipBuffer = (char *)calloc(1, pRuntimeEnv->unzipBufSize); + pRuntimeEnv->secondaryUnzipBuffer = (char *)calloc(1, pRuntimeEnv->unzipBufSize); + + if (pRuntimeEnv->unzipBuffer == NULL || pRuntimeEnv->secondaryUnzipBuffer == NULL || + pRuntimeEnv->primaryColBuffer == NULL) { + goto _error_clean; } return TSDB_CODE_SUCCESS; + +_error_clean: + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfCols; ++i) { + tfree(pRuntimeEnv->colDataBuffer[i]); + } + + tfree(pRuntimeEnv->unzipBuffer); + tfree(pRuntimeEnv->secondaryUnzipBuffer); + + if (!PRIMARY_TSCOL_LOADED(pQuery)) { + tfree(pRuntimeEnv->primaryColBuffer); + } + + return TSDB_CODE_SERV_OUT_OF_MEMORY; +} + +static int32_t getRowParamForMultiRowsOutput(SQuery* pQuery, bool isSTableQuery) { + int32_t rowparam = 1; + + if (isTopBottomQuery(pQuery) && (!isSTableQuery)) { + rowparam = pQuery->pSelectExpr[1].pBase.arg->argValue.i64; + } + + return rowparam; +} + +static int32_t getNumOfRowsInResultPage(SQuery* 
pQuery, bool isSTableQuery) { + int32_t rowSize = pQuery->rowSize * getRowParamForMultiRowsOutput(pQuery, isSTableQuery); + return (DEFAULT_INTERN_BUF_SIZE - sizeof(tFilePage)) / rowSize; +} + +static char* getPosInResultPage(SQueryRuntimeEnv* pRuntimeEnv, int32_t columnIndex, SOutputRes* pResult) { + SQuery* pQuery = pRuntimeEnv->pQuery; + tFilePage* page = getResultBufferPageById(pRuntimeEnv->pResultBuf, pResult->pos.pageId); + + int32_t numOfRows = getNumOfRowsInResultPage(pQuery, pRuntimeEnv->stableQuery); + int32_t realRowId = pResult->pos.rowId * getRowParamForMultiRowsOutput(pQuery, pRuntimeEnv->stableQuery); + + return ((char*)page->data) + pRuntimeEnv->offset[columnIndex] * numOfRows + + pQuery->pSelectExpr[columnIndex].resBytes * realRowId; } int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMeterQuerySupportObj *pSupporter, void *param) { SQuery *pQuery = &pQInfo->query; + int32_t code = TSDB_CODE_SUCCESS; + /* + * only the successful complete requries the sem_post/over = 1 operations. 
+ */ if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey > pQuery->ekey)) || (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->ekey > pQuery->skey))) { - dTrace("QInfo:%p no result in time range %lld-%lld, order %d", pQInfo, pQuery->skey, pQuery->ekey, + dTrace("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->skey, pQuery->ekey, pQuery->order.order); sem_post(&pQInfo->dataReady); pQInfo->over = 1; - return TSDB_CODE_SUCCESS; } @@ -3695,56 +4314,61 @@ int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMete pQuery->lastKey = pQuery->skey; doInitQueryFileInfoFD(&pSupporter->runtimeEnv.vnodeFileInfo); - + vnodeInitDataBlockInfo(&pSupporter->runtimeEnv.loadBlockInfo); vnodeInitLoadCompBlockInfo(&pSupporter->runtimeEnv.loadCompBlockInfo); // check data in file or cache bool dataInCache = true; bool dataInDisk = true; - pSupporter->runtimeEnv.pQuery = pQuery; - vnodeCheckIfDataExists(&pSupporter->runtimeEnv, pMeterObj, &dataInDisk, &dataInCache); + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + pRuntimeEnv->pQuery = pQuery; + pRuntimeEnv->pMeterObj = pMeterObj; + + if ((code = allocateRuntimeEnvBuf(pRuntimeEnv, pMeterObj)) != TSDB_CODE_SUCCESS) { + return code; + } + + vnodeCheckIfDataExists(pRuntimeEnv, pMeterObj, &dataInDisk, &dataInCache); /* data in file or cache is not qualified for the query. abort */ if (!(dataInCache || dataInDisk)) { dTrace("QInfo:%p no result in query", pQInfo); sem_post(&pQInfo->dataReady); pQInfo->over = 1; - - return TSDB_CODE_SUCCESS; + return code; } - pSupporter->runtimeEnv.pTSBuf = param; - pSupporter->runtimeEnv.cur.vnodeIndex = -1; + pRuntimeEnv->pTSBuf = param; + pRuntimeEnv->cur.vnodeIndex = -1; if (param != NULL) { - int16_t order = (pQuery->order.order == pSupporter->runtimeEnv.pTSBuf->tsOrder) ? TSQL_SO_ASC : TSQL_SO_DESC; - tsBufSetTraverseOrder(pSupporter->runtimeEnv.pTSBuf, order); + int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? 
TSQL_SO_ASC : TSQL_SO_DESC; + tsBufSetTraverseOrder(pRuntimeEnv->pTSBuf, order); } // create runtime environment - int32_t ret = setupQueryRuntimeEnv(pMeterObj, pQuery, &pSupporter->runtimeEnv, NULL, pQuery->order.order, false); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + code = setupQueryRuntimeEnv(pMeterObj, pQuery, &pSupporter->runtimeEnv, NULL, pQuery->order.order, false); + if (code != TSDB_CODE_SUCCESS) { + return code; } vnodeRecordAllFiles(pQInfo, pMeterObj->vnode); - - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - if ((ret = allocateOutputBufForGroup(pSupporter, pQuery, false)) != TSDB_CODE_SUCCESS) { - return ret; + + pRuntimeEnv->numOfRowsPerPage = getNumOfRowsInResultPage(pQuery, false); + if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { + if ((code = createQueryOutputBuffer(pSupporter, pQuery, false)) != TSDB_CODE_SUCCESS) { + return code; } - pSupporter->runtimeEnv.hashList = taosInitIntHash(10039, sizeof(void *), taosHashInt); - pSupporter->runtimeEnv.usedIndex = 0; - pSupporter->runtimeEnv.pResult = pSupporter->pResult; - } + int16_t type = TSDB_DATA_TYPE_NULL; + if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr); + } else { + type = TSDB_DATA_TYPE_TIMESTAMP; + } - // in case of last_row query, we set the query timestamp to pMeterObj->lastKey; - if (isFirstLastRowQuery(pQuery)) { - pQuery->skey = pMeterObj->lastKey; - pQuery->ekey = pMeterObj->lastKey; - pQuery->lastKey = pQuery->skey; + initSlidingWindowInfo(&pRuntimeEnv->swindowResInfo, 3, type, pQuery->rowSize, pSupporter->pResult); } pSupporter->rawSKey = pQuery->skey; @@ -3757,14 +4381,79 @@ int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMete SPointInterpoSupporter interpInfo = {0}; pointInterpSupporterInit(pQuery, &interpInfo); - if ((normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &interpInfo) == false) || - 
(isFixedOutputQuery(pQuery) && !isTopBottomQuery(pQuery) && (pQuery->limit.offset > 0)) || - (isTopBottomQuery(pQuery) && pQuery->limit.offset >= pQuery->pSelectExpr[1].pBase.arg[0].argValue.i64)) { - sem_post(&pQInfo->dataReady); - pQInfo->over = 1; + /* + * in case of last_row query without query range, we set the query timestamp to + * pMeterObj->lastKey. Otherwise, keep the initial query time range unchanged. + */ - pointInterpSupporterDestroy(&interpInfo); - return TSDB_CODE_SUCCESS; + if (isFirstLastRowQuery(pQuery) && notHasQueryTimeRange(pQuery)) { + if (!normalizeUnBoundLastRowQuery(pSupporter, &interpInfo)) { + sem_post(&pQInfo->dataReady); + pQInfo->over = 1; + + pointInterpSupporterDestroy(&interpInfo); + return TSDB_CODE_SUCCESS; + } + } else { + // find the skey and ekey in case of sliding query + // todo refactor + if (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0) { + int64_t skey = 0; + + SWAP(pQuery->skey, pQuery->ekey, int64_t); + pQuery->order.order ^= 1; + pQuery->lastKey = pQuery->skey; + + if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &interpInfo, &skey) == false) { + sem_post(&pQInfo->dataReady); + pQInfo->over = 1; + + pointInterpSupporterDestroy(&interpInfo); + return TSDB_CODE_SUCCESS; + } + + pQuery->skey = skey; + + pQuery->order.order ^= 1; + SWAP(pQuery->skey, pQuery->ekey, int64_t); + + int64_t ekey = 0; + pQuery->lastKey = pQuery->skey; + if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &interpInfo, &ekey) == false) { + // + } + + pQuery->skey = ekey; + + TSKEY skey1, ekey1; + TSKEY windowSKey = 0, windowEKey = 0; + + TSKEY minKey = MIN(pQuery->skey, pQuery->ekey); + TSKEY maxKey = MAX(pQuery->skey, pQuery->ekey); + + doGetAlignedIntervalQueryRangeImpl(pQuery, minKey, minKey, maxKey, &skey1, &ekey1, &windowSKey, &windowEKey); + pRuntimeEnv->swindowResInfo.startTime = windowSKey; + + pSupporter->rawSKey = pQuery->skey; + pSupporter->rawEKey = pQuery->ekey; + + if 
(QUERY_IS_ASC_QUERY(pQuery)) { + pRuntimeEnv->swindowResInfo.prevSKey = windowSKey; + } else { + pRuntimeEnv->swindowResInfo.prevSKey = windowSKey + ((pQuery->skey - windowSKey) / pQuery->slidingTime) * pQuery->slidingTime; + } + } else { + int64_t ekey = 0; + if ((normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &interpInfo, &ekey) == false) || + (isFixedOutputQuery(pQuery) && !isTopBottomQuery(pQuery) && (pQuery->limit.offset > 0)) || + (isTopBottomQuery(pQuery) && pQuery->limit.offset >= pQuery->pSelectExpr[1].pBase.arg[0].argValue.i64)) { + sem_post(&pQInfo->dataReady); + pQInfo->over = 1; + + pointInterpSupporterDestroy(&interpInfo); + return TSDB_CODE_SUCCESS; + } + } } /* @@ -3780,7 +4469,7 @@ int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMete int64_t rs = taosGetIntervalStartTimestamp(pSupporter->rawSKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); - taosInitInterpoInfo(&pSupporter->runtimeEnv.interpoInfo, pQuery->order.order, rs, 0, 0); + taosInitInterpoInfo(&pRuntimeEnv->interpoInfo, pQuery->order.order, rs, 0, 0); allocMemForInterpo(pSupporter, pQuery, pMeterObj); if (!isPointInterpoQuery(pQuery)) { @@ -3789,6 +4478,8 @@ int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMete // the pQuery->skey is changed during normalizedFirstQueryRange, so set the newest lastkey value pQuery->lastKey = pQuery->skey; + pRuntimeEnv->stableQuery = false; + return TSDB_CODE_SUCCESS; } @@ -3803,14 +4494,15 @@ void vnodeQueryFreeQInfoEx(SQInfo *pQInfo) { teardownQueryRuntimeEnv(&pSupporter->runtimeEnv); tfree(pSupporter->pMeterSidExtInfo); - if (pSupporter->pMeterObj != NULL) { - taosCleanUpIntHash(pSupporter->pMeterObj); - pSupporter->pMeterObj = NULL; + if (pSupporter->pMetersHashTable != NULL) { + taosCleanUpHashTable(pSupporter->pMetersHashTable); + pSupporter->pMetersHashTable = NULL; } - if (pSupporter->pSidSet != NULL || isGroupbyNormalCol(pQInfo->query.pGroupbyExpr)) { + 
if (pSupporter->pSidSet != NULL || isGroupbyNormalCol(pQInfo->query.pGroupbyExpr) || + (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { int32_t size = 0; - if (isGroupbyNormalCol(pQInfo->query.pGroupbyExpr)) { + if (isGroupbyNormalCol(pQInfo->query.pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { size = 10000; } else if (pSupporter->pSidSet != NULL) { size = pSupporter->pSidSet->numOfSubSet; @@ -3821,15 +4513,6 @@ void vnodeQueryFreeQInfoEx(SQInfo *pQInfo) { } } - if (FD_VALID(pSupporter->meterOutputFd)) { - assert(pSupporter->meterOutputMMapBuf != NULL); - dTrace("QInfo:%p disk-based output buffer during query:%lld bytes", pQInfo, pSupporter->bufSize); - munmap(pSupporter->meterOutputMMapBuf, pSupporter->bufSize); - tclose(pSupporter->meterOutputFd); - - unlink(pSupporter->extBufFile); - } - tSidSetDestroy(&pSupporter->pSidSet); if (pSupporter->pMeterDataInfo != NULL) { @@ -3850,7 +4533,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey > pQuery->ekey)) || (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->ekey > pQuery->skey))) { - dTrace("QInfo:%p no result in time range %lld-%lld, order %d", pQInfo, pQuery->skey, pQuery->ekey, + dTrace("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->skey, pQuery->ekey, pQuery->order.order); sem_post(&pQInfo->dataReady); @@ -3864,10 +4547,11 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) pQuery->pointsRead = 0; changeExecuteScanOrder(pQuery, true); - - doInitQueryFileInfoFD(&pSupporter->runtimeEnv.vnodeFileInfo); - vnodeInitDataBlockInfo(&pSupporter->runtimeEnv.loadBlockInfo); - vnodeInitLoadCompBlockInfo(&pSupporter->runtimeEnv.loadCompBlockInfo); + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + + doInitQueryFileInfoFD(&pRuntimeEnv->vnodeFileInfo); + vnodeInitDataBlockInfo(&pRuntimeEnv->loadBlockInfo); + 
vnodeInitLoadCompBlockInfo(&pRuntimeEnv->loadCompBlockInfo); /* * since we employ the output control mechanism in main loop. @@ -3881,26 +4565,26 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) pQuery->lastKey = pQuery->skey; // create runtime environment - SSchema *pTagSchema = NULL; - - tTagSchema *pTagSchemaInfo = pSupporter->pSidSet->pTagSchema; - if (pTagSchemaInfo != NULL) { - pTagSchema = pTagSchemaInfo->pSchema; - } + SColumnModel *pTagSchemaInfo = pSupporter->pSidSet->pColumnModel; // get one queried meter - SMeterObj *pMeter = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[0]->sid); + SMeterObj *pMeter = getMeterObj(pSupporter->pMetersHashTable, pSupporter->pSidSet->pSids[0]->sid); - pSupporter->runtimeEnv.pTSBuf = param; - pSupporter->runtimeEnv.cur.vnodeIndex = -1; + pRuntimeEnv->pTSBuf = param; + pRuntimeEnv->cur.vnodeIndex = -1; // set the ts-comp file traverse order if (param != NULL) { - int16_t order = (pQuery->order.order == pSupporter->runtimeEnv.pTSBuf->tsOrder) ? TSQL_SO_ASC : TSQL_SO_DESC; - tsBufSetTraverseOrder(pSupporter->runtimeEnv.pTSBuf, order); + int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? 
TSQL_SO_ASC : TSQL_SO_DESC; + tsBufSetTraverseOrder(pRuntimeEnv->pTSBuf, order); + } + + int32_t ret = setupQueryRuntimeEnv(pMeter, pQuery, &pSupporter->runtimeEnv, pTagSchemaInfo, TSQL_SO_ASC, true); + if (ret != TSDB_CODE_SUCCESS) { + return ret; } - int32_t ret = setupQueryRuntimeEnv(pMeter, pQuery, &pSupporter->runtimeEnv, pTagSchema, TSQL_SO_ASC, true); + ret = allocateRuntimeEnvBuf(pRuntimeEnv, pMeter); if (ret != TSDB_CODE_SUCCESS) { return ret; } @@ -3908,41 +4592,25 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) tSidSetSort(pSupporter->pSidSet); vnodeRecordAllFiles(pQInfo, pMeter->vnode); - if ((ret = allocateOutputBufForGroup(pSupporter, pQuery, true)) != TSDB_CODE_SUCCESS) { + if ((ret = createQueryOutputBuffer(pSupporter, pQuery, true)) != TSDB_CODE_SUCCESS) { return ret; } if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // group by columns not tags; - pSupporter->runtimeEnv.hashList = taosInitIntHash(10039, sizeof(void *), taosHashInt); - pSupporter->runtimeEnv.usedIndex = 0; - pSupporter->runtimeEnv.pResult = pSupporter->pResult; + int16_t type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr); + initSlidingWindowInfo(&pRuntimeEnv->swindowResInfo, 4096, type, pQuery->rowSize, pSupporter->pResult); } - if (pQuery->nAggTimeInterval != 0) { - getTmpfilePath("tb_metric_mmap", pSupporter->extBufFile); - pSupporter->meterOutputFd = open(pSupporter->extBufFile, O_CREAT | O_RDWR, 0666); - - if (!FD_VALID(pSupporter->meterOutputFd)) { - dError("QInfo:%p failed to create file: %s on disk. 
%s", pQInfo, pSupporter->extBufFile, strerror(errno)); - return TSDB_CODE_SERV_OUT_OF_MEMORY; - } - - // set 4k page for each meter - pSupporter->numOfPages = pSupporter->numOfMeters; - - ftruncate(pSupporter->meterOutputFd, pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE); - pSupporter->runtimeEnv.numOfRowsPerPage = (DEFAULT_INTERN_BUF_SIZE - sizeof(tFilePage)) / pQuery->rowSize; - pSupporter->lastPageId = -1; - pSupporter->bufSize = pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE; - - pSupporter->meterOutputMMapBuf = - mmap(NULL, pSupporter->bufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pSupporter->meterOutputFd, 0); - if (pSupporter->meterOutputMMapBuf == MAP_FAILED) { - dError("QInfo:%p failed to map data file: %s to disk. %s", pQInfo, pSupporter->extBufFile, strerror(errno)); - return TSDB_CODE_SERV_OUT_OF_MEMORY; + if (pQuery->nAggTimeInterval != 0 || isSumAvgRateQuery(pQuery)) { + // one page for each table at least + ret = createResultBuf(&pRuntimeEnv->pResultBuf, pSupporter->numOfMeters, pQuery->rowSize); + if (ret != TSDB_CODE_SUCCESS) { + return ret; } } - + + pRuntimeEnv->numOfRowsPerPage = getNumOfRowsInResultPage(pQuery, true); + // metric query do not invoke interpolation, it will be done at the second-stage merge if (!isPointInterpoQuery(pQuery)) { pQuery->interpoType = TSDB_INTERPO_NONE; @@ -3950,8 +4618,9 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) TSKEY revisedStime = taosGetIntervalStartTimestamp(pSupporter->rawSKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit, pQuery->precision); - taosInitInterpoInfo(&pSupporter->runtimeEnv.interpoInfo, pQuery->order.order, revisedStime, 0, 0); - + taosInitInterpoInfo(&pRuntimeEnv->interpoInfo, pQuery->order.order, revisedStime, 0, 0); + pRuntimeEnv->stableQuery = true; + return TSDB_CODE_SUCCESS; } @@ -3969,7 +4638,7 @@ void vnodeDecMeterRefcnt(SQInfo *pQInfo) { } else { int32_t num = 0; for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) { - SMeterObj *pMeter 
= getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[i]->sid); + SMeterObj *pMeter = getMeterObj(pSupporter->pMetersHashTable, pSupporter->pSidSet->pSids[i]->sid); atomic_fetch_sub_32(&(pMeter->numOfQueries), 1); if (pMeter->numOfQueries > 0) { @@ -3989,32 +4658,18 @@ void vnodeDecMeterRefcnt(SQInfo *pQInfo) { } } -// todo merge with doRevisedResultsByLimit -void UNUSED_FUNC truncateResultByLimit(SQInfo *pQInfo, int64_t *final, int32_t *interpo) { - SQuery *pQuery = &(pQInfo->query); - - if (pQuery->limit.limit > 0 && ((*final) + pQInfo->pointsRead > pQuery->limit.limit)) { - int64_t num = (*final) + pQInfo->pointsRead - pQuery->limit.limit; - (*interpo) -= num; - (*final) -= num; - - setQueryStatus(pQuery, QUERY_COMPLETED); // query completed - } -} - -TSKEY getTimestampInCacheBlock(SCacheBlock *pBlock, int32_t index) { +TSKEY getTimestampInCacheBlock(SQueryRuntimeEnv *pRuntimeEnv, SCacheBlock *pBlock, int32_t index) { if (pBlock == NULL || index >= pBlock->numOfPoints || index < 0) { return -1; } - TSKEY *ts = (TSKEY *)pBlock->offset[0]; - return ts[index]; + return ((TSKEY *)(pRuntimeEnv->primaryColBuffer->data))[index]; } /* * NOTE: pQuery->pos will not change, the corresponding data block will be loaded into buffer * loadDataBlockOnDemand will change the value of pQuery->pos, according to the pQuery->lastKey - * */ + */ TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index) { SQuery *pQuery = pRuntimeEnv->pQuery; @@ -4025,29 +4680,23 @@ TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index) { SCompBlock *pBlock = getDiskDataBlock(pQuery, pQuery->slot); // this block must be loaded into buffer - SQueryLoadBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; + SLoadDataBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; assert(pQuery->pos >= 0 && pQuery->pos < pBlock->numOfPoints); SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; - bool loadTimestamp = true; - int32_t fileId = pQuery->fileId; - int32_t fileIndex 
= vnodeGetVnodeHeaderFileIdx(&fileId, pRuntimeEnv, pQuery->order.order); + int32_t fileIndex = vnodeGetVnodeHeaderFileIdx(&pQuery->fileId, pRuntimeEnv, pQuery->order.order); - if (!vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, fileIndex)) { - dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, slot:%d load data block due to primary key required", - GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot); + dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, slot:%d load data block due to primary key required", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot); - // todo handle failed to load data, file corrupted - // todo refactor the return value - int32_t ret = - loadDataBlockIntoMem(pBlock, &pQuery->pFields[pQuery->slot], pRuntimeEnv, fileIndex, loadTimestamp, true); - UNUSED(ret); - } + bool loadTS = true; + bool loadFields = true; + int32_t slot = pQuery->slot; - // the fields info is not loaded, load it into memory - if (pQuery->pFields == NULL || pQuery->pFields[pQuery->slot] == NULL) { - loadDataBlockFieldsInfo(pRuntimeEnv, pBlock, &pQuery->pFields[pQuery->slot]); + int32_t ret = loadDataBlockIntoMem(pBlock, &pQuery->pFields[slot], pRuntimeEnv, fileIndex, loadTS, loadFields); + if (ret != TSDB_CODE_SUCCESS) { + return -1; } SET_DATA_BLOCK_LOADED(pRuntimeEnv->blockStatus); @@ -4058,7 +4707,7 @@ TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index) { } // todo remove this function -static void getFirstDataBlockInCache(SQueryRuntimeEnv *pRuntimeEnv) { +static TSKEY getFirstDataBlockInCache(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; assert(pQuery->fileId == -1 && QUERY_IS_ASC_QUERY(pQuery)); @@ -4077,10 +4726,11 @@ static void getFirstDataBlockInCache(SQueryRuntimeEnv *pRuntimeEnv) { } else if (nextTimestamp > pQuery->ekey) { setQueryStatus(pQuery, QUERY_COMPLETED); } + + return nextTimestamp; } -// TODO 
handle case that the cache is allocated but not assign to SMeterObj -void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn) { +TSKEY getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn) { SQuery * pQuery = pRuntimeEnv->pQuery; SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; @@ -4088,7 +4738,7 @@ void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_sear dTrace( "QInfo:%p vid:%d sid:%d id:%s cache block re-allocated to other meter, " - "try get query start position in file/cache, qrange:%lld-%lld, lastKey:%lld", + "try get query start position in file/cache, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); if (step == QUERY_DESC_FORWARD_STEP) { @@ -4098,7 +4748,7 @@ void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_sear */ bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn); - dTrace("QInfo:%p vid:%d sid:%d id:%s find the possible position, fileId:%d, slot:%d, pos:%d", pQInfo, + dTrace("QInfo:%p vid:%d sid:%d id:%s find the possible position in file, fileId:%d, slot:%d, pos:%d", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot, pQuery->pos); if (ret) { @@ -4108,10 +4758,13 @@ void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_sear if (key < pQuery->ekey) { setQueryStatus(pQuery, QUERY_COMPLETED); } + + return key; } else { setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return -1; // no data to check } - } else { + } else { // asc query bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_GREATER_EQUAL, searchFn); if (ret) { dTrace("QInfo:%p vid:%d sid:%d id:%s find the possible position, fileId:%d, slot:%d, pos:%d", pQInfo, @@ -4123,24 +4776,27 @@ void 
getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_sear if (key > pQuery->ekey) { setQueryStatus(pQuery, QUERY_COMPLETED); } + + return key; } else { /* - * all data in file is less than the pQuery->lastKey, try cache. + * all data in file is less than the pQuery->lastKey, try cache again. * cache block status will be set in getFirstDataBlockInCache function */ - getFirstDataBlockInCache(pRuntimeEnv); + TSKEY key = getFirstDataBlockInCache(pRuntimeEnv); dTrace("QInfo:%p vid:%d sid:%d id:%s find the new position in cache, fileId:%d, slot:%d, pos:%d", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot, pQuery->pos); + return key; } } } static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __block_search_fn_t searchFn) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; - SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; assert(pQuery->fileId < 0); /* @@ -4160,6 +4816,7 @@ static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t ste int32_t currentSlot = pCacheInfo->currentSlot; int32_t firstSlot = getFirstCacheSlot(numOfBlocks, currentSlot, pCacheInfo); + if (step == QUERY_DESC_FORWARD_STEP && pQuery->slot == firstSlot) { bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn); if (ret) { @@ -4172,7 +4829,6 @@ static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t ste // the skip operation does NOT set the startPos yet // assert(pRuntimeEnv->startPos.fileId < 0); - } else { setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); } @@ -4181,7 +4837,7 @@ static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t ste /* now still iterate the cache data blocks */ pQuery->slot = (pQuery->slot + step + 
pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; - SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot); /* * data in this cache block has been flushed to disk, then we should locate the start position in file. @@ -4194,7 +4850,7 @@ static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t ste } else { pQuery->pos = (QUERY_IS_ASC_QUERY(pQuery)) ? 0 : pBlock->numOfPoints - 1; - TSKEY startkey = getTimestampInCacheBlock(pBlock, pQuery->pos); + TSKEY startkey = getTimestampInCacheBlock(pRuntimeEnv, pBlock, pQuery->pos); if (startkey < 0) { setQueryStatus(pQuery, QUERY_COMPLETED); } @@ -4229,22 +4885,25 @@ static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __bl (step == QUERY_DESC_FORWARD_STEP && (pQuery->slot == 0))) { fileIndex = getNextDataFileCompInfo(pRuntimeEnv, pMeterObj, step); /* data maybe in cache */ - if (fileIndex < 0) { + + if (fileIndex >= 0) { // next file + pQuery->slot = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->numOfBlocks - 1; + pQuery->pos = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->pBlock[pQuery->slot].numOfPoints - 1; + } else { // try data in cache assert(pQuery->fileId == -1); + if (step == QUERY_ASC_FORWARD_STEP) { getFirstDataBlockInCache(pRuntimeEnv); - } else { /* no data any more */ + } else { // no data to check for desc order query setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); } return DISK_DATA_LOADED; - } else { - pQuery->slot = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->numOfBlocks - 1; - pQuery->pos = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->pBlock[pQuery->slot].numOfPoints - 1; } } else { // next block in the same file int32_t fid = pQuery->fileId; fileIndex = vnodeGetVnodeHeaderFileIdx(&fid, pRuntimeEnv, pQuery->order.order); + pQuery->slot += step; pQuery->pos = (step == QUERY_ASC_FORWARD_STEP) ? 
0 : pQuery->pBlock[pQuery->slot].numOfPoints - 1; } @@ -4256,14 +4915,11 @@ static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __bl return DISK_DATA_LOADED; } + // load data block function will change the value of pQuery->pos int32_t ret = LoadDatablockOnDemand(&pQuery->pBlock[pQuery->slot], &pQuery->pFields[pQuery->slot], &pRuntimeEnv->blockStatus, pRuntimeEnv, fileIndex, pQuery->slot, searchFn, true); if (ret != DISK_DATA_LOADED) { - /* - * if it is the last block of file, set current access position at the last point of the meter in this file, - * in order to get the correct next access point, - */ return ret; } } else { // data in cache @@ -4273,64 +4929,45 @@ static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __bl return DISK_DATA_LOADED; } -static void doHandleFileBlockImpl(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pblockInfo, __block_search_fn_t searchFn, - SData **sdata, int32_t *numOfRes, int32_t blockLoadStatus, int32_t *forwardStep) { +static void doHandleDataBlockImpl(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pblockInfo, __block_search_fn_t searchFn, + int32_t *numOfRes, int32_t blockLoadStatus, int32_t *forwardStep) { SQuery * pQuery = pRuntimeEnv->pQuery; SQueryCostSummary *pSummary = &pRuntimeEnv->summary; + TSKEY * primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; int64_t start = taosGetTimestampUs(); - SCompBlock *pBlock = getDiskDataBlock(pQuery, pQuery->slot); - *pblockInfo = getBlockBasicInfo(pBlock, BLK_FILE_BLOCK); + if (IS_DISK_DATA_BLOCK(pQuery)) { + SCompBlock *pBlock = getDiskDataBlock(pQuery, pQuery->slot); + *pblockInfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_FILE_BLOCK); - TSKEY *primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; + if (blockLoadStatus == DISK_DATA_LOADED) { + *forwardStep = applyFunctionsOnBlock(pRuntimeEnv, pblockInfo, primaryKeys, pQuery->pFields[pQuery->slot], + searchFn, numOfRes); + } else { + *forwardStep = pblockInfo->size; + } - if 
(blockLoadStatus == DISK_DATA_LOADED) { - *forwardStep = applyFunctionsOnBlock(pRuntimeEnv, pblockInfo, primaryKeys, (char *)sdata, - pQuery->pFields[pQuery->slot], searchFn, numOfRes); + pSummary->fileTimeUs += (taosGetTimestampUs() - start); } else { - *forwardStep = pblockInfo->size; - } - - pSummary->fileTimeUs += (taosGetTimestampUs() - start); -} - -static void doHandleCacheBlockImpl(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pblockInfo, __block_search_fn_t searchFn, - int32_t *numOfRes, int32_t *forwardStep) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; - SQueryCostSummary *pSummary = &pRuntimeEnv->summary; + assert(vnodeIsDatablockLoaded(pRuntimeEnv, pRuntimeEnv->pMeterObj, -1, true) == DISK_BLOCK_NO_NEED_TO_LOAD); - int64_t start = taosGetTimestampUs(); + SCacheBlock *pBlock = getCacheDataBlock(pRuntimeEnv->pMeterObj, pRuntimeEnv, pQuery->slot); + *pblockInfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_CACHE_BLOCK); - // todo refactor getCacheDataBlock. 
- //#ifdef _CACHE_INVALID_TEST - // taosMsleep(20000); - //#endif - SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); - while (pBlock == NULL) { - getQueryPositionForCacheInvalid(pRuntimeEnv, searchFn); + *forwardStep = applyFunctionsOnBlock(pRuntimeEnv, pblockInfo, primaryKeys, NULL, searchFn, numOfRes); - if (IS_DISK_DATA_BLOCK(pQuery)) { // do check data block in file - break; - } else { - pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); - } + pSummary->cacheTimeUs += (taosGetTimestampUs() - start); } +} - if (IS_DISK_DATA_BLOCK(pQuery)) { - // start query position is located in file, try query on file block - doHandleFileBlockImpl(pRuntimeEnv, pblockInfo, searchFn, pRuntimeEnv->colDataBuffer, numOfRes, DISK_DATA_LOADED, - forwardStep); - } else { // also query in cache block - *pblockInfo = getBlockBasicInfo(pBlock, BLK_CACHE_BLOCK); - - TSKEY *primaryKeys = (TSKEY *)pBlock->offset[0]; - *forwardStep = - applyFunctionsOnBlock(pRuntimeEnv, pblockInfo, primaryKeys, (char *)pBlock, NULL, searchFn, numOfRes); +static void getNextLogicalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow* pTimeWindow) { + SQuery *pQuery = pRuntimeEnv->pQuery; - pSummary->cacheTimeUs += (taosGetTimestampUs() - start); - } + int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + pTimeWindow->skey += (pQuery->slidingTime * factor); + pTimeWindow->ekey += (pQuery->slidingTime * factor); } static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { @@ -4341,7 +4978,6 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { int64_t cnt = 0; SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; - SData ** sdata = pRuntimeEnv->colDataBuffer; __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; int32_t blockLoadStatus = DISK_DATA_LOADED; @@ -4353,7 +4989,8 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SPositionInfo *pStartPos = &pRuntimeEnv->startPos; assert(pQuery->slot 
== pStartPos->slot); - dTrace("QInfo:%p query start, qrange:%lld-%lld, lastkey:%lld, order:%d, start fileId:%d, slot:%d, pos:%d, bstatus:%d", + dTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", lastkey:%" PRId64 + ", order:%d, start fileId:%d, slot:%d, pos:%d, bstatus:%d", GET_QINFO_ADDR(pQuery), pQuery->skey, pQuery->ekey, pQuery->lastKey, pQuery->order.order, pStartPos->fileId, pStartPos->slot, pStartPos->pos, pRuntimeEnv->blockStatus); @@ -4366,14 +5003,10 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { int32_t numOfRes = 0; SBlockInfo blockInfo = {0}; + doHandleDataBlockImpl(pRuntimeEnv, &blockInfo, searchFn, &numOfRes, blockLoadStatus, &forwardStep); - if (IS_DISK_DATA_BLOCK(pQuery)) { - doHandleFileBlockImpl(pRuntimeEnv, &blockInfo, searchFn, sdata, &numOfRes, blockLoadStatus, &forwardStep); - } else { - doHandleCacheBlockImpl(pRuntimeEnv, &blockInfo, searchFn, &numOfRes, &forwardStep); - } - - dTrace("QInfo:%p check data block, brange:%lld-%lld, fileId:%d, slot:%d, pos:%d, bstatus:%d, rows:%d, checked:%d", + dTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 + ", fileId:%d, slot:%d, pos:%d, bstatus:%d, rows:%d, checked:%d", GET_QINFO_ADDR(pQuery), blockInfo.keyFirst, blockInfo.keyLast, pQuery->fileId, pQuery->slot, pQuery->pos, pRuntimeEnv->blockStatus, blockInfo.size, forwardStep); @@ -4422,10 +5055,10 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { } // check next block - void *pNextBlock = getGenericDataBlock(pMeterObj, pQuery, pQuery->slot); + void *pNextBlock = getGenericDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot); int32_t blockType = (IS_DISK_DATA_BLOCK(pQuery)) ? 
BLK_FILE_BLOCK : BLK_CACHE_BLOCK; - blockInfo = getBlockBasicInfo(pNextBlock, blockType); + blockInfo = getBlockBasicInfo(pRuntimeEnv, pNextBlock, blockType); if (!checkQueryRangeAgainstNextBlock(&blockInfo, pRuntimeEnv)) { break; } @@ -4436,8 +5069,8 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { static void updatelastkey(SQuery *pQuery, SMeterQueryInfo *pMeterQInfo) { pMeterQInfo->lastKey = pQuery->lastKey; } -void queryOnBlock(SMeterQuerySupportObj *pSupporter, int64_t *primaryKeys, int32_t blockStatus, char *data, - SBlockInfo *pBlockBasicInfo, SMeterDataInfo *pDataHeadInfoEx, SField *pFields, +void queryOnBlock(SMeterQuerySupportObj *pSupporter, int64_t *primaryKeys, int32_t blockStatus, + SBlockInfo *pBlockBasicInfo, SMeterDataInfo *pMeterDataInfo, SField *pFields, __block_search_fn_t searchFn) { /* cache blocks may be assign to other meter, abort */ if (pBlockBasicInfo->size <= 0) { @@ -4449,22 +5082,21 @@ void queryOnBlock(SMeterQuerySupportObj *pSupporter, int64_t *primaryKeys, int32 if (pQuery->nAggTimeInterval == 0) { // not interval query int32_t numOfRes = 0; - applyFunctionsOnBlock(pRuntimeEnv, pBlockBasicInfo, primaryKeys, data, pFields, searchFn, &numOfRes); + applyFunctionsOnBlock(pRuntimeEnv, pBlockBasicInfo, primaryKeys, pFields, searchFn, &numOfRes); // note: only fixed number of output for each group by operation if (numOfRes > 0) { - pSupporter->pResult[pDataHeadInfoEx->groupIdx].numOfRows = numOfRes; + pSupporter->pResult[pMeterDataInfo->groupIdx].numOfRows = numOfRes; } // used to decide the correct start position in cache after check all data in files - updatelastkey(pQuery, pDataHeadInfoEx->pMeterQInfo); + updatelastkey(pQuery, pMeterDataInfo->pMeterQInfo); if (pRuntimeEnv->pTSBuf != NULL) { - pDataHeadInfoEx->pMeterQInfo->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + pMeterDataInfo->pMeterQInfo->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); } } else { - applyIntervalQueryOnBlock(pSupporter, pDataHeadInfoEx, data, 
primaryKeys, pBlockBasicInfo, blockStatus, pFields, - searchFn); + applyIntervalQueryOnBlock(pSupporter, pMeterDataInfo, pBlockBasicInfo, blockStatus, pFields, searchFn); } } @@ -4472,26 +5104,27 @@ void queryOnBlock(SMeterQuerySupportObj *pSupporter, int64_t *primaryKeys, int32 * set tag value in SQLFunctionCtx * e.g.,tag information into input buffer */ -static void doSetTagValueInParam(tTagSchema *pTagSchema, int32_t tagColIdx, SMeterSidExtInfo *pMeterSidInfo, +static void doSetTagValueInParam(SColumnModel *pTagSchema, int32_t tagColIdx, SMeterSidExtInfo *pMeterSidInfo, tVariant *param) { assert(tagColIdx >= 0); - int32_t *fieldValueOffset = pTagSchema->colOffset; - - void * pStr = (char *)pMeterSidInfo->tags + fieldValueOffset[tagColIdx]; - SSchema *pCol = &pTagSchema->pSchema[tagColIdx]; + int16_t offset = getColumnModelOffset(pTagSchema, tagColIdx); + + void * pStr = (char *)pMeterSidInfo->tags + offset; + SSchema *pCol = getColumnModelSchema(pTagSchema, tagColIdx); tVariantDestroy(param); - tVariantCreateFromBinary(param, pStr, pCol->bytes, pCol->type); if (isNull(pStr, pCol->type)) { param->nType = TSDB_DATA_TYPE_NULL; + } else { + tVariantCreateFromBinary(param, pStr, pCol->bytes, pCol->type); } } void vnodeSetTagValueInParam(tSidSet *pSidSet, SQueryRuntimeEnv *pRuntimeEnv, SMeterSidExtInfo *pMeterSidInfo) { SQuery * pQuery = pRuntimeEnv->pQuery; - tTagSchema *pTagSchema = pSidSet->pTagSchema; + SColumnModel *pTagSchema = pSidSet->pColumnModel; SSqlFuncExprMsg *pFuncMsg = &pQuery->pSelectExpr[0].pBase; if (pQuery->numOfOutputCols == 1 && pFuncMsg->functionId == TSDB_FUNC_TS_COMP) { @@ -4559,31 +5192,30 @@ static void doMerge(SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, tFilePage } static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) { - if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_FIRST_DST || - functionId == TSDB_FUNC_LAST_DST) { + if (functionId == 
TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { switch (srcDataType) { case TSDB_DATA_TYPE_BINARY: - printf("%ld,%s\t", *(TSKEY *)data, (data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%s\t", *(TSKEY *)data, (data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BOOL: - printf("%ld,%d\t", *(TSKEY *)data, *(int8_t *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%d\t", *(TSKEY *)data, *(int8_t *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_SMALLINT: - printf("%ld,%d\t", *(TSKEY *)data, *(int16_t *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%d\t", *(TSKEY *)data, *(int16_t *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - printf("%ld,%ld\t", *(TSKEY *)data, *(TSKEY *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%" PRId64 "\t", *(TSKEY *)data, *(TSKEY *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_INT: - printf("%ld,%d\t", *(TSKEY *)data, *(int32_t *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%d\t", *(TSKEY *)data, *(int32_t *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_FLOAT: - printf("%ld,%f\t", *(TSKEY *)data, *(float *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%f\t", *(TSKEY *)data, *(float *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_DOUBLE: - printf("%ld,%lf\t", *(TSKEY *)data, *(double *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%lf\t", *(TSKEY *)data, *(double *)(data + TSDB_KEYSIZE + 1)); break; } } else if (functionId == TSDB_FUNC_AVG) { @@ -4592,8 +5224,8 @@ static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) printf("%lf,%lf\t", *(double *)data, *(double *)(data + sizeof(double))); } else if (functionId == TSDB_FUNC_TWA) { data += 1; - printf("%lf,%ld,%ld,%ld\t", *(double *)data, *(int64_t *)(data + 8), *(int64_t *)(data + 16), - *(int64_t *)(data + 24)); + printf("%lf,%" PRId64 ",%" PRId64 ",%" PRId64 "\t", *(double *)data, *(int64_t *)(data + 8), + *(int64_t 
*)(data + 16), *(int64_t *)(data + 24)); } else if (functionId == TSDB_FUNC_MIN || functionId == TSDB_FUNC_MAX) { switch (srcDataType) { case TSDB_DATA_TYPE_TINYINT: @@ -4605,7 +5237,7 @@ static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - printf("%ld\t", *(int64_t *)data); + printf("%" PRId64 "\t", *(int64_t *)data); break; case TSDB_DATA_TYPE_INT: printf("%d\t", *(int *)data); @@ -4621,7 +5253,7 @@ static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) if (srcDataType == TSDB_DATA_TYPE_FLOAT || srcDataType == TSDB_DATA_TYPE_DOUBLE) { printf("%lf\t", *(float *)data); } else { - printf("%ld\t", *(int64_t *)data); + printf("%" PRId64 "\t", *(int64_t *)data); } } else { printf("%s\t", data); @@ -4653,7 +5285,7 @@ void UNUSED_FUNC displayInterResult(SData **pdata, SQuery *pQuery, int32_t numOf } case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%ld\t", *(int64_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); + printf("%" PRId64 "\t", *(int64_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); break; case TSDB_DATA_TYPE_INT: printf("%d\t", *(int32_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); @@ -4670,19 +5302,9 @@ void UNUSED_FUNC displayInterResult(SData **pdata, SQuery *pQuery, int32_t numOf } } -static tFilePage *getFilePage(SMeterQuerySupportObj *pSupporter, int32_t pageId) { - assert(pageId <= pSupporter->lastPageId && pageId >= 0); - return (tFilePage *)(pSupporter->meterOutputMMapBuf + DEFAULT_INTERN_BUF_SIZE * pageId); -} - -static tFilePage *getMeterDataPage(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, int32_t pageId) { - SMeterQueryInfo *pInfo = pInfoEx->pMeterQInfo; - if (pageId >= pInfo->numOfPages) { - return NULL; - } - - int32_t realId = pInfo->pageList[pageId]; - return getFilePage(pSupporter, realId); +static tFilePage *getMeterDataPage(SQueryResultBuf* 
pResultBuf, SMeterQueryInfo *pMeterQueryInfo, int32_t index) { + SIDList pList = getDataBufPagesIdList(pResultBuf, pMeterQueryInfo->sid); + return getResultBufferPageById(pResultBuf, pList.pData[index]); } typedef struct Position { @@ -4691,14 +5313,16 @@ typedef struct Position { } Position; typedef struct SCompSupporter { - SMeterDataInfo ** pInfoEx; + SMeterDataInfo ** pMeterDataInfo; Position * pPosition; SMeterQuerySupportObj *pSupporter; } SCompSupporter; int64_t getCurrentTimestamp(SCompSupporter *pSupportor, int32_t meterIdx) { Position * pPos = &pSupportor->pPosition[meterIdx]; - tFilePage *pPage = getMeterDataPage(pSupportor->pSupporter, pSupportor->pInfoEx[meterIdx], pPos->pageIdx); + tFilePage *pPage = getMeterDataPage(pSupportor->pSupporter->runtimeEnv.pResultBuf, + pSupportor->pMeterDataInfo[meterIdx]->pMeterQInfo, pPos->pageIdx); + return *(int64_t *)(pPage->data + TSDB_KEYSIZE * pPos->rowIdx); } @@ -4706,10 +5330,11 @@ int32_t meterResultComparator(const void *pLeft, const void *pRight, void *param int32_t left = *(int32_t *)pLeft; int32_t right = *(int32_t *)pRight; - SCompSupporter *supportor = (SCompSupporter *)param; - - Position leftPos = supportor->pPosition[left]; - Position rightPos = supportor->pPosition[right]; + SCompSupporter *supporter = (SCompSupporter *)param; + SQueryResultBuf* pResultBuf = supporter->pSupporter->runtimeEnv.pResultBuf; + + Position leftPos = supporter->pPosition[left]; + Position rightPos = supporter->pPosition[right]; /* left source is exhausted */ if (leftPos.pageIdx == -1 && leftPos.rowIdx == -1) { @@ -4721,10 +5346,10 @@ int32_t meterResultComparator(const void *pLeft, const void *pRight, void *param return -1; } - tFilePage *pPageLeft = getMeterDataPage(supportor->pSupporter, supportor->pInfoEx[left], leftPos.pageIdx); + tFilePage *pPageLeft = getMeterDataPage(pResultBuf, supporter->pMeterDataInfo[left]->pMeterQInfo, leftPos.pageIdx); int64_t leftTimestamp = *(int64_t *)(pPageLeft->data + TSDB_KEYSIZE * 
leftPos.rowIdx); - tFilePage *pPageRight = getMeterDataPage(supportor->pSupporter, supportor->pInfoEx[right], rightPos.pageIdx); + tFilePage *pPageRight = getMeterDataPage(pResultBuf, supporter->pMeterDataInfo[right]->pMeterQInfo, rightPos.pageIdx); int64_t rightTimestamp = *(int64_t *)(pPageRight->data + TSDB_KEYSIZE * rightPos.rowIdx); if (leftTimestamp == rightTimestamp) { @@ -4739,16 +5364,20 @@ int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj *pSupporter) { SQuery * pQuery = pRuntimeEnv->pQuery; int64_t st = taosGetTimestampMs(); + int32_t ret = TSDB_CODE_SUCCESS; while (pSupporter->subgroupIdx < pSupporter->pSidSet->numOfSubSet) { int32_t start = pSupporter->pSidSet->starterPos[pSupporter->subgroupIdx]; int32_t end = pSupporter->pSidSet->starterPos[pSupporter->subgroupIdx + 1]; - int32_t ret = - doMergeMetersResultsToGroupRes(pSupporter, pQuery, pRuntimeEnv, pSupporter->pMeterDataInfo, start, end); + ret = doMergeMetersResultsToGroupRes(pSupporter, pQuery, pRuntimeEnv, pSupporter->pMeterDataInfo, start, end); + if (ret < 0) { // not enough disk space to save the data into disk + return -1; + } + pSupporter->subgroupIdx += 1; - /* this group generates at least one result, return results */ + // this group generates at least one result, return results if (ret > 0) { break; } @@ -4760,7 +5389,7 @@ int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj *pSupporter) { dTrace("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%lldms", GET_QINFO_ADDR(pQuery), pSupporter->subgroupIdx - 1, pSupporter->pSidSet->numOfSubSet, taosGetTimestampMs() - st); - return pSupporter->numOfGroupResultPages; + return TSDB_CODE_SUCCESS; } void copyResToQueryResultBuf(SMeterQuerySupportObj *pSupporter, SQuery *pQuery) { @@ -4768,7 +5397,9 @@ void copyResToQueryResultBuf(SMeterQuerySupportObj *pSupporter, SQuery *pQuery) pSupporter->numOfGroupResultPages = 0; // current results of group has been sent to client, try next group - 
mergeMetersResultToOneGroups(pSupporter); + if (mergeMetersResultToOneGroups(pSupporter) != TSDB_CODE_SUCCESS) { + return; // failed to save data in the disk + } // set current query completed if (pSupporter->numOfGroupResultPages == 0 && pSupporter->subgroupIdx == pSupporter->pSidSet->numOfSubSet) { @@ -4778,23 +5409,40 @@ void copyResToQueryResultBuf(SMeterQuerySupportObj *pSupporter, SQuery *pQuery) } SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; - char * pStart = pSupporter->meterOutputMMapBuf + DEFAULT_INTERN_BUF_SIZE * (pSupporter->lastPageId + 1) + - pSupporter->groupResultSize * pSupporter->offset; - - uint64_t numOfElem = ((tFilePage *)pStart)->numOfElems; - assert(numOfElem <= pQuery->pointsToRead); - - for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - memcpy(pQuery->sdata[i], pStart, pRuntimeEnv->pCtx[i].outputBytes * numOfElem + sizeof(tFilePage)); - pStart += pRuntimeEnv->pCtx[i].outputBytes * pQuery->pointsToRead + sizeof(tFilePage); + SQueryResultBuf* pResultBuf = pRuntimeEnv->pResultBuf; + + SIDList list = getDataBufPagesIdList(pResultBuf, 200000 + pSupporter->offset + (pSupporter->subgroupIdx - 1)* 10000); + + int32_t total = 0; + for(int32_t i = 0; i < list.size; ++i) { + tFilePage* pData = getResultBufferPageById(pResultBuf, list.pData[i]); + total += pData->numOfElems; + } + + pQuery->sdata[0]->len = total; + + int32_t offset = 0; + for(int32_t num = 0; num < list.size; ++num) { + tFilePage* pData = getResultBufferPageById(pResultBuf, list.pData[num]); + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes; + char* pDest = pQuery->sdata[i]->data; + + memcpy(pDest + offset*bytes, pData->data + pRuntimeEnv->offset[i] * pData->numOfElems, bytes * pData->numOfElems); + } + + offset += pData->numOfElems; } - pQuery->pointsRead += numOfElem; + assert(pQuery->pointsRead == 0); + + pQuery->pointsRead += pQuery->sdata[0]->len; pSupporter->offset += 1; } int32_t 
doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, SQueryRuntimeEnv *pRuntimeEnv, - SMeterDataInfo *pMeterHeadDataInfo, int32_t start, int32_t end) { + SMeterDataInfo *pMeterDataInfo, int32_t start, int32_t end) { // calculate the maximum required space if (pSupporter->groupResultSize == 0) { for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { @@ -4808,8 +5456,11 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery int32_t numOfMeters = 0; for (int32_t i = start; i < end; ++i) { - if (pMeterHeadDataInfo[i].pMeterQInfo->numOfPages > 0 && pMeterHeadDataInfo[i].pMeterQInfo->numOfRes > 0) { - pValidMeter[numOfMeters] = &pMeterHeadDataInfo[i]; + int32_t sid = pMeterDataInfo[i].pMeterQInfo->sid; + SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, sid); + + if (list.size > 0 && pMeterDataInfo[i].pMeterQInfo->numOfRes > 0) { + pValidMeter[numOfMeters] = &pMeterDataInfo[i]; // set the merge start position: page:0, index:0 posArray[numOfMeters].pageIdx = 0; posArray[numOfMeters++].rowIdx = 0; @@ -4838,19 +5489,23 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery while (1) { int32_t pos = pTree->pNode[0].index; Position * position = &cs.pPosition[pos]; - tFilePage *pPage = getMeterDataPage(cs.pSupporter, pValidMeter[pos], position->pageIdx); + SQueryResultBuf* pResultBuf = cs.pSupporter->runtimeEnv.pResultBuf; + tFilePage *pPage = getMeterDataPage(pResultBuf, pValidMeter[pos]->pMeterQInfo, position->pageIdx); int64_t ts = getCurrentTimestamp(&cs, pos); - if (ts == lastTimestamp) { // merge with the last one + if (ts == lastTimestamp) {// merge with the last one doMerge(pRuntimeEnv, ts, pPage, position->rowIdx, true); } else { // copy data to disk buffer if (buffer[0]->numOfElems == pQuery->pointsToRead) { - flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv); + if (flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv) != TSDB_CODE_SUCCESS) { + return -1; + } + 
resetMergeResultBuf(pQuery, pCtx); } - pPage = getMeterDataPage(cs.pSupporter, pValidMeter[pos], position->pageIdx); + pPage = getMeterDataPage(pResultBuf, pValidMeter[pos]->pMeterQInfo, position->pageIdx); if (pPage->numOfElems <= 0) { // current source data page is empty // do nothing } else { @@ -4866,17 +5521,19 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery cs.pPosition[pos].pageIdx += 1; // try next page // check if current page is empty or not. if it is empty, ignore it and try next - if (cs.pPosition[pos].pageIdx <= cs.pInfoEx[pos]->pMeterQInfo->numOfPages - 1) { - tFilePage *newPage = getMeterDataPage(cs.pSupporter, pValidMeter[pos], position->pageIdx); + SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, cs.pMeterDataInfo[pos]->pMeterQInfo->sid); + if (cs.pPosition[pos].pageIdx <= list.size - 1) { + tFilePage *newPage = getMeterDataPage(pResultBuf, pValidMeter[pos]->pMeterQInfo, position->pageIdx); + + // if current source data page is null, it must be the last page of source output page if (newPage->numOfElems <= 0) { - // if current source data page is null, it must be the last page of source output page cs.pPosition[pos].pageIdx += 1; - assert(cs.pPosition[pos].pageIdx >= cs.pInfoEx[pos]->pMeterQInfo->numOfPages - 1); + assert(cs.pPosition[pos].pageIdx >= list.size - 1); } } // the following code must be executed if current source pages are exhausted - if (cs.pPosition[pos].pageIdx >= cs.pInfoEx[pos]->pMeterQInfo->numOfPages) { + if (cs.pPosition[pos].pageIdx >= list.size) { cs.pPosition[pos].pageIdx = -1; cs.pPosition[pos].rowIdx = -1; @@ -4893,7 +5550,15 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery } if (buffer[0]->numOfElems != 0) { // there are data in buffer - flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv); + if (flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv) != TSDB_CODE_SUCCESS) { +// dError("QInfo:%p failed to flush data into temp file, abort 
query", GET_QINFO_ADDR(pQuery), +// pSupporter->extBufFile); + tfree(pTree); + tfree(pValidMeter); + tfree(posArray); + + return -1; + } } int64_t endt = taosGetTimestampMs(); @@ -4902,7 +5567,7 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len); #endif - dTrace("QInfo:%p result merge completed, elapsed time:%lld ms", GET_QINFO_ADDR(pQuery), endt - startt); + dTrace("QInfo:%p result merge completed, elapsed time:%" PRId64 " ms", GET_QINFO_ADDR(pQuery), endt - startt); tfree(pTree); tfree(pValidMeter); tfree(posArray); @@ -4912,49 +5577,41 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery return pSupporter->numOfGroupResultPages; } -static void extendDiskBuf(SMeterQuerySupportObj *pSupporter, int32_t numOfPages) { - assert(pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE == pSupporter->bufSize); - - int32_t ret = munmap(pSupporter->meterOutputMMapBuf, pSupporter->bufSize); - pSupporter->numOfPages = numOfPages; - - // disk-based output buffer is exhausted, try to extend the disk-based buffer - ret = ftruncate(pSupporter->meterOutputFd, pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE); - if (ret != 0) { - perror("error in allocate the disk-based buffer"); - return; - } - - pSupporter->bufSize = pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE; - pSupporter->meterOutputMMapBuf = - mmap(NULL, pSupporter->bufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pSupporter->meterOutputFd, 0); -} - -void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, const SQueryRuntimeEnv *pRuntimeEnv) { - int32_t numOfMeterResultBufPages = pSupporter->lastPageId + 1; - int64_t dstSize = numOfMeterResultBufPages * DEFAULT_INTERN_BUF_SIZE + - pSupporter->groupResultSize * (pSupporter->numOfGroupResultPages + 1); - - int32_t requiredPages = pSupporter->numOfPages; - if (requiredPages * DEFAULT_INTERN_BUF_SIZE < dstSize) { - while 
(requiredPages * DEFAULT_INTERN_BUF_SIZE < dstSize) { - requiredPages += pSupporter->numOfMeters; +int32_t flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, + const SQueryRuntimeEnv *pRuntimeEnv) { + SQueryResultBuf* pResultBuf = pRuntimeEnv->pResultBuf; + int32_t capacity = (DEFAULT_INTERN_BUF_SIZE - sizeof(tFilePage))/ pQuery->rowSize; + + // the base value for group result, since the maximum number of table for each vnode will not exceed 100,000. + int32_t base = 200000; + int32_t pageId = -1; + + int32_t remain = pQuery->sdata[0]->len; + int32_t offset = 0; + + while(remain > 0) { + int32_t r = remain; + if (r > capacity) { + r = capacity; } - - extendDiskBuf(pSupporter, requiredPages); - } - - char *lastPosition = pSupporter->meterOutputMMapBuf + DEFAULT_INTERN_BUF_SIZE * numOfMeterResultBufPages + - pSupporter->groupResultSize * pSupporter->numOfGroupResultPages; - - for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - int32_t size = pRuntimeEnv->pCtx[i].outputBytes * pQuery->sdata[0]->len + sizeof(tFilePage); - memcpy(lastPosition, pQuery->sdata[i], size); - - lastPosition += pRuntimeEnv->pCtx[i].outputBytes * pQuery->pointsToRead + sizeof(tFilePage); + + tFilePage* buf = getNewDataBuf(pResultBuf, base + pSupporter->subgroupIdx*10000 + pSupporter->numOfGroupResultPages, &pageId); + + //pagewise copy to dest buffer + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes; + buf->numOfElems = r; + + memcpy(buf->data + pRuntimeEnv->offset[i] * buf->numOfElems, ((char*)pQuery->sdata[i]->data) + offset * bytes, + buf->numOfElems * bytes); + } + + offset += r; + remain -= r; } - + pSupporter->numOfGroupResultPages += 1; + return TSDB_CODE_SUCCESS; } void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx) { @@ -4972,7 +5629,7 @@ void setMeterDataInfo(SMeterDataInfo *pMeterDataInfo, SMeterObj *pMeterObj, int3 pMeterDataInfo->meterOrderIdx = meterIdx; } -void 
doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) { +int32_t doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; @@ -4983,26 +5640,42 @@ void doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) { if (pMeterInfo[i].pMeterQInfo != NULL && pMeterInfo[i].pMeterQInfo->lastResRows > 0) { int32_t index = pMeterInfo[i].meterOrderIdx; - pRuntimeEnv->pMeterObj = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[index]->sid); + pRuntimeEnv->pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pSupporter->pSidSet->pSids[index]->sid); assert(pRuntimeEnv->pMeterObj == pMeterInfo[i].pMeterObj); - setIntervalQueryExecutionContext(pSupporter, i, pMeterInfo[i].pMeterQInfo); - saveResult(pSupporter, pMeterInfo[i].pMeterQInfo, pMeterInfo[i].pMeterQInfo->lastResRows); + int32_t ret = setIntervalQueryExecutionContext(pSupporter, i, pMeterInfo[i].pMeterQInfo); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + ret = saveResult(pSupporter, pMeterInfo[i].pMeterQInfo, pMeterInfo[i].pMeterQInfo->lastResRows); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } } } } + + return TSDB_CODE_SUCCESS; } void disableFunctForSuppleScan(SQueryRuntimeEnv *pRuntimeEnv, int32_t order) { SQuery *pQuery = pRuntimeEnv->pQuery; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0)) { for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { pRuntimeEnv->pCtx[i].order = (pRuntimeEnv->pCtx[i].order) ^ 1; } - for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) { - SOutputRes *buf = &pRuntimeEnv->pResult[i]; + SSlidingWindowInfo *pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + + for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SWindowStatus *pStatus = &pSlidingWindowInfo->pStatus[i]; + if (!pStatus->closed) { + continue; + } + + SOutputRes *buf = 
&pSlidingWindowInfo->pResult[i]; // open/close the specified query for each group result for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { @@ -5046,34 +5719,62 @@ void enableFunctForMasterScan(SQueryRuntimeEnv *pRuntimeEnv, int32_t order) { pQuery->order.order = (pQuery->order.order ^ 1); } -void createGroupResultBuf(SQuery *pQuery, SOutputRes *pOneResult, bool isMetricQuery) { - int32_t numOfOutput = pQuery->numOfOutputCols; - - pOneResult->resultInfo = calloc((size_t)numOfOutput, sizeof(SResultInfo)); - - pOneResult->result = malloc(POINTER_BYTES * numOfOutput); - for (int32_t i = 0; i < numOfOutput; ++i) { - size_t size = pQuery->pSelectExpr[i].interResBytes; - SResultInfo *pResInfo = &pOneResult->resultInfo[i]; - - pOneResult->result[i] = malloc(sizeof(tFilePage) + size * pOneResult->nAlloc); - pOneResult->result[i]->numOfElems = 0; +void createQueryResultBuf(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes *pResultRow, bool isSTableQuery, SPosInfo *posInfo) { + SQuery* pQuery = pRuntimeEnv->pQuery; + + int32_t numOfCols = pQuery->numOfOutputCols; - setResultInfoBuf(pResInfo, (int32_t)size, isMetricQuery); + pResultRow->resultInfo = calloc((size_t)numOfCols, sizeof(SResultInfo)); + pResultRow->pos = *posInfo;//page->data + (pRuntimeEnv->offset[i] * pRuntimeEnv->numOfRowsPerPage) + page->numOfElems*s1; + + for (int32_t i = 0; i < numOfCols; ++i) { + SResultInfo *pResultInfo = &pResultRow->resultInfo[i]; + size_t size = pQuery->pSelectExpr[i].interResBytes; + setResultInfoBuf(pResultInfo, (int32_t)size, isSTableQuery); } } -void clearGroupResultBuf(SOutputRes *pOneOutputRes, int32_t nOutputCols) { +void clearGroupResultBuf(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes *pOneOutputRes) { if (pOneOutputRes == NULL) { return; } - for (int32_t i = 0; i < nOutputCols; ++i) { - SResultInfo *pResInfo = &pOneOutputRes->resultInfo[i]; - int32_t size = sizeof(tFilePage) + pResInfo->bufLen * pOneOutputRes->nAlloc; + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; 
++i) { + SResultInfo *pResultInfo = &pOneOutputRes->resultInfo[i]; +// int32_t size = sizeof(tFilePage) + pResultInfo->bufLen * pOneOutputRes->nAlloc; + +// memset(pOneOutputRes->pos[i], 0, (size_t)size); + char* s = getPosInResultPage(pRuntimeEnv, i, pOneOutputRes); + size_t size = pRuntimeEnv->pQuery->pSelectExpr[i].resBytes; + memset(s, 0, size); + + resetResultInfo(pResultInfo); + } +} + +void copyGroupResultBuf(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes* dst, const SOutputRes* src) { + dst->numOfRows = src->numOfRows; + dst->nAlloc = src->nAlloc; + + int32_t nOutputCols = pRuntimeEnv->pQuery->numOfOutputCols; + + for(int32_t i = 0; i < nOutputCols; ++i) { + SResultInfo *pDst = &dst->resultInfo[i]; + SResultInfo *pSrc = &src->resultInfo[i]; + + char* buf = pDst->interResultBuf; + memcpy(pDst, pSrc, sizeof(SResultInfo)); + pDst->interResultBuf = buf; // restore the allocated buffer + + // copy the result info struct + memcpy(pDst->interResultBuf, pSrc->interResultBuf, pDst->bufLen); - memset(pOneOutputRes->result[i], 0, (size_t)size); - resetResultInfo(pResInfo); + // copy the output buffer data from src to dst, the position info keep unchanged + char* dstBuf = getPosInResultPage(pRuntimeEnv, i, dst); + char* srcBuf = getPosInResultPage(pRuntimeEnv, i, src); + size_t s = pRuntimeEnv->pQuery->pSelectExpr[i].resBytes; + + memcpy(dstBuf, srcBuf, s); } } @@ -5083,12 +5784,12 @@ void destroyGroupResultBuf(SOutputRes *pOneOutputRes, int32_t nOutputCols) { } for (int32_t i = 0; i < nOutputCols; ++i) { - free(pOneOutputRes->result[i]); +// free(pOneOutputRes->pos[i]); free(pOneOutputRes->resultInfo[i].interResultBuf); } free(pOneOutputRes->resultInfo); - free(pOneOutputRes->result); +// free(pOneOutputRes->result); } void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { @@ -5097,13 +5798,7 @@ void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; - - // ts_comp query 
does not required reversed output - if (QUERY_IS_ASC_QUERY(pQuery) || isTSCompQuery(pQuery)) { - pCtx->aOutputBuf = pQuery->sdata[i]->data; - } else { // point to the last position of output buffer for desc query - pCtx->aOutputBuf = pQuery->sdata[i]->data + (rows - 1) * pCtx->outputBytes; - } + pCtx->aOutputBuf = pQuery->sdata[i]->data; /* * set the output buffer information and intermediate buffer @@ -5126,7 +5821,6 @@ void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) { SQuery *pQuery = pRuntimeEnv->pQuery; - int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); // reset the execution contexts for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { @@ -5135,7 +5829,7 @@ void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) { // set next output position if (IS_OUTER_FORWARD(aAggs[functionId].nStatus)) { - pRuntimeEnv->pCtx[j].aOutputBuf += pRuntimeEnv->pCtx[j].outputBytes * output * factor; + pRuntimeEnv->pCtx[j].aOutputBuf += pRuntimeEnv->pCtx[j].outputBytes * output /** factor*/; } if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { @@ -5146,7 +5840,7 @@ void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) { * * diff function is handled in multi-output function */ - pRuntimeEnv->pCtx[j].ptsOutputBuf += TSDB_KEYSIZE * output * factor; + pRuntimeEnv->pCtx[j].ptsOutputBuf += TSDB_KEYSIZE * output/* * factor*/; } resetResultInfo(pRuntimeEnv->pCtx[j].resultInfo); @@ -5159,6 +5853,7 @@ void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { int32_t functionId = pQuery->pSelectExpr[j].pBase.functionId; pRuntimeEnv->pCtx[j].currentStage = 0; + aAggs[functionId].init(&pRuntimeEnv->pCtx[j]); } } @@ -5181,30 +5876,17 @@ void doSkipResults(SQueryRuntimeEnv *pRuntimeEnv) { pQuery->over &= (~QUERY_RESBUF_FULL); } else { int32_t numOfSkip = (int32_t)pQuery->limit.offset; - 
int32_t size = pQuery->pointsRead; - pQuery->pointsRead -= numOfSkip; - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; - int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes; - if (QUERY_IS_ASC_QUERY(pQuery)) { - memmove(pQuery->sdata[i]->data, pQuery->sdata[i]->data + bytes * numOfSkip, pQuery->pointsRead * bytes); - } else { // DESC query - int32_t maxrows = pQuery->pointsToRead; - - memmove(pQuery->sdata[i]->data + (maxrows - pQuery->pointsRead) * bytes, - pQuery->sdata[i]->data + (maxrows - size) * bytes, pQuery->pointsRead * bytes); - } - - pRuntimeEnv->pCtx[i].aOutputBuf -= bytes * numOfSkip * step; + memmove(pQuery->sdata[i]->data, pQuery->sdata[i]->data + bytes * numOfSkip, pQuery->pointsRead * bytes); + pRuntimeEnv->pCtx[i].aOutputBuf += bytes * numOfSkip; if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { - pRuntimeEnv->pCtx[i].ptsOutputBuf -= TSDB_KEYSIZE * numOfSkip * step; + pRuntimeEnv->pCtx[i].ptsOutputBuf += TSDB_KEYSIZE * numOfSkip; } } @@ -5212,27 +5894,6 @@ void doSkipResults(SQueryRuntimeEnv *pRuntimeEnv) { } } -/** - * move remain data to the start position of output buffer - * @param pRuntimeEnv - */ -void moveDescOrderResultsToFront(SQueryRuntimeEnv *pRuntimeEnv) { - SQuery *pQuery = pRuntimeEnv->pQuery; - int32_t maxrows = pQuery->pointsToRead; - - if (QUERY_IS_ASC_QUERY(pQuery) || isTSCompQuery(pQuery)) { - return; - } - - if (pQuery->pointsRead > 0 && pQuery->pointsRead < maxrows) { - for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes; - memmove(pQuery->sdata[i]->data, pQuery->sdata[i]->data + (maxrows - pQuery->pointsRead) * bytes, - pQuery->pointsRead * bytes); - } - } -} - typedef struct SQueryStatus { SPositionInfo start; SPositionInfo next; @@ -5252,6 +5913,9 @@ static void 
queryStatusSave(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus *pStatus pStatus->overStatus = pQuery->over; pStatus->lastKey = pQuery->lastKey; + pStatus->skey = pQuery->skey; + pStatus->ekey = pQuery->ekey; + pStatus->start = pRuntimeEnv->startPos; pStatus->next = pRuntimeEnv->nextPos; pStatus->end = pRuntimeEnv->endPos; @@ -5268,13 +5932,18 @@ static void queryStatusSave(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus *pStatus SWAP(pQuery->skey, pQuery->ekey, TSKEY); pQuery->lastKey = pQuery->skey; pRuntimeEnv->startPos = pRuntimeEnv->endPos; + + SWAP(pRuntimeEnv->intervalWindow.skey, pRuntimeEnv->intervalWindow.ekey, TSKEY); } static void queryStatusRestore(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus *pStatus) { SQuery *pQuery = pRuntimeEnv->pQuery; SWAP(pQuery->skey, pQuery->ekey, TSKEY); - + SWAP(pRuntimeEnv->intervalWindow.skey, pRuntimeEnv->intervalWindow.ekey, TSKEY); + pQuery->lastKey = pStatus->lastKey; + pQuery->skey = pStatus->skey; + pQuery->ekey = pStatus->ekey; pQuery->over = pStatus->overStatus; @@ -5293,10 +5962,13 @@ static void doSingleMeterSupplementScan(SQueryRuntimeEnv *pRuntimeEnv) { return; } + dTrace("QInfo:%p start to supp scan", GET_QINFO_ADDR(pQuery)); + SET_SUPPLEMENT_SCAN_FLAG(pRuntimeEnv); - // usually this load operation will incure load disk block operation + // usually this load operation will incur load disk block operation TSKEY endKey = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->endPos); + assert((QUERY_IS_ASC_QUERY(pQuery) && endKey <= pQuery->ekey) || (!QUERY_IS_ASC_QUERY(pQuery) && endKey >= pQuery->ekey)); @@ -5332,17 +6004,25 @@ void vnodeScanAllData(SQueryRuntimeEnv *pRuntimeEnv) { /* store the start query position */ savePointPosition(&pRuntimeEnv->startPos, pQuery->fileId, pQuery->slot, pQuery->pos); - + int64_t skey = pQuery->lastKey; + while (1) { doScanAllDataBlocks(pRuntimeEnv); - // applied to agg functions (e.g., stddev) bool toContinue = true; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + if 
(isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { // for each group result, call the finalize function for each column - for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) { - SOutputRes *buf = &pRuntimeEnv->pResult[i]; + SSlidingWindowInfo *pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + + for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SOutputRes *buf = &pSlidingWindowInfo->pResult[i]; + + SWindowStatus *pStatus = &pSlidingWindowInfo->pStatus[i]; + if (!pStatus->closed) { + continue; + } + setGroupOutputBuffer(pRuntimeEnv, buf); for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { @@ -5380,33 +6060,32 @@ void vnodeScanAllData(SQueryRuntimeEnv *pRuntimeEnv) { } } + int64_t newSkey = pQuery->skey; + pQuery->skey = skey; + doSingleMeterSupplementScan(pRuntimeEnv); - - // reset status code - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) { - SOutputRes *buf = &pRuntimeEnv->pResult[i]; - for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { - buf->resultInfo[j].complete = false; - } - } - } else { - for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[i]); - if (pResInfo != NULL) { - pResInfo->complete = false; - } - } - } + + // update the pQuery->skey/pQuery->ekey to limit the scan scope of sliding query during + // supplementary scan + pQuery->skey = newSkey; } void doFinalizeResult(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { // for each group result, call the finalize function for each column - for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) { - SOutputRes *buf = &pRuntimeEnv->pResult[i]; + SSlidingWindowInfo *pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + if 
(isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + closeAllSlidingWindow(pSlidingWindowInfo); + } + + for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SOutputRes *buf = &pSlidingWindowInfo->pResult[i]; + if (!slidingWindowClosed(pSlidingWindowInfo, i)) { + continue; + } + setGroupOutputBuffer(pRuntimeEnv, buf); for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { @@ -5462,7 +6141,48 @@ int64_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv) { } } - return maxOutput; + return maxOutput; +} + +static int32_t getNextIntervalQueryRange(SMeterQuerySupportObj *pSupporter, SQueryRuntimeEnv *pRuntimeEnv, + int64_t *skey, int64_t *ekey) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + *skey = pRuntimeEnv->intervalWindow.skey + (pQuery->slidingTime * factor); + *ekey = pRuntimeEnv->intervalWindow.ekey + (pQuery->slidingTime * factor); + + if (pQuery->slidingTime > 0) { + if (QUERY_IS_ASC_QUERY(pQuery)) { + // the next sliding window is not contained in the query time range + if (*skey < pSupporter->rawSKey) { + *skey = pSupporter->rawSKey; + } + + if (*skey > pSupporter->rawEKey) { + return QUERY_COMPLETED; + } + + if (*ekey > pSupporter->rawEKey) { + *ekey = pSupporter->rawEKey; + } + } else { + if (*skey > pSupporter->rawSKey) { + *skey = pSupporter->rawSKey; + } + + if (*skey < pSupporter->rawEKey) { + return QUERY_COMPLETED; + } + + if (*ekey < pSupporter->rawEKey) { + *ekey = pSupporter->rawEKey; + } + } + } + + return QUERY_NOT_COMPLETED; } /* @@ -5470,32 +6190,25 @@ int64_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv) { */ void forwardIntervalQueryRange(SMeterQuerySupportObj *pSupporter, SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; - - int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - pQuery->ekey += (pQuery->nAggTimeInterval * factor); - pQuery->skey = pQuery->ekey - (pQuery->nAggTimeInterval - 1) * factor; - - // boundary check - if 
(QUERY_IS_ASC_QUERY(pQuery)) { - if (pQuery->skey > pSupporter->rawEKey) { + if (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0) { + if ((QUERY_IS_ASC_QUERY(pQuery) && pQuery->lastKey >= pQuery->ekey) || + (!QUERY_IS_ASC_QUERY(pQuery) && pQuery->lastKey <= pQuery->ekey)) { setQueryStatus(pQuery, QUERY_COMPLETED); - return; + } else { + /*TSKEY nextTimestamp =*/ loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos); } - if (pQuery->ekey > pSupporter->rawEKey) { - pQuery->ekey = pSupporter->rawEKey; - } - } else { - if (pQuery->skey < pSupporter->rawEKey) { - setQueryStatus(pQuery, QUERY_COMPLETED); - return; - } + return; + } - if (pQuery->ekey < pSupporter->rawEKey) { - pQuery->ekey = pSupporter->rawEKey; - } + int32_t r = getNextIntervalQueryRange(pSupporter, pRuntimeEnv, &pQuery->skey, &pQuery->ekey); + if (r == QUERY_COMPLETED) { + setQueryStatus(pQuery, QUERY_COMPLETED); + return; } + getNextLogicalQueryRange(pRuntimeEnv, &pRuntimeEnv->intervalWindow); + /* ensure the search in cache will return right position */ pQuery->lastKey = pQuery->skey; @@ -5510,7 +6223,7 @@ void forwardIntervalQueryRange(SMeterQuerySupportObj *pSupporter, SQueryRuntimeE // bridge the gap in group by time function if ((nextTimestamp > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || (nextTimestamp < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { - getAlignedIntervalQueryRange(pQuery, nextTimestamp, pSupporter->rawSKey, pSupporter->rawEKey); + getAlignedIntervalQueryRange(pRuntimeEnv, nextTimestamp, pSupporter->rawSKey, pSupporter->rawEKey); } } @@ -5535,37 +6248,51 @@ static int32_t offsetComparator(const void *pLeft, const void *pRight) { * @param pMeterHeadDataInfo * @return */ -SMeterDataInfo **vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, int32_t fileIndex, tSidSet *pSidSet, - SMeterDataInfo *pMeterDataInfo, int32_t *numOfMeters) { +int32_t vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, tSidSet *pSidSet, SMeterDataInfo *pMeterDataInfo, + int32_t 
*numOfMeters, SMeterDataInfo ***pReqMeterDataInfo) { SQuery *pQuery = &pQInfo->query; SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; SMeterSidExtInfo ** pMeterSidExtInfo = pSupporter->pMeterSidExtInfo; SQueryRuntimeEnv * pRuntimeEnv = &pSupporter->runtimeEnv; - + SVnodeObj *pVnode = &vnodeList[vid]; - char * pHeaderFileData = vnodeGetHeaderFileData(pRuntimeEnv, vid, fileIndex); - if (pHeaderFileData == NULL) { // failed to load header file into buffer - return 0; + char *buf = calloc(1, getCompHeaderSegSize(&pVnode->cfg)); + if (buf == NULL) { + *numOfMeters = 0; + return TSDB_CODE_SERV_OUT_OF_MEMORY; } - - int32_t tmsize = sizeof(SCompHeader) * (pVnode->cfg.maxSessions) + sizeof(TSCKSUM); - // file is corrupted, abort query in current file - if (validateHeaderOffsetSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, vid, pHeaderFileData, tmsize) < 0) { + SQueryFilesInfo *pVnodeFileInfo = &pRuntimeEnv->vnodeFileInfo; + + int32_t headerSize = getCompHeaderSegSize(&pVnode->cfg); + lseek(pVnodeFileInfo->headerFd, TSDB_FILE_HEADER_LEN, SEEK_SET); + read(pVnodeFileInfo->headerFd, buf, headerSize); + + // check the offset value integrity + if (validateHeaderOffsetSegment(pQInfo, pRuntimeEnv->vnodeFileInfo.headerFilePath, vid, buf - TSDB_FILE_HEADER_LEN, + headerSize) < 0) { + free(buf); *numOfMeters = 0; - return 0; + + return TSDB_CODE_FILE_CORRUPTED; } - int64_t oldestKey = getOldestKey(pVnode->numOfFiles, pVnode->fileId, &pVnode->cfg); - SMeterDataInfo **pReqMeterDataInfo = malloc(POINTER_BYTES * pSidSet->numOfSids); + int64_t oldestKey = getOldestKey(pVnode->numOfFiles, pVnode->fileId, &pVnode->cfg); + (*pReqMeterDataInfo) = malloc(POINTER_BYTES * pSidSet->numOfSids); + if (*pReqMeterDataInfo == NULL) { + free(buf); + *numOfMeters = 0; + + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } int32_t groupId = 0; TSKEY skey, ekey; for (int32_t i = 0; i < pSidSet->numOfSids; ++i) { // load all meter meta info - SMeterObj *pMeterObj = 
getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[i]->sid); + SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[i]->sid); if (pMeterObj == NULL) { dError("QInfo:%p failed to find required sid:%d", pQInfo, pMeterSidExtInfo[i]->sid); continue; @@ -5602,53 +6329,56 @@ SMeterDataInfo **vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, int32_t } } - int64_t headerOffset = TSDB_FILE_HEADER_LEN + sizeof(SCompHeader) * pMeterObj->sid; - - SCompHeader *compHeader = (SCompHeader *)(pHeaderFileData + headerOffset); - - if (compHeader->compInfoOffset == 0) { + int64_t headerOffset = sizeof(SCompHeader) * pMeterObj->sid; + SCompHeader *compHeader = (SCompHeader *)(buf + headerOffset); + if (compHeader->compInfoOffset == 0) { // current table is empty continue; } - if (compHeader->compInfoOffset < sizeof(SCompHeader) * pVnode->cfg.maxSessions + TSDB_FILE_HEADER_LEN || - compHeader->compInfoOffset > pRuntimeEnv->vnodeFileInfo.headFileSize) { - dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%d is not valid", pQuery, pMeterObj->vnode, pMeterObj->sid, - pMeterObj->meterId, compHeader->compInfoOffset); - continue; + // corrupted file may cause the invalid compInfoOffset, check needs + int32_t compHeaderOffset = getCompHeaderStartPosition(&pVnode->cfg); + if (validateCompBlockOffset(pQInfo, pMeterObj, compHeader, &pRuntimeEnv->vnodeFileInfo, compHeaderOffset) != + TSDB_CODE_SUCCESS) { + free(buf); + *numOfMeters = 0; + + return TSDB_CODE_FILE_CORRUPTED; } pOneMeterDataInfo->offsetInHeaderFile = (uint64_t)compHeader->compInfoOffset; if (pOneMeterDataInfo->pMeterQInfo == NULL) { - pOneMeterDataInfo->pMeterQInfo = createMeterQueryInfo(pQuery, pSupporter->rawSKey, pSupporter->rawEKey); + pOneMeterDataInfo->pMeterQInfo = createMeterQueryInfo(pQuery, pMeterObj->sid, pSupporter->rawSKey, pSupporter->rawEKey); } - pReqMeterDataInfo[*numOfMeters] = pOneMeterDataInfo; + (*pReqMeterDataInfo)[*numOfMeters] = pOneMeterDataInfo; (*numOfMeters) += 1; } 
assert(*numOfMeters <= pSidSet->numOfSids); - /* enable access sequentially */ + /* enable sequentially access*/ if (*numOfMeters > 1) { - qsort(pReqMeterDataInfo, *numOfMeters, POINTER_BYTES, offsetComparator); + qsort((*pReqMeterDataInfo), *numOfMeters, POINTER_BYTES, offsetComparator); } - return pReqMeterDataInfo; + free(buf); + + return TSDB_CODE_SUCCESS; } -SMeterQueryInfo *createMeterQueryInfo(SQuery *pQuery, TSKEY skey, TSKEY ekey) { +SMeterQueryInfo *createMeterQueryInfo(SQuery *pQuery, int32_t sid, TSKEY skey, TSKEY ekey) { SMeterQueryInfo *pMeterQueryInfo = calloc(1, sizeof(SMeterQueryInfo)); pMeterQueryInfo->skey = skey; pMeterQueryInfo->ekey = ekey; pMeterQueryInfo->lastKey = skey; - pMeterQueryInfo->numOfPages = 0; - pMeterQueryInfo->numOfAlloc = INIT_ALLOCATE_DISK_PAGES; - pMeterQueryInfo->pageList = calloc(pMeterQueryInfo->numOfAlloc, sizeof(uint32_t)); +// pMeterQueryInfo->numOfPages = 0; +// pMeterQueryInfo->numOfAlloc = INIT_ALLOCATE_DISK_PAGES; +// pMeterQueryInfo->pageList = calloc(pMeterQueryInfo->numOfAlloc, sizeof(uint32_t)); pMeterQueryInfo->lastResRows = 0; - + pMeterQueryInfo->sid = sid; pMeterQueryInfo->cur.vnodeIndex = -1; pMeterQueryInfo->resultInfo = calloc((size_t)pQuery->numOfOutputCols, sizeof(SResultInfo)); @@ -5665,7 +6395,7 @@ void destroyMeterQueryInfo(SMeterQueryInfo *pMeterQueryInfo, int32_t numOfCols) return; } - free(pMeterQueryInfo->pageList); +// free(pMeterQueryInfo->pageList); for (int32_t i = 0; i < numOfCols; ++i) { tfree(pMeterQueryInfo->resultInfo[i].interResultBuf); } @@ -5674,7 +6404,8 @@ void destroyMeterQueryInfo(SMeterQueryInfo *pMeterQueryInfo, int32_t numOfCols) free(pMeterQueryInfo); } -void changeMeterQueryInfoForSuppleQuery(SMeterQueryInfo *pMeterQueryInfo, TSKEY skey, TSKEY ekey) { +void changeMeterQueryInfoForSuppleQuery(SQueryResultBuf* pResultBuf, SMeterQueryInfo *pMeterQueryInfo, TSKEY skey, + TSKEY ekey) { if (pMeterQueryInfo == NULL) { return; } @@ -5688,7 +6419,9 @@ void 
changeMeterQueryInfoForSuppleQuery(SMeterQueryInfo *pMeterQueryInfo, TSKEY pMeterQueryInfo->cur.vnodeIndex = -1; // previous does not generate any results - if (pMeterQueryInfo->numOfPages == 0) { + SIDList list = getDataBufPagesIdList(pResultBuf, pMeterQueryInfo->sid); + + if (list.size == 0) { pMeterQueryInfo->reverseFillRes = 0; } else { pMeterQueryInfo->reverseIndex = pMeterQueryInfo->numOfRes; @@ -5696,28 +6429,6 @@ void changeMeterQueryInfoForSuppleQuery(SMeterQueryInfo *pMeterQueryInfo, TSKEY } } -static tFilePage *allocNewPage(SMeterQuerySupportObj *pSupporter, uint32_t *pageId) { - if (pSupporter->lastPageId == pSupporter->numOfPages - 1) { - extendDiskBuf(pSupporter, pSupporter->numOfPages + pSupporter->numOfMeters); - } - - *pageId = (++pSupporter->lastPageId); - return getFilePage(pSupporter, *pageId); -} - -tFilePage *addDataPageForMeterQueryInfo(SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportObj *pSupporter) { - uint32_t pageId = 0; - tFilePage *pPage = allocNewPage(pSupporter, &pageId); - - if (pMeterQueryInfo->numOfPages >= pMeterQueryInfo->numOfAlloc) { - pMeterQueryInfo->numOfAlloc = pMeterQueryInfo->numOfAlloc << 1; - pMeterQueryInfo->pageList = realloc(pMeterQueryInfo->pageList, sizeof(uint32_t) * pMeterQueryInfo->numOfAlloc); - } - - pMeterQueryInfo->pageList[pMeterQueryInfo->numOfPages++] = pageId; - return pPage; -} - void saveIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, SMeterQueryInfo *pMeterQueryInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; @@ -5744,10 +6455,12 @@ void restoreIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, SMeterQueryInfo *p ((pQuery->lastKey <= pQuery->skey) && !QUERY_IS_ASC_QUERY(pQuery))); } -static void clearMeterDataBlockInfo(SMeterDataInfo *pMeterDataInfo) { - tfree(pMeterDataInfo->pBlock); - pMeterDataInfo->numOfBlocks = 0; - pMeterDataInfo->start = 0; +static void clearAllMeterDataBlockInfo(SMeterDataInfo **pMeterDataInfo, int32_t start, int32_t end) { + for (int32_t i = start; i < end; ++i) { + 
tfree(pMeterDataInfo[i]->pBlock); + pMeterDataInfo[i]->numOfBlocks = 0; + pMeterDataInfo[i]->start = -1; + } } static bool getValidDataBlocksRangeIndex(SMeterDataInfo *pMeterDataInfo, SQuery *pQuery, SCompBlock *pCompBlock, @@ -5788,24 +6501,22 @@ static bool getValidDataBlocksRangeIndex(SMeterDataInfo *pMeterDataInfo, SQuery return true; } -static bool setValidDataBlocks(SMeterDataInfo *pMeterDataInfo, SCompBlock *pCompBlock, int32_t end) { +static bool setValidDataBlocks(SMeterDataInfo *pMeterDataInfo, int32_t end) { int32_t size = (end - pMeterDataInfo->start) + 1; assert(size > 0); if (size != pMeterDataInfo->numOfBlocks) { - char *tmp = realloc(pMeterDataInfo->pBlock, POINTER_BYTES * size); + memmove(pMeterDataInfo->pBlock, &pMeterDataInfo->pBlock[pMeterDataInfo->start], size * sizeof(SCompBlock)); + + char *tmp = realloc(pMeterDataInfo->pBlock, size * sizeof(SCompBlock)); if (tmp == NULL) { return false; } - pMeterDataInfo->pBlock = (SCompBlock **)tmp; + pMeterDataInfo->pBlock = (SCompBlock *)tmp; pMeterDataInfo->numOfBlocks = size; } - for (int32_t i = pMeterDataInfo->start, j = 0; i <= end; ++i, ++j) { - pMeterDataInfo->pBlock[j] = &pCompBlock[i]; - } - return true; } @@ -5824,60 +6535,81 @@ static bool setCurrentQueryRange(SMeterDataInfo *pMeterDataInfo, SQuery *pQuery, } if (*minval > *maxval) { - qTrace("QInfo:%p vid:%d sid:%d id:%s, no result in files, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode, - pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey, pMeterQInfo->lastKey); + qTrace("QInfo:%p vid:%d sid:%d id:%s, no result in files, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo, + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey, + pMeterQInfo->lastKey); return false; } else { - qTrace("QInfo:%p vid:%d sid:%d id:%s, query in files, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode, - pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey, 
pMeterQInfo->lastKey); + qTrace("QInfo:%p vid:%d sid:%d id:%s, query in files, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo, + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey, + pMeterQInfo->lastKey); return true; } } /** - * + * @param pSupporter * @param pQuery - * @param pHeaderData * @param numOfMeters + * @param filePath * @param pMeterDataInfo * @return */ -uint32_t getDataBlocksForMeters(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, char *pHeaderData, - int32_t numOfMeters, const char* filePath, SMeterDataInfo **pMeterDataInfo) { - uint32_t numOfBlocks = 0; +int32_t getDataBlocksForMeters(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, int32_t numOfMeters, + const char *filePath, SMeterDataInfo **pMeterDataInfo, uint32_t *numOfBlocks) { SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); SQueryCostSummary *pSummary = &pSupporter->runtimeEnv.summary; TSKEY minval, maxval; + *numOfBlocks = 0; + SQueryFilesInfo *pVnodeFileInfo = &pSupporter->runtimeEnv.vnodeFileInfo; + // sequentially scan this header file to extract the compHeader info for (int32_t j = 0; j < numOfMeters; ++j) { SMeterObj *pMeterObj = pMeterDataInfo[j]->pMeterObj; - SCompInfo *compInfo = (SCompInfo *)(pHeaderData + pMeterDataInfo[j]->offsetInHeaderFile); - int32_t ret = validateCompBlockInfoSegment(pQInfo, filePath, pMeterObj->vnode, compInfo, + lseek(pVnodeFileInfo->headerFd, pMeterDataInfo[j]->offsetInHeaderFile, SEEK_SET); + + SCompInfo compInfo = {0}; + read(pVnodeFileInfo->headerFd, &compInfo, sizeof(SCompInfo)); + + int32_t ret = validateCompBlockInfoSegment(pQInfo, filePath, pMeterObj->vnode, &compInfo, pMeterDataInfo[j]->offsetInHeaderFile); - if (ret != 0) { - clearMeterDataBlockInfo(pMeterDataInfo[j]); - continue; + if (ret != TSDB_CODE_SUCCESS) { // file corrupted + clearAllMeterDataBlockInfo(pMeterDataInfo, 0, numOfMeters); + return TSDB_CODE_FILE_CORRUPTED; } - if (compInfo->numOfBlocks <= 0 || compInfo->uid != 
pMeterDataInfo[j]->pMeterObj->uid) { - clearMeterDataBlockInfo(pMeterDataInfo[j]); + if (compInfo.numOfBlocks <= 0 || compInfo.uid != pMeterDataInfo[j]->pMeterObj->uid) { + clearAllMeterDataBlockInfo(pMeterDataInfo, 0, numOfMeters); continue; } - int32_t size = compInfo->numOfBlocks * sizeof(SCompBlock); - SCompBlock *pCompBlock = (SCompBlock *)((char *)compInfo + sizeof(SCompInfo)); + int32_t size = compInfo.numOfBlocks * sizeof(SCompBlock); + size_t bufferSize = size + sizeof(TSCKSUM); + + pMeterDataInfo[j]->numOfBlocks = compInfo.numOfBlocks; + char* p = realloc(pMeterDataInfo[j]->pBlock, bufferSize); + if (p == NULL) { + clearAllMeterDataBlockInfo(pMeterDataInfo, 0, numOfMeters); + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } else { + memset(p, 0, bufferSize); + pMeterDataInfo[j]->pBlock = (SCompBlock*) p; + } + + read(pVnodeFileInfo->headerFd, pMeterDataInfo[j]->pBlock, bufferSize); + TSCKSUM checksum = *(TSCKSUM *)((char *)pMeterDataInfo[j]->pBlock + size); int64_t st = taosGetTimestampUs(); // check compblock integrity - TSCKSUM checksum = *(TSCKSUM *)((char *)compInfo + sizeof(SCompInfo) + size); - ret = validateCompBlockSegment(pQInfo, filePath, compInfo, (char *)pCompBlock, pMeterObj->vnode, checksum); - if (ret < 0) { - clearMeterDataBlockInfo(pMeterDataInfo[j]); - continue; + ret = validateCompBlockSegment(pQInfo, filePath, &compInfo, (char *)pMeterDataInfo[j]->pBlock, pMeterObj->vnode, + checksum); + if (ret != TSDB_CODE_SUCCESS) { + clearAllMeterDataBlockInfo(pMeterDataInfo, 0, numOfMeters); + return TSDB_CODE_FILE_CORRUPTED; } int64_t et = taosGetTimestampUs(); @@ -5887,31 +6619,32 @@ uint32_t getDataBlocksForMeters(SMeterQuerySupportObj *pSupporter, SQuery *pQuer pSummary->loadCompInfoUs += (et - st); if (!setCurrentQueryRange(pMeterDataInfo[j], pQuery, pSupporter->rawEKey, &minval, &maxval)) { - clearMeterDataBlockInfo(pMeterDataInfo[j]); + clearAllMeterDataBlockInfo(pMeterDataInfo, j, j + 1); continue; } int32_t end = 0; - if 
(!getValidDataBlocksRangeIndex(pMeterDataInfo[j], pQuery, pCompBlock, compInfo->numOfBlocks, minval, maxval, - &end)) { - clearMeterDataBlockInfo(pMeterDataInfo[j]); + if (!getValidDataBlocksRangeIndex(pMeterDataInfo[j], pQuery, pMeterDataInfo[j]->pBlock, compInfo.numOfBlocks, + minval, maxval, &end)) { + // current table has no qualified data blocks, erase its information. + clearAllMeterDataBlockInfo(pMeterDataInfo, j, j + 1); continue; } - if (!setValidDataBlocks(pMeterDataInfo[j], pCompBlock, end)) { - clearMeterDataBlockInfo(pMeterDataInfo[j]); - pQInfo->killed = 1; // todo set query kill, abort current query since no - // memory available - return 0; + if (!setValidDataBlocks(pMeterDataInfo[j], end)) { + clearAllMeterDataBlockInfo(pMeterDataInfo, 0, numOfMeters); + + pQInfo->killed = 1; // set query kill, abort current query since no memory available + return TSDB_CODE_SERV_OUT_OF_MEMORY; } qTrace("QInfo:%p vid:%d sid:%d id:%s, startIndex:%d, %d blocks qualified", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pMeterDataInfo[j]->start, pMeterDataInfo[j]->numOfBlocks); - numOfBlocks += pMeterDataInfo[j]->numOfBlocks; + (*numOfBlocks) += pMeterDataInfo[j]->numOfBlocks; } - return numOfBlocks; + return TSDB_CODE_SUCCESS; } static void freeDataBlockFieldInfo(SMeterDataBlockInfoEx *pDataBlockInfoEx, int32_t len) { @@ -5962,23 +6695,33 @@ static int32_t blockAccessOrderComparator(const void *pLeft, const void *pRight, return pLeftBlockInfoEx->pBlock.compBlock->offset > pRightBlockInfoEx->pBlock.compBlock->offset ? 
1 : -1; } +void cleanBlockOrderSupporter(SBlockOrderSupporter *pSupporter, int32_t numOfTables) { + tfree(pSupporter->numOfBlocksPerMeter); + tfree(pSupporter->blockIndexArray); + + for (int32_t i = 0; i < numOfTables; ++i) { + tfree(pSupporter->pDataBlockInfoEx[i]); + } + + tfree(pSupporter->pDataBlockInfoEx); +} + int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMeters, SMeterDataBlockInfoEx **pDataBlockInfoEx, int32_t numOfCompBlocks, - int32_t *nAllocBlocksInfoSize, int64_t addr) { + int32_t *numOfAllocBlocks, int64_t addr) { // release allocated memory first - freeDataBlockFieldInfo(*pDataBlockInfoEx, *nAllocBlocksInfoSize); + freeDataBlockFieldInfo(*pDataBlockInfoEx, *numOfAllocBlocks); - if (*nAllocBlocksInfoSize == 0 || *nAllocBlocksInfoSize < numOfCompBlocks) { + if (*numOfAllocBlocks == 0 || *numOfAllocBlocks < numOfCompBlocks) { char *tmp = realloc((*pDataBlockInfoEx), sizeof(SMeterDataBlockInfoEx) * numOfCompBlocks); if (tmp == NULL) { tfree(*pDataBlockInfoEx); - return -1; - } else { - *pDataBlockInfoEx = (SMeterDataBlockInfoEx *)tmp; + return TSDB_CODE_SERV_OUT_OF_MEMORY; } + *pDataBlockInfoEx = (SMeterDataBlockInfoEx *)tmp; memset((*pDataBlockInfoEx), 0, sizeof(SMeterDataBlockInfoEx) * numOfCompBlocks); - *nAllocBlocksInfoSize = numOfCompBlocks; + *numOfAllocBlocks = numOfCompBlocks; } SBlockOrderSupporter supporter = {0}; @@ -5989,10 +6732,8 @@ int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMet if (supporter.numOfBlocksPerMeter == NULL || supporter.blockIndexArray == NULL || supporter.pDataBlockInfoEx == NULL) { - tfree(supporter.numOfBlocksPerMeter); - tfree(supporter.blockIndexArray); - tfree(supporter.pDataBlockInfoEx); - return -1; + cleanBlockOrderSupporter(&supporter, 0); + return TSDB_CODE_SERV_OUT_OF_MEMORY; } int32_t cnt = 0; @@ -6002,22 +6743,26 @@ int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMet continue; } - SCompBlock **pBlock = 
pMeterDataInfo[j]->pBlock; + SCompBlock *pBlock = pMeterDataInfo[j]->pBlock; supporter.numOfBlocksPerMeter[numOfQualMeters] = pMeterDataInfo[j]->numOfBlocks; - // TODO handle failed to allocate memory - supporter.pDataBlockInfoEx[numOfQualMeters] = - calloc(1, sizeof(SMeterDataBlockInfoEx) * pMeterDataInfo[j]->numOfBlocks); - - for (int32_t k = 0; k < pMeterDataInfo[j]->numOfBlocks; ++k) { - SMeterDataBlockInfoEx *pInfoEx = &supporter.pDataBlockInfoEx[numOfQualMeters][k]; + char *buf = calloc(1, sizeof(SMeterDataBlockInfoEx) * pMeterDataInfo[j]->numOfBlocks); + if (buf == NULL) { + cleanBlockOrderSupporter(&supporter, numOfQualMeters); + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } - pInfoEx->pBlock.compBlock = pBlock[k]; - pInfoEx->pBlock.fields = NULL; + supporter.pDataBlockInfoEx[numOfQualMeters] = (SMeterDataBlockInfoEx *)buf; - pInfoEx->pMeterDataInfo = pMeterDataInfo[j]; - pInfoEx->groupIdx = pMeterDataInfo[j]->groupIdx; // set the group index - pInfoEx->blockIndex = pMeterDataInfo[j]->start + k; // set the block index in original meter + for (int32_t k = 0; k < pMeterDataInfo[j]->numOfBlocks; ++k) { + SMeterDataBlockInfoEx *pBlockInfoEx = &supporter.pDataBlockInfoEx[numOfQualMeters][k]; + + pBlockInfoEx->pBlock.compBlock = &pBlock[k]; + pBlockInfoEx->pBlock.fields = NULL; + + pBlockInfoEx->pMeterDataInfo = pMeterDataInfo[j]; + pBlockInfoEx->groupIdx = pMeterDataInfo[j]->groupIdx; // set the group index + pBlockInfoEx->blockIndex = pMeterDataInfo[j]->start + k; // set the block index in original meter cnt++; } @@ -6026,12 +6771,15 @@ int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMet dTrace("QInfo %p create data blocks info struct completed", addr); - assert(cnt <= numOfCompBlocks && numOfQualMeters <= numOfMeters); + assert(cnt == numOfCompBlocks && numOfQualMeters <= numOfMeters); // the pMeterDataInfo[j]->numOfBlocks may be 0 supporter.numOfMeters = numOfQualMeters; SLoserTreeInfo *pTree = NULL; uint8_t ret = 
tLoserTreeCreate(&pTree, supporter.numOfMeters, &supporter, blockAccessOrderComparator); - UNUSED(ret); + if (ret != TSDB_CODE_SUCCESS) { + cleanBlockOrderSupporter(&supporter, numOfMeters); + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } int32_t numOfTotal = 0; @@ -6042,10 +6790,11 @@ int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMet (*pDataBlockInfoEx)[numOfTotal++] = pBlocksInfoEx[index]; + // set data block index overflow, in order to disable the offset comparator if (supporter.blockIndexArray[pos] >= supporter.numOfBlocksPerMeter[pos]) { - /* set data block index overflow, in order to disable the offset comparator */ supporter.blockIndexArray[pos] = supporter.numOfBlocksPerMeter[pos] + 1; } + tLoserTreeAdjust(pTree, pos + supporter.numOfMeters); } @@ -6057,18 +6806,10 @@ int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMet */ dTrace("QInfo %p %d data blocks sort completed", addr, cnt); - - tfree(supporter.numOfBlocksPerMeter); - tfree(supporter.blockIndexArray); - - for (int32_t i = 0; i < numOfMeters; ++i) { - tfree(supporter.pDataBlockInfoEx[i]); - } - - tfree(supporter.pDataBlockInfoEx); + cleanBlockOrderSupporter(&supporter, numOfMeters); free(pTree); - return cnt; + return TSDB_CODE_SUCCESS; } /** @@ -6099,25 +6840,23 @@ void setExecutionContext(SMeterQuerySupportObj *pSupporter, SOutputRes *outputRe static void setGroupOutputBuffer(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes *pResult) { SQuery *pQuery = pRuntimeEnv->pQuery; - - // Note: pResult->result[i]->numOfElems == 0, there is only fixed number of results for each group + + // Note: pResult->pos[i]->numOfElems == 0, there is only fixed number of results for each group for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - assert(pResult->result[i]->numOfElems == 0 || pResult->result[i]->numOfElems == 1); - SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; - pCtx->aOutputBuf = pResult->result[i]->data + pCtx->outputBytes * 
pResult->result[i]->numOfElems; - + pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult); + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; } - + /* * set the output buffer information and intermediate buffer * not all queries require the interResultBuf, such as COUNT */ pCtx->resultInfo = &pResult->resultInfo[i]; - + // set super table query flag SResultInfo *pResInfo = GET_RES_INFO(pCtx); if (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) { @@ -6142,12 +6881,16 @@ void setCtxOutputPointerForSupplementScan(SMeterQuerySupportObj *pSupporter, SMe tFilePage *pData = NULL; int32_t i = 0; + SQueryResultBuf* pResultBuf = pRuntimeEnv->pResultBuf; + // find the position for this output result - for (; i < pMeterQueryInfo->numOfPages; ++i) { - pData = getFilePage(pSupporter, pMeterQueryInfo->pageList[i]); + SIDList list = getDataBufPagesIdList(pResultBuf, pMeterQueryInfo->sid); + for (; i < list.size; ++i) { + pData = getResultBufferPageById(pResultBuf, list.pData[i]); if (index <= pData->numOfElems) { break; } + index -= pData->numOfElems; } @@ -6187,7 +6930,7 @@ void setCtxOutputPointerForSupplementScan(SMeterQuerySupportObj *pSupporter, SMe // the first column is always the timestamp for interval query TSKEY ts = *(TSKEY *)pRuntimeEnv->pCtx[0].aOutputBuf; SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; - qTrace("QInfo:%p vid:%d sid:%d id:%s, set output result pointer, ts:%lld, index:%d", GET_QINFO_ADDR(pQuery), + qTrace("QInfo:%p vid:%d sid:%d id:%s, set output result pointer, ts:%" PRId64 ", index:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, ts, pMeterQueryInfo->reverseIndex); } @@ -6205,46 +6948,54 @@ void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t } } -void setOutputBufferForIntervalQuery(SMeterQuerySupportObj *pSupporter, 
SMeterQueryInfo *pMeterQueryInfo) { +int32_t setOutputBufferForIntervalQuery(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; tFilePage * pData = NULL; + SQueryResultBuf* pResultBuf = pRuntimeEnv->pResultBuf; + // in the first scan, new space needed for results - if (pMeterQueryInfo->numOfPages == 0) { - pData = addDataPageForMeterQueryInfo(pMeterQueryInfo, pSupporter); + SIDList list = getDataBufPagesIdList(pResultBuf, pMeterQueryInfo->sid); + int32_t pageId = -1; + if (list.size == 0) { + pData = getNewDataBuf(pResultBuf, pMeterQueryInfo->sid, &pageId); } else { - int32_t lastPageId = pMeterQueryInfo->pageList[pMeterQueryInfo->numOfPages - 1]; - pData = getFilePage(pSupporter, lastPageId); + pData = getResultBufferPageById(pResultBuf, getLastPageId(&list)); if (pData->numOfElems >= pRuntimeEnv->numOfRowsPerPage) { - pData = addDataPageForMeterQueryInfo(pMeterQueryInfo, pSupporter); - assert(pData->numOfElems == 0); // number of elements must be 0 for new allocated buffer + pData = getNewDataBuf(pResultBuf, pMeterQueryInfo->sid, &pageId); + if (pData != NULL) { + assert(pData->numOfElems == 0); // number of elements must be 0 for new allocated buffer + } } } + if (pData == NULL) { + return -1; + } + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) { pRuntimeEnv->pCtx[i].aOutputBuf = getOutputResPos(pRuntimeEnv, pData, pData->numOfElems, i); pRuntimeEnv->pCtx[i].resultInfo = &pMeterQueryInfo->resultInfo[i]; } + + return TSDB_CODE_SUCCESS; } -void setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t meterIdx, - SMeterQueryInfo *pMeterQueryInfo) { +int32_t setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t meterIdx, + SMeterQueryInfo *pMeterQueryInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; - SQuery * pQuery = pRuntimeEnv->pQuery; if (IS_MASTER_SCAN(pRuntimeEnv)) { - 
setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo); + if (setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo) != TSDB_CODE_SUCCESS) { + // not enough disk space or memory buffer for intermediate results + return -1; + } if (pMeterQueryInfo->lastResRows == 0) { initCtxOutputBuf(pRuntimeEnv); } - // reset the number of iterated elements, once this function is called. since the pCtx for different - for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { - // pRuntimeEnv->pCtx[j].numOfIteratedElems = 0; - } - } else { if (pMeterQueryInfo->reverseFillRes) { setCtxOutputPointerForSupplementScan(pSupporter, pMeterQueryInfo); @@ -6255,7 +7006,9 @@ void setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t * * If the master scan does not produce any results, new spaces needed to be allocated during supplement scan */ - setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo); + if (setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo) != TSDB_CODE_SUCCESS) { + return -1; + } } } @@ -6274,174 +7027,277 @@ void setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t tsBufSetCursor(pSupporter->runtimeEnv.pTSBuf, &pMeterQueryInfo->cur); } } + + return 0; } -static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pInfo, - SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, char *sdata, SField *pFields, +//static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, +// SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, SField *pFields, +// __block_search_fn_t searchFn) { +// SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; +// SQuery * pQuery = pRuntimeEnv->pQuery; +// int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); +// +// int64_t nextKey = -1; +// bool queryCompleted = false; +// +// while (1) { +// int32_t numOfRes = 0; +// int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryCol, 
pFields, searchFn, &numOfRes); +// assert(steps > 0); +// +// // NOTE: in case of stable query, only ONE(or ZERO) row of pos generated for each query range +// if (pMeterQueryInfo->lastResRows == 0) { +// pMeterQueryInfo->lastResRows = numOfRes; +// } else { +// assert(pMeterQueryInfo->lastResRows == 1); +// } +// +// int32_t pos = pQuery->pos + steps * factor; +// +// // query does not reach the end of current block +// if ((pos < pBlockInfo->size && QUERY_IS_ASC_QUERY(pQuery)) || (pos >= 0 && !QUERY_IS_ASC_QUERY(pQuery))) { +// nextKey = pPrimaryCol[pos]; +// } else { +// assert((pQuery->lastKey > pBlockInfo->keyLast && QUERY_IS_ASC_QUERY(pQuery)) || +// (pQuery->lastKey < pBlockInfo->keyFirst && !QUERY_IS_ASC_QUERY(pQuery))); +// } +// +// // all data satisfy current query are checked, query completed +// if (QUERY_IS_ASC_QUERY(pQuery)) { +// queryCompleted = (nextKey > pQuery->ekey || pQuery->ekey <= pBlockInfo->keyLast); +// } else { +// queryCompleted = (nextKey < pQuery->ekey || pQuery->ekey >= pBlockInfo->keyFirst); +// } +// +// /* +// * 1. there may be more date that satisfy current query interval, other than +// * current block, we need to try next data blocks +// * 2. 
query completed, since reaches the upper bound of the main query range +// */ +// if (QUERY_IS_ASC_QUERY(pQuery)) { +// if (pQuery->lastKey > pBlockInfo->keyLast || pQuery->lastKey > pSupporter->rawEKey || +// nextKey > pSupporter->rawEKey) { +// /* +// * current interval query is completed, set query pos flag closed and +// * try next data block if pQuery->ekey == pSupporter->rawEKey, whole query is completed +// */ +// if (pQuery->lastKey > pBlockInfo->keyLast) { +// assert(pQuery->ekey >= pBlockInfo->keyLast); +// } +// +// if (pQuery->lastKey > pSupporter->rawEKey || nextKey > pSupporter->rawEKey) { +// /* whole query completed, save pos and abort */ +// assert(queryCompleted); +// saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows); +// +// // save the pQuery->lastKey for retrieve data in cache, actually, there will be no qualified data in cache. +// saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); +// } else if (pQuery->ekey == pBlockInfo->keyLast) { +// /* current interval query is completed, set the next query range on other data blocks if exist */ +// int64_t prevEKey = pQuery->ekey; +// +// getAlignedIntervalQueryRange(pRuntimeEnv, pQuery->lastKey, pSupporter->rawSKey, pSupporter->rawEKey); +// saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); +// +// assert(queryCompleted && prevEKey < pQuery->skey); +// if (pMeterQueryInfo->lastResRows > 0) { +// saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows); +// } +// } else { +// /* +// * Data that satisfy current query range may locate in current block and blocks that are directly right +// * next to current block. Therefore, we need to keep the query range(interval) unchanged until reaching +// * the direct next data block, while only forwards the pQuery->lastKey. +// * +// * With the information of the directly next data block, whether locates in cache or disk, +// * current interval query being completed or not can be decided. 
+// */ +// saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); +// assert(pQuery->lastKey > pBlockInfo->keyLast && pQuery->lastKey <= pQuery->ekey); +// +// /* +// * if current block is the last block of current file, we still close the pos flag, and +// * merge with other meters in the same group +// */ +// if (queryCompleted) { +// saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows); +// } +// } +// +// break; +// } +// } else { +// if (pQuery->lastKey < pBlockInfo->keyFirst || pQuery->lastKey < pSupporter->rawEKey || +// nextKey < pSupporter->rawEKey) { +// if (pQuery->lastKey < pBlockInfo->keyFirst) { +// assert(pQuery->ekey <= pBlockInfo->keyFirst); +// } +// +// if (pQuery->lastKey < pSupporter->rawEKey || (nextKey < pSupporter->rawEKey && nextKey != -1)) { +// /* whole query completed, save pos and abort */ +// assert(queryCompleted); +// saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows); +// +// /* +// * save the pQuery->lastKey for retrieve data in cache, actually, +// * there will be no qualified data in cache. +// */ +// saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); +// } else if (pQuery->ekey == pBlockInfo->keyFirst) { +// // current interval query is completed, set the next query range on other data blocks if exist +// int64_t prevEKey = pQuery->ekey; +// +// getAlignedIntervalQueryRange(pRuntimeEnv, pQuery->lastKey, pSupporter->rawSKey, pSupporter->rawEKey); +// saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); +// +// assert(queryCompleted && prevEKey > pQuery->skey); +// if (pMeterQueryInfo->lastResRows > 0) { +// saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows); +// } +// } else { +// /* +// * Data that satisfy current query range may locate in current block and blocks that are +// * directly right next to current block. Therefore, we need to keep the query range(interval) +// * unchanged until reaching the direct next data block, while only forwards the pQuery->lastKey. 
+// * +// * With the information of the directly next data block, whether locates in cache or disk, +// * current interval query being completed or not can be decided. +// */ +// saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); +// assert(pQuery->lastKey < pBlockInfo->keyFirst && pQuery->lastKey >= pQuery->ekey); +// +// /* +// * if current block is the last block of current file, we still close the pos +// * flag, and merge with other meters in the same group +// */ +// if (queryCompleted) { +// saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows); +// } +// } +// +// break; +// } +// } +// +// assert(queryCompleted); +// saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows); +// +// assert((nextKey >= pQuery->lastKey && QUERY_IS_ASC_QUERY(pQuery)) || +// (nextKey <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery))); +// +// /* still in the same block to query */ +// getAlignedIntervalQueryRange(pRuntimeEnv, nextKey, pSupporter->rawSKey, pSupporter->rawEKey); +// saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); +// +// int32_t newPos = searchFn((char *)pPrimaryCol, pBlockInfo->size, pQuery->skey, pQuery->order.order); +// assert(newPos == pQuery->pos + steps * factor); +// +// pQuery->pos = newPos; +// } +//} + +static void doApplyIntervalQueryOnBlock_rv(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, + SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, SField *pFields, __block_search_fn_t searchFn) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - - int64_t nextKey = -1; - bool queryCompleted = false; - + while (1) { + int64_t nextKey = -1; int32_t numOfRes = 0; - int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryCol, sdata, pFields, searchFn, &numOfRes); + + int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryCol, pFields, searchFn, &numOfRes); 
assert(steps > 0); // NOTE: in case of stable query, only ONE(or ZERO) row of result generated for each query range - if (pInfo->lastResRows == 0) { - pInfo->lastResRows = numOfRes; + if (pMeterQueryInfo->lastResRows == 0) { + pMeterQueryInfo->lastResRows = numOfRes; } else { - assert(pInfo->lastResRows == 1); + assert(pMeterQueryInfo->lastResRows == 1); } - + int32_t pos = pQuery->pos + steps * factor; - + // query does not reach the end of current block if ((pos < pBlockInfo->size && QUERY_IS_ASC_QUERY(pQuery)) || (pos >= 0 && !QUERY_IS_ASC_QUERY(pQuery))) { nextKey = pPrimaryCol[pos]; } else { assert((pQuery->lastKey > pBlockInfo->keyLast && QUERY_IS_ASC_QUERY(pQuery)) || - (pQuery->lastKey < pBlockInfo->keyFirst && !QUERY_IS_ASC_QUERY(pQuery))); + (pQuery->lastKey < pBlockInfo->keyFirst && !QUERY_IS_ASC_QUERY(pQuery))); } - + // all data satisfy current query are checked, query completed + bool completed = false; if (QUERY_IS_ASC_QUERY(pQuery)) { - queryCompleted = (nextKey > pQuery->ekey || pQuery->ekey <= pBlockInfo->keyLast); + completed = (pQuery->lastKey > pQuery->ekey); } else { - queryCompleted = (nextKey < pQuery->ekey || pQuery->ekey >= pBlockInfo->keyFirst); + completed = (pQuery->lastKey < pQuery->ekey); } - + /* * 1. there may be more date that satisfy current query interval, other than * current block, we need to try next data blocks * 2. 
query completed, since reaches the upper bound of the main query range */ - if (QUERY_IS_ASC_QUERY(pQuery)) { - if (pQuery->lastKey > pBlockInfo->keyLast || pQuery->lastKey > pSupporter->rawEKey || - nextKey > pSupporter->rawEKey) { - /* - * current interval query is completed, set query result flag closed and - * try next data block if pQuery->ekey == pSupporter->rawEKey, whole query is completed - */ - if (pQuery->lastKey > pBlockInfo->keyLast) { - assert(pQuery->ekey >= pBlockInfo->keyLast); - } - - if (pQuery->lastKey > pSupporter->rawEKey || nextKey > pSupporter->rawEKey) { - /* whole query completed, save result and abort */ - assert(queryCompleted); - saveResult(pSupporter, pInfo, pInfo->lastResRows); - - // save the pQuery->lastKey for retrieve data in cache, actually, there will be no qualified data in cache. - saveIntervalQueryRange(pRuntimeEnv, pInfo); - } else if (pQuery->ekey == pBlockInfo->keyLast) { - /* current interval query is completed, set the next query range on other data blocks if exist */ - int64_t prevEKey = pQuery->ekey; - - getAlignedIntervalQueryRange(pQuery, pQuery->lastKey, pSupporter->rawSKey, pSupporter->rawEKey); - saveIntervalQueryRange(pRuntimeEnv, pInfo); - - assert(queryCompleted && prevEKey < pQuery->skey); - if (pInfo->lastResRows > 0) { - saveResult(pSupporter, pInfo, pInfo->lastResRows); - } - } else { - /* - * Data that satisfy current query range may locate in current block and blocks that are directly right - * next to current block. Therefore, we need to keep the query range(interval) unchanged until reaching - * the direct next data block, while only forwards the pQuery->lastKey. - * - * With the information of the directly next data block, whether locates in cache or disk, - * current interval query being completed or not can be decided. 
- */ - saveIntervalQueryRange(pRuntimeEnv, pInfo); - assert(pQuery->lastKey > pBlockInfo->keyLast && pQuery->lastKey <= pQuery->ekey); - - /* - * if current block is the last block of current file, we still close the result flag, and - * merge with other meters in the same group - */ - if (queryCompleted) { - saveResult(pSupporter, pInfo, pInfo->lastResRows); - } - } - - break; - } - } else { - if (pQuery->lastKey < pBlockInfo->keyFirst || pQuery->lastKey < pSupporter->rawEKey || - nextKey < pSupporter->rawEKey) { - if (pQuery->lastKey < pBlockInfo->keyFirst) { - assert(pQuery->ekey <= pBlockInfo->keyFirst); - } - - if (pQuery->lastKey < pSupporter->rawEKey || (nextKey < pSupporter->rawEKey && nextKey != -1)) { - /* whole query completed, save result and abort */ - assert(queryCompleted); - saveResult(pSupporter, pInfo, pInfo->lastResRows); - - /* - * save the pQuery->lastKey for retrieve data in cache, actually, - * there will be no qualified data in cache. - */ - saveIntervalQueryRange(pRuntimeEnv, pInfo); - } else if (pQuery->ekey == pBlockInfo->keyFirst) { - // current interval query is completed, set the next query range on other data blocks if exist - int64_t prevEKey = pQuery->ekey; - - getAlignedIntervalQueryRange(pQuery, pQuery->lastKey, pSupporter->rawSKey, pSupporter->rawEKey); - saveIntervalQueryRange(pRuntimeEnv, pInfo); - - assert(queryCompleted && prevEKey > pQuery->skey); - if (pInfo->lastResRows > 0) { - saveResult(pSupporter, pInfo, pInfo->lastResRows); - } - } else { - /* - * Data that satisfy current query range may locate in current block and blocks that are - * directly right next to current block. Therefore, we need to keep the query range(interval) - * unchanged until reaching the direct next data block, while only forwards the pQuery->lastKey. - * - * With the information of the directly next data block, whether locates in cache or disk, - * current interval query being completed or not can be decided. 
- */ - saveIntervalQueryRange(pRuntimeEnv, pInfo); - assert(pQuery->lastKey < pBlockInfo->keyFirst && pQuery->lastKey >= pQuery->ekey); - - /* - * if current block is the last block of current file, we still close the result - * flag, and merge with other meters in the same group - */ - if (queryCompleted) { - saveResult(pSupporter, pInfo, pInfo->lastResRows); - } - } - - break; + if (!completed) { + /* + * Data that satisfy current query range may locate in current block and blocks that are directly right + * next to current block. Therefore, we need to keep the query range(interval) unchanged until reaching + * the direct next data block, while only forwards the pQuery->lastKey. + * + * With the information of the directly next data block, whether locates in cache or disk, + * current interval query being completed or not can be decided. + */ + saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); + + if (QUERY_IS_ASC_QUERY(pQuery)) { + assert(pQuery->lastKey > pBlockInfo->keyLast && pQuery->lastKey <= pQuery->ekey); + } else { + assert(pQuery->lastKey < pBlockInfo->keyFirst && pQuery->lastKey >= pQuery->ekey); } + + break; } - - assert(queryCompleted); - saveResult(pSupporter, pInfo, pInfo->lastResRows); - + + assert(completed); + + // while the interval time window is less than the time range gap between two points, nextKey may be greater than + // pSupporter->rawEKey + if (pQuery->ekey == pSupporter->rawEKey || (nextKey > pSupporter->rawEKey && QUERY_IS_ASC_QUERY(pQuery)) || + (nextKey < pSupporter->rawEKey && !QUERY_IS_ASC_QUERY(pQuery))) { + /* whole query completed, save result and abort */ + saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows); + + // save the pQuery->lastKey for retrieve data in cache, actually, there will be no qualified data in cache. 
+ saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); + + return; + } else if ((QUERY_IS_ASC_QUERY(pQuery) && pQuery->ekey == pBlockInfo->keyLast) || + (!QUERY_IS_ASC_QUERY(pQuery) && pQuery->ekey == pBlockInfo->keyFirst)) { + /* current interval query is completed, set the next query range on other data blocks if exist */ + saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); + return; + } + + saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows); + + assert(pos >= 0 && pos < pBlockInfo->size); assert((nextKey >= pQuery->lastKey && QUERY_IS_ASC_QUERY(pQuery)) || - (nextKey <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery))); - + (nextKey <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery))); + /* still in the same block to query */ - getAlignedIntervalQueryRange(pQuery, nextKey, pSupporter->rawSKey, pSupporter->rawEKey); - saveIntervalQueryRange(pRuntimeEnv, pInfo); - + getAlignedIntervalQueryRange(pRuntimeEnv, nextKey, pSupporter->rawSKey, pSupporter->rawEKey); + saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); + int32_t newPos = searchFn((char *)pPrimaryCol, pBlockInfo->size, pQuery->skey, pQuery->order.order); assert(newPos == pQuery->pos + steps * factor); - + pQuery->pos = newPos; } + } - int64_t getNextAccessedKeyInData(SQuery *pQuery, int64_t *pPrimaryCol, SBlockInfo *pBlockInfo, int32_t blockStatus) { assert(pQuery->pos >= 0 && pQuery->pos <= pBlockInfo->size - 1); @@ -6479,7 +7335,7 @@ void setIntervalQueryRange(SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportO * last query on this block of the meter is done, start next interval on this block * otherwise, keep the previous query range and proceed */ - getAlignedIntervalQueryRange(pQuery, key, pSupporter->rawSKey, pSupporter->rawEKey); + getAlignedIntervalQueryRange(pRuntimeEnv, key, pSupporter->rawSKey, pSupporter->rawEKey); saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); // previous query does not be closed, save the results and close it @@ -6499,7 +7355,7 @@ void 
setIntervalQueryRange(SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportO return; } - getAlignedIntervalQueryRange(pQuery, pQuery->skey, pSupporter->rawSKey, pSupporter->rawEKey); + getAlignedIntervalQueryRange(pRuntimeEnv, pQuery->skey, pSupporter->rawSKey, pSupporter->rawEKey); saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); pMeterQueryInfo->queryRangeSet = 1; } @@ -6545,8 +7401,8 @@ bool needPrimaryTimestampCol(SQuery *pQuery, SBlockInfo *pBlockInfo) { int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, uint8_t *blkStatus, SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIdx, int32_t slotIdx, __block_search_fn_t searchFn, bool onDemand) { - SQuery * pQuery = pRuntimeEnv->pQuery; - SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; TSKEY *primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; @@ -6569,13 +7425,13 @@ int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, uint8_t *blk pQuery->pSelectExpr[i].pBase.colInfo.colId, *blkStatus); } - if (pRuntimeEnv->pTSBuf > 0) { + if (pRuntimeEnv->pTSBuf > 0 || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { req |= BLK_DATA_ALL_NEEDED; } } if (req == BLK_DATA_NO_NEEDED) { - qTrace("QInfo:%p vid:%d sid:%d id:%s, slot:%d, data block ignored, brange:%lld-%lld, rows:%d", + qTrace("QInfo:%p vid:%d sid:%d id:%s, slot:%d, data block ignored, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints); @@ -6606,14 +7462,15 @@ int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, uint8_t *blk dTrace("QInfo:%p fileId:%d, slot:%d, block discarded by per-filter, ", GET_QINFO_ADDR(pQuery), pQuery->fileId, pQuery->slot); #endif - qTrace("QInfo:%p id:%s slot:%d, data block ignored by pre-filter, fields loaded, brange:%lld-%lld, rows:%d", + qTrace("QInfo:%p 
id:%s slot:%d, data block ignored by pre-filter, fields loaded, brange:%" PRId64 "-%" PRId64 + ", rows:%d", GET_QINFO_ADDR(pQuery), pMeterObj->meterId, pQuery->slot, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints); return DISK_DATA_DISCARDED; } } - SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_FILE_BLOCK); + SBlockInfo binfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_FILE_BLOCK); bool loadTS = needPrimaryTimestampCol(pQuery, &binfo); /* @@ -6658,28 +7515,32 @@ bool onDemandLoadDatablock(SQuery *pQuery, int16_t queryRangeSet) { static void validateResultBuf(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; SQuery * pQuery = pSupporter->runtimeEnv.pQuery; + SQueryResultBuf* pResultBuf = pRuntimeEnv->pResultBuf; - tFilePage *newOutput = getFilePage(pSupporter, pMeterQueryInfo->pageList[pMeterQueryInfo->numOfPages - 1]); + SIDList list = getDataBufPagesIdList(pResultBuf, pMeterQueryInfo->sid); + int32_t id = getLastPageId(&list); + + tFilePage* newOutput = getResultBufferPageById(pResultBuf, id); for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { assert(pRuntimeEnv->pCtx[i].aOutputBuf - newOutput->data < DEFAULT_INTERN_BUF_SIZE); } } -void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult) { +int32_t saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; // no results generated, do nothing for master scan if (numOfResult <= 0) { if (IS_MASTER_SCAN(pRuntimeEnv)) { - return; + return TSDB_CODE_SUCCESS; } else { /* * There is a case that no result generated during the the supplement scan, and during the main * scan also no result generated. The index can be backwards moved. 
* * However, if during the main scan, there is a result generated, such as applies count to timestamp, which - * always generates a result, but applies last query to a NULL column may fail to generate no results during the + * always generates a result, but applies last query to a NULL column may fail to generate results during the * supplement scan. * * NOTE: @@ -6697,7 +7558,7 @@ void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryI setCtxOutputPointerForSupplementScan(pSupporter, pMeterQueryInfo); } - return; + return TSDB_CODE_SUCCESS; } } @@ -6712,21 +7573,25 @@ void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryI pMeterQueryInfo->reverseIndex -= 1; setCtxOutputPointerForSupplementScan(pSupporter, pMeterQueryInfo); } else { - int32_t pageId = pMeterQueryInfo->pageList[pMeterQueryInfo->numOfPages - 1]; - tFilePage *pData = getFilePage(pSupporter, pageId); - + SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, pMeterQueryInfo->sid); + + int32_t pageId = getLastPageId(&list); + tFilePage* pData = getResultBufferPageById(pRuntimeEnv->pResultBuf, pageId); + // in handling records occuring around '1970-01-01', the aligned start timestamp may be 0. 
TSKEY ts = *(TSKEY *)getOutputResPos(pRuntimeEnv, pData, pData->numOfElems, 0); - + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; - qTrace("QInfo:%p vid:%d sid:%d id:%s, save results, ts:%lld, total:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, - pMeterObj->sid, pMeterObj->meterId, ts, pMeterQueryInfo->numOfRes + 1); + qTrace("QInfo:%p vid:%d sid:%d id:%s, save results, ts:%" PRId64 ", total:%d", GET_QINFO_ADDR(pQuery), + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, ts, pMeterQueryInfo->numOfRes + 1); pData->numOfElems += numOfResult; pMeterQueryInfo->numOfRes += numOfResult; assert(pData->numOfElems <= pRuntimeEnv->numOfRowsPerPage); - setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo); + if (setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo) != TSDB_CODE_SUCCESS) { + return -1; + } for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { resetResultInfo(&pMeterQueryInfo->resultInfo[i]); @@ -6743,20 +7608,22 @@ void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryI sc[1].bytes = 8; UNUSED(sc); - tColModel *cm = tColModelCreate(sc, pQuery->numOfOutputCols, pRuntimeEnv->numOfRowsPerPage); + SColumnModel *cm = createColumnModel(sc, pQuery->numOfOutputCols, pRuntimeEnv->numOfRowsPerPage); // if (outputPage->numOfElems + numOfResult >= pRuntimeEnv->numOfRowsPerPage) tColModelDisplay(cm, outputPage->data, outputPage->numOfElems, pRuntimeEnv->numOfRowsPerPage); #endif } + + return TSDB_CODE_SUCCESS; } -static int32_t getSubsetNumber(SMeterQuerySupportObj *pSupporter) { +static int32_t getNumOfSubset(SMeterQuerySupportObj *pSupporter) { SQuery *pQuery = pSupporter->runtimeEnv.pQuery; int32_t totalSubset = 0; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - totalSubset = pSupporter->runtimeEnv.usedIndex; + if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->nAggTimeInterval > 0 && pQuery->slidingTime > 0)) { + totalSubset = numOfClosedSlidingWindow(&pSupporter->runtimeEnv.swindowResInfo); } else { totalSubset 
= pSupporter->pSidSet->numOfSubSet; } @@ -6774,7 +7641,7 @@ static int32_t doCopyFromGroupBuf(SMeterQuerySupportObj *pSupporter, SOutputRes dTrace("QInfo:%p start to copy data to dest buf", GET_QINFO_ADDR(pSupporter->runtimeEnv.pQuery)); - int32_t totalSubset = getSubsetNumber(pSupporter); + int32_t totalSubset = getNumOfSubset(pSupporter); if (orderType == TSQL_SO_ASC) { startIdx = pSupporter->subgroupIdx; @@ -6792,8 +7659,6 @@ static int32_t doCopyFromGroupBuf(SMeterQuerySupportObj *pSupporter, SOutputRes assert(result[i].numOfRows >= 0 && pSupporter->offset <= 1); - tFilePage **srcBuf = result[i].result; - int32_t numOfRowsToCopy = result[i].numOfRows - pSupporter->offset; int32_t oldOffset = pSupporter->offset; @@ -6809,8 +7674,8 @@ static int32_t doCopyFromGroupBuf(SMeterQuerySupportObj *pSupporter, SOutputRes for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { int32_t elemSize = pRuntimeEnv->pCtx[j].outputBytes; char * outputBuf = pQuery->sdata[j]->data + numOfResult * elemSize; - - memcpy(outputBuf, srcBuf[j]->data + oldOffset * elemSize, elemSize * numOfRowsToCopy); + char* p = getPosInResultPage(pRuntimeEnv, j, &result[i]); + memcpy(outputBuf, p + oldOffset * elemSize, elemSize * numOfRowsToCopy); } numOfResult += numOfRowsToCopy; @@ -6836,36 +7701,27 @@ void copyFromGroupBuf(SQInfo *pQInfo, SOutputRes *result) { SQuery * pQuery = &pQInfo->query; SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; - int32_t orderType = (pQuery->pGroupbyExpr != NULL) ? pQuery->pGroupbyExpr->orderType : TSQL_SO_DESC; - + int32_t orderType = (pQuery->pGroupbyExpr != NULL) ? pQuery->pGroupbyExpr->orderType : TSQL_SO_ASC; int32_t numOfResult = doCopyFromGroupBuf(pSupporter, result, orderType); pQuery->pointsRead += numOfResult; assert(pQuery->pointsRead <= pQuery->pointsToRead); } -// todo refactor according to its called env!! 
-static void getAlignedIntervalQueryRange(SQuery *pQuery, TSKEY keyInData, TSKEY skey, TSKEY ekey) { - if (pQuery->nAggTimeInterval == 0) { - return; - } - - doGetAlignedIntervalQueryRange(pQuery, keyInData, skey, ekey); -} - -static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, char *data, - int64_t *pPrimaryData, SBlockInfo *pBlockInfo, int32_t blockStatus, - SField *pFields, __block_search_fn_t searchFn) { +static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pMeterDataInfo, + SBlockInfo *pBlockInfo, int32_t blockStatus, SField *pFields, + __block_search_fn_t searchFn) { SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; - SMeterQueryInfo * pInfo = pInfoEx->pMeterQInfo; + SMeterQueryInfo * pMeterQueryInfo = pMeterDataInfo->pMeterQInfo; + int64_t* pPrimaryKey = (int64_t*) pRuntimeEnv->primaryColBuffer->data; /* * for each block, we need to handle the previous query, since the determination of previous query being completed * or not is based on the start key of current block. 
*/ - TSKEY key = getNextAccessedKeyInData(pQuery, pPrimaryData, pBlockInfo, blockStatus); - setIntervalQueryRange(pInfoEx->pMeterQInfo, pSupporter, key); + TSKEY key = getNextAccessedKeyInData(pQuery, pPrimaryKey, pBlockInfo, blockStatus); + setIntervalQueryRange(pMeterDataInfo->pMeterQInfo, pSupporter, key); if (((pQuery->skey > pQuery->ekey) && QUERY_IS_ASC_QUERY(pQuery)) || ((pQuery->skey < pQuery->ekey) && !QUERY_IS_ASC_QUERY(pQuery))) { @@ -6876,18 +7732,18 @@ static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterD ((pBlockInfo->keyFirst > pQuery->ekey) && !QUERY_IS_ASC_QUERY(pQuery))) { int32_t numOfRes = 0; /* current block is included in this interval */ - int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryData, data, pFields, searchFn, &numOfRes); + int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryKey, pFields, searchFn, &numOfRes); assert(numOfRes <= 1 && numOfRes >= 0 && steps > 0); - if (pInfo->lastResRows == 0) { - pInfo->lastResRows = numOfRes; + if (pMeterQueryInfo->lastResRows == 0) { + pMeterQueryInfo->lastResRows = numOfRes; } else { - assert(pInfo->lastResRows == 1); + assert(pMeterQueryInfo->lastResRows == 1); } - saveIntervalQueryRange(pRuntimeEnv, pInfo); + saveIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); } else { - doApplyIntervalQueryOnBlock(pSupporter, pInfo, pBlockInfo, pPrimaryData, data, pFields, searchFn); + doApplyIntervalQueryOnBlock_rv(pSupporter, pMeterQueryInfo, pBlockInfo, pPrimaryKey, pFields, searchFn); } } @@ -6933,7 +7789,6 @@ bool vnodeHasRemainResults(void *handle) { SQuery * pQuery = pRuntimeEnv->pQuery; SInterpolationInfo *pInterpoInfo = &pRuntimeEnv->interpoInfo; - if (pQuery->limit.limit > 0 && pQInfo->pointsRead >= pQuery->limit.limit) { return false; } @@ -6973,7 +7828,7 @@ static int32_t resultInterpolate(SQInfo *pQInfo, tFilePage **data, tFilePage **p pSchema[i].type = pQuery->pSelectExpr[i].resType; } - tColModel *pModel = 
tColModelCreate(pSchema, pQuery->numOfOutputCols, pQuery->pointsToRead); + SColumnModel *pModel = createColumnModel(pSchema, pQuery->numOfOutputCols, pQuery->pointsToRead); char * srcData[TSDB_MAX_COLUMNS] = {0}; int32_t functions[TSDB_MAX_COLUMNS] = {0}; @@ -6987,7 +7842,7 @@ static int32_t resultInterpolate(SQInfo *pQInfo, tFilePage **data, tFilePage **p pQuery->nAggTimeInterval, (int64_t *)pDataSrc[0]->data, pModel, srcData, pQuery->defaultVal, functions, pRuntimeEnv->pMeterObj->pointsPerFileBlock); - tColModelDestroy(pModel); + destroyColumnModel(pModel); free(pSchema); return numOfRes; @@ -6998,7 +7853,7 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data SQuery * pQuery = &pQInfo->query; int tnumOfRows = vnodeList[pObj->vnode].cfg.rowsInFileBlock; - + // for metric query, bufIndex always be 0. for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { // pQInfo->bufIndex == 0 int32_t bytes = pQuery->pSelectExpr[col].resBytes; @@ -7032,7 +7887,7 @@ int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows) { // make sure file exist if (FD_VALID(fd)) { size_t s = lseek(fd, 0, SEEK_END); - dTrace("QInfo:%p ts comp data return, file:%s, size:%lld", pQInfo, pQuery->sdata[0]->data, s); + dTrace("QInfo:%p ts comp data return, file:%s, size:%zu", pQInfo, pQuery->sdata[0]->data, s); lseek(fd, 0, SEEK_SET); read(fd, data, s); @@ -7068,8 +7923,8 @@ int32_t vnodeQueryResultInterpolate(SQInfo *pQInfo, tFilePage **pDst, tFilePage int32_t ret = resultInterpolate(pQInfo, pDst, pDataSrc, numOfRows, numOfFinalRows); assert(ret == numOfFinalRows); + /* reached the start position of according to offset value, return immediately */ if (pQuery->limit.offset == 0) { - /* reached the start position of according to offset value, return immediately */ return ret; } @@ -7077,17 +7932,9 @@ int32_t vnodeQueryResultInterpolate(SQInfo *pQInfo, tFilePage **pDst, tFilePage ret -= pQuery->limit.offset; // todo !!!!there exactly number of 
interpo is not valid. // todo refactor move to the beginning of buffer - if (QUERY_IS_ASC_QUERY(pQuery)) { - for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - memmove(pDst[i]->data, pDst[i]->data + pQuery->pSelectExpr[i].resBytes * pQuery->limit.offset, - ret * pQuery->pSelectExpr[i].resBytes); - } - } else { - for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - memmove(pDst[i]->data + (pQuery->pointsToRead - ret) * pQuery->pSelectExpr[i].resBytes, - pDst[i]->data + (pQuery->pointsToRead - ret - pQuery->limit.offset) * pQuery->pSelectExpr[i].resBytes, - ret * pQuery->pSelectExpr[i].resBytes); - } + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + memmove(pDst[i]->data, pDst[i]->data + pQuery->pSelectExpr[i].resBytes * pQuery->limit.offset, + ret * pQuery->pSelectExpr[i].resBytes); } pQuery->limit.offset = 0; return ret; @@ -7109,7 +7956,11 @@ void vnodePrintQueryStatistics(SMeterQuerySupportObj *pSupporter) { SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); SQueryCostSummary *pSummary = &pRuntimeEnv->summary; - pSummary->tmpBufferInDisk = pSupporter->bufSize; + if (pRuntimeEnv->pResultBuf == NULL) { + pSummary->tmpBufferInDisk = 0; + } else { + pSummary->tmpBufferInDisk = getResBufSize(pRuntimeEnv->pResultBuf); + } dTrace("QInfo:%p statis: comp blocks:%d, size:%d Bytes, elapsed time:%.2f ms", pQInfo, pSummary->readCompInfo, pSummary->totalCompInfoSize, pSummary->loadCompInfoUs / 1000.0); diff --git a/src/system/detail/src/vnodeQueryProcess.c b/src/system/detail/src/vnodeQueryProcess.c index dea865e5cdf2ba833b5f49c96fb98033e752e550..c243a78e837cbc0f1ad60d83a7786da3aae54d3a 100644 --- a/src/system/detail/src/vnodeQueryProcess.c +++ b/src/system/detail/src/vnodeQueryProcess.c @@ -85,14 +85,25 @@ static void setStartPositionForCacheBlock(SQuery *pQuery, SCacheBlock *pBlock, b } } -static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) { +static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) { + 
SQuery* pQuery = pRuntimeEnv->pQuery; + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[i]); + if (pResInfo != NULL) { + pResInfo->complete = false; + } + } +} + +static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) { SQuery * pQuery = &pQInfo->query; SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; SQueryRuntimeEnv * pRuntimeEnv = &pQInfo->pMeterQuerySupporter->runtimeEnv; SMeterSidExtInfo **pMeterSidExtInfo = pSupporter->pMeterSidExtInfo; - SMeterObj *pTempMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[0]->sid); + SMeterObj *pTempMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[0]->sid); assert(pTempMeterObj != NULL); __block_search_fn_t searchFn = vnodeSearchKeyFunc[pTempMeterObj->searchAlgorithm]; @@ -107,11 +118,11 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe int32_t end = pSupporter->pSidSet->starterPos[groupIdx + 1] - 1; if (isQueryKilled(pQuery)) { - return pMeterInfo; + return; } for (int32_t k = start; k <= end; ++k) { - SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[k]->sid); + SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[k]->sid); if (pMeterObj == NULL) { dError("QInfo:%p failed to find meterId:%d, continue", pQInfo, pMeterSidExtInfo[k]->sid); continue; @@ -121,7 +132,7 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe pRuntimeEnv->pMeterObj = pMeterObj; if (pMeterInfo[k].pMeterQInfo == NULL) { - pMeterInfo[k].pMeterQInfo = createMeterQueryInfo(pQuery, pSupporter->rawSKey, pSupporter->rawEKey); + pMeterInfo[k].pMeterQInfo = createMeterQueryInfo(pQuery, pMeterObj->sid, pSupporter->rawSKey, pSupporter->rawEKey); } if (pMeterInfo[k].pMeterObj == NULL) { // no data in disk for this meter, set its pointer @@ -147,8 +158,8 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo 
*pQInfo, SMeterDataInfo *pMe if ((pQuery->lastKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || (pQuery->lastKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { dTrace( - "QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan data in cache. qrange:%lld-%lld, " - "lastKey:%lld", + "QInfo:%p vid:%d sid:%d id:%s, query completed, ignore data in cache. qrange:%" PRId64 "-%" PRId64 ", " + "lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); @@ -157,10 +168,14 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe setExecutionContext(pSupporter, pSupporter->pResult, k, pMeterInfo[k].groupIdx, pMeterQueryInfo); } else { - setIntervalQueryExecutionContext(pSupporter, k, pMeterQueryInfo); + int32_t ret = setIntervalQueryExecutionContext(pSupporter, k, pMeterQueryInfo); + if (ret != TSDB_CODE_SUCCESS) { + pQInfo->killed = 1; + return; + } } - qTrace("QInfo:%p vid:%d sid:%d id:%s, query in cache, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode, + qTrace("QInfo:%p vid:%d sid:%d id:%s, query in cache, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); /* @@ -172,14 +187,14 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe */ TSKEY nextKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, true); if (nextKey < 0) { - qTrace("QInfo:%p vid:%d sid:%d id:%s, no data qualified in cache, cache blocks:%d, lastKey:%lld", pQInfo, + qTrace("QInfo:%p vid:%d sid:%d id:%s, no data qualified in cache, cache blocks:%d, lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->numOfBlocks, pQuery->lastKey); continue; } // data in this block may be flushed to disk and this block is allocated to other meter // todo try with remain cache blocks - SCacheBlock *pBlock = 
getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot); if (pBlock == NULL) { continue; } @@ -192,16 +207,13 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; for (int32_t i = 0; i < pCacheInfo->maxBlocks; ++i) { - pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot); /* * 1. pBlock == NULL. The cache block may be flushed to disk, so it is not available, skip and try next - * - * 2. pBlock->numOfPoints == 0. There is a empty block, which is caused by allocate-and-write data into cache - * procedure. The block has been allocated but data has not been put into yet. If the block is the last - * block(newly allocated block), abort query. Otherwise, skip it and go on. + * The check for empty block is refactor to getCacheDataBlock function */ - if ((pBlock == NULL) || (pBlock->numOfPoints == 0)) { + if (pBlock == NULL) { if (ALL_CACHE_BLOCKS_CHECKED(pQuery)) { break; } @@ -212,8 +224,8 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe setStartPositionForCacheBlock(pQuery, pBlock, &firstCheckSlot); - TSKEY *primaryKeys = (TSKEY *)pBlock->offset[0]; - + TSKEY* primaryKeys = (TSKEY*) pRuntimeEnv->primaryColBuffer->data; + // in handling file data block, the timestamp range validation is done during fetching candidate file blocks if ((primaryKeys[pQuery->pos] > pSupporter->rawEKey && QUERY_IS_ASC_QUERY(pQuery)) || (primaryKeys[pQuery->pos] < pSupporter->rawEKey && !QUERY_IS_ASC_QUERY(pQuery))) { @@ -222,15 +234,14 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe // only record the key on last block SET_CACHE_BLOCK_FLAG(pRuntimeEnv->blockStatus); - SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_CACHE_BLOCK); + SBlockInfo binfo = getBlockBasicInfo(pRuntimeEnv, pBlock, 
BLK_CACHE_BLOCK); - dTrace("QInfo:%p check data block, brange:%lld-%lld, fileId:%d, slot:%d, pos:%d, bstatus:%d", + dTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", fileId:%d, slot:%d, pos:%d, bstatus:%d", GET_QINFO_ADDR(pQuery), binfo.keyFirst, binfo.keyLast, pQuery->fileId, pQuery->slot, pQuery->pos, pRuntimeEnv->blockStatus); totalBlocks++; - queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, (char *)pBlock, &binfo, &pMeterInfo[k], NULL, - searchFn); + queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, &binfo, &pMeterInfo[k], NULL, searchFn); if (ALL_CACHE_BLOCKS_CHECKED(pQuery)) { break; @@ -251,18 +262,16 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe dTrace("QInfo:%p complete check %d cache blocks, elapsed time:%.3fms", pQInfo, totalBlocks, time / 1000.0); setQueryStatus(pQuery, QUERY_NOT_COMPLETED); - - return pMeterInfo; } -static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMeterDataInfo) { +static void queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMeterDataInfo) { SQuery * pQuery = &pQInfo->query; SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; SQueryRuntimeEnv * pRuntimeEnv = &pSupporter->runtimeEnv; SMeterDataBlockInfoEx *pDataBlockInfoEx = NULL; int32_t nAllocBlocksInfoSize = 0; - SMeterObj * pTempMeter = getMeterObj(pSupporter->pMeterObj, pSupporter->pMeterSidExtInfo[0]->sid); + SMeterObj * pTempMeter = getMeterObj(pSupporter->pMetersHashTable, pSupporter->pMeterSidExtInfo[0]->sid); __block_search_fn_t searchFn = vnodeSearchKeyFunc[pTempMeter->searchAlgorithm]; int32_t vnodeId = pTempMeter->vnode; @@ -291,8 +300,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe pQuery->fileId = fid; pSummary->numOfFiles++; - char *pHeaderFileData = vnodeGetHeaderFileData(pRuntimeEnv, vnodeId, fileIdx); - if (pHeaderFileData == NULL) { // failed to mmap header file into buffer, ignore current file, try 
next + if (vnodeGetHeaderFile(pRuntimeEnv, fileIdx) != TSDB_CODE_SUCCESS) { fid += step; continue; } @@ -300,15 +308,17 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe int32_t numOfQualifiedMeters = 0; assert(fileIdx == pRuntimeEnv->vnodeFileInfo.current); - SMeterDataInfo **pReqMeterDataInfo = vnodeFilterQualifiedMeters(pQInfo, vnodeId, fileIdx, pSupporter->pSidSet, - pMeterDataInfo, &numOfQualifiedMeters); - - if (pReqMeterDataInfo == NULL) { - dError("QInfo:%p failed to allocate memory to perform query processing, abort", pQInfo); - - pQInfo->code = TSDB_CODE_SERV_OUT_OF_MEMORY; + SMeterDataInfo **pReqMeterDataInfo = NULL; + int32_t ret = vnodeFilterQualifiedMeters(pQInfo, vnodeId, pSupporter->pSidSet, pMeterDataInfo, + &numOfQualifiedMeters, &pReqMeterDataInfo); + if (ret != TSDB_CODE_SUCCESS) { + dError("QInfo:%p failed to create meterdata struct to perform query processing, abort", pQInfo); + + tfree(pReqMeterDataInfo); + pQInfo->code = -ret; pQInfo->killed = 1; - return NULL; + + return; } dTrace("QInfo:%p file:%s, %d meters qualified", pQInfo, pVnodeFileInfo->dataFilePath, numOfQualifiedMeters); @@ -320,8 +330,18 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe continue; } - uint32_t numOfBlocks = getDataBlocksForMeters(pSupporter, pQuery, pHeaderFileData, numOfQualifiedMeters, - pVnodeFileInfo->headerFilePath, pReqMeterDataInfo); + uint32_t numOfBlocks = 0; + ret = getDataBlocksForMeters(pSupporter, pQuery, numOfQualifiedMeters, pVnodeFileInfo->headerFilePath, + pReqMeterDataInfo, &numOfBlocks); + if (ret != TSDB_CODE_SUCCESS) { + dError("QInfo:%p failed to get data block before scan data blocks, abort", pQInfo); + + tfree(pReqMeterDataInfo); + pQInfo->code = -ret; + pQInfo->killed = 1; + + return; + } dTrace("QInfo:%p file:%s, %d meters contains %d blocks to be checked", pQInfo, pVnodeFileInfo->dataFilePath, numOfQualifiedMeters, numOfBlocks); @@ -332,15 +352,15 @@ static 
SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe continue; } - int32_t n = createDataBlocksInfoEx(pReqMeterDataInfo, numOfQualifiedMeters, &pDataBlockInfoEx, numOfBlocks, + ret = createDataBlocksInfoEx(pReqMeterDataInfo, numOfQualifiedMeters, &pDataBlockInfoEx, numOfBlocks, &nAllocBlocksInfoSize, (int64_t)pQInfo); - if (n < 0) { // failed to create data blocks - dError("QInfo:%p failed to allocate memory to perform query processing, abort", pQInfo); + if (ret != TSDB_CODE_SUCCESS) { // failed to create data blocks + dError("QInfo:%p build blockInfoEx failed, abort", pQInfo); tfree(pReqMeterDataInfo); - pQInfo->code = TSDB_CODE_SERV_OUT_OF_MEMORY; + pQInfo->code = -ret; pQInfo->killed = 1; - return NULL; + return; } dTrace("QInfo:%p start to load %d blocks and check", pQInfo, numOfBlocks); @@ -363,7 +383,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe stimeUnit = taosGetTimestampMs(); } else if ((j % TRACE_OUTPUT_BLOCK_CNT) == 0) { etimeUnit = taosGetTimestampMs(); - dTrace("QInfo:%p load and check %ld blocks, and continue. elapsed:%ldms", pQInfo, TRACE_OUTPUT_BLOCK_CNT, + dTrace("QInfo:%p load and check %" PRId64 " blocks, and continue. elapsed:%" PRId64 " ms", pQInfo, TRACE_OUTPUT_BLOCK_CNT, etimeUnit - stimeUnit); stimeUnit = taosGetTimestampMs(); } @@ -378,12 +398,12 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe restoreIntervalQueryRange(pRuntimeEnv, pMeterQueryInfo); - if (pQuery->nAggTimeInterval == 0) { // normal query + if (pQuery->nAggTimeInterval == 0 && !isSumAvgRateQuery(pQuery)) { // normal query if ((pQuery->lastKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || (pQuery->lastKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { qTrace( - "QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan this data block. qrange:%lld-%lld, " - "lastKey:%lld", + "QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan this data block. 
qrange:%" PRId64 "-%" PRId64 ", " + "lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); @@ -393,7 +413,12 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe setExecutionContext(pSupporter, pSupporter->pResult, pOneMeterDataInfo->meterOrderIdx, pOneMeterDataInfo->groupIdx, pMeterQueryInfo); } else { // interval query - setIntervalQueryExecutionContext(pSupporter, pOneMeterDataInfo->meterOrderIdx, pMeterQueryInfo); + ret = setIntervalQueryExecutionContext(pSupporter, pOneMeterDataInfo->meterOrderIdx, pMeterQueryInfo); + if (ret != TSDB_CODE_SUCCESS) { + tfree(pReqMeterDataInfo); // error code has been set + pQInfo->killed = 1; + return; + } } SCompBlock *pBlock = pInfoEx->pBlock.compBlock; @@ -405,7 +430,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe continue; } - SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_FILE_BLOCK); + SBlockInfo binfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_FILE_BLOCK); assert(pQuery->pos >= 0 && pQuery->pos < pBlock->numOfPoints); TSKEY *primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; @@ -421,8 +446,8 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe (pBlock->keyFirst >= pQuery->ekey && pBlock->keyLast <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery))); } - queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, (char *)pRuntimeEnv->colDataBuffer, &binfo, - pOneMeterDataInfo, pInfoEx->pBlock.fields, searchFn); + queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, &binfo, pOneMeterDataInfo, pInfoEx->pBlock.fields, + searchFn); } tfree(pReqMeterDataInfo); @@ -441,8 +466,6 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe setQueryStatus(pQuery, QUERY_NOT_COMPLETED); freeMeterBlockInfoEx(pDataBlockInfoEx, nAllocBlocksInfoSize); - - return pMeterDataInfo; } static bool 
multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool *dataInCache, int32_t index, @@ -455,7 +478,7 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool * setQueryStatus(pQuery, QUERY_NOT_COMPLETED); - SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[index]->sid); + SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[index]->sid); if (pMeterObj == NULL) { dError("QInfo:%p do not find required meter id: %d, all meterObjs id is:", pQInfo, pMeterSidExtInfo[index]->sid); return false; @@ -463,18 +486,21 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool * vnodeSetTagValueInParam(pSupporter->pSidSet, pRuntimeEnv, pMeterSidExtInfo[index]); - dTrace("QInfo:%p query on (%d): vid:%d sid:%d meterId:%s, qrange:%lld-%lld", pQInfo, index - start, pMeterObj->vnode, + dTrace("QInfo:%p query on (%d): vid:%d sid:%d meterId:%s, qrange:%" PRId64 "-%" PRId64, pQInfo, index - start, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey); pQInfo->pObj = pMeterObj; pQuery->lastKey = pQuery->skey; pRuntimeEnv->pMeterObj = pMeterObj; + + vnodeUpdateQueryColumnIndex(pQuery, pRuntimeEnv->pMeterObj); + vnodeUpdateFilterColumnIndex(pQuery); vnodeCheckIfDataExists(pRuntimeEnv, pMeterObj, dataInDisk, dataInCache); // data in file or cache is not qualified for the query. 
abort if (!(dataInCache || dataInDisk)) { - dTrace("QInfo:%p vid:%d sid:%d meterId:%s, qrange:%lld-%lld, nores, %p", pQInfo, pMeterObj->vnode, pMeterObj->sid, + dTrace("QInfo:%p vid:%d sid:%d meterId:%s, qrange:%" PRId64 "-%" PRId64 ", nores, %p", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery); return false; } @@ -493,6 +519,7 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool * } } + initCtxOutputBuf(pRuntimeEnv); return true; } @@ -516,7 +543,7 @@ static int64_t doCheckMetersInGroup(SQInfo *pQInfo, int32_t index, int32_t start SPointInterpoSupporter pointInterpSupporter = {0}; pointInterpSupporterInit(pQuery, &pointInterpSupporter); - if (!normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter)) { + if (!normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter, NULL)) { pointInterpSupporterDestroy(&pointInterpSupporter); return 0; } @@ -529,11 +556,9 @@ static int64_t doCheckMetersInGroup(SQInfo *pQInfo, int32_t index, int32_t start pointInterpSupporterDestroy(&pointInterpSupporter); vnodeScanAllData(pRuntimeEnv); - + // first/last_row query, do not invoke the finalize for super table query - if (!isFirstLastRowQuery(pQuery)) { - doFinalizeResult(pRuntimeEnv); - } + doFinalizeResult(pRuntimeEnv); int64_t numOfRes = getNumOfResult(pRuntimeEnv); assert(numOfRes == 1 || numOfRes == 0); @@ -547,7 +572,14 @@ static int64_t doCheckMetersInGroup(SQInfo *pQInfo, int32_t index, int32_t start return numOfRes; } -static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { +/** + * super table query handler + * 1. super table projection query, group-by on normal columns query, ts-comp query + * 2. 
point interpolation query, last row query + * + * @param pQInfo + */ +static void vnodeSTableSeqProcessor(SQInfo *pQInfo) { SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; SMeterSidExtInfo **pMeterSidExtInfo = pSupporter->pMeterSidExtInfo; @@ -556,11 +588,11 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { SQuery * pQuery = &pQInfo->query; tSidSet *pSids = pSupporter->pSidSet; - SMeterObj *pOneMeter = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[0]->sid); - - resetCtxOutputBuf(pRuntimeEnv); - + int32_t vid = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[0]->sid)->vnode; + if (isPointInterpoQuery(pQuery)) { + resetCtxOutputBuf(pRuntimeEnv); + assert(pQuery->limit.offset == 0 && pQuery->limit.limit != 0); while (pSupporter->subgroupIdx < pSids->numOfSubSet) { @@ -568,7 +600,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { int32_t end = pSids->starterPos[pSupporter->subgroupIdx + 1] - 1; if (isFirstLastRowQuery(pQuery)) { - dTrace("QInfo:%p last_row query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, pOneMeter->vnode, + dTrace("QInfo:%p last_row query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, vid, pSids->numOfSubSet, pSupporter->subgroupIdx); TSKEY key = -1; @@ -584,7 +616,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { } // get the last key of meters that belongs to this group - SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[k]->sid); + SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[k]->sid); if (pMeterObj != NULL) { if (key < pMeterObj->lastKey) { key = pMeterObj->lastKey; @@ -601,7 +633,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { int64_t num = doCheckMetersInGroup(pQInfo, index, start); assert(num >= 0); } else { - dTrace("QInfo:%p interp query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, pOneMeter->vnode, + dTrace("QInfo:%p interp query on 
vid:%d, numOfGroups:%d, current group:%d", pQInfo, vid, pSids->numOfSubSet, pSupporter->subgroupIdx); for (int32_t k = start; k <= end; ++k) { @@ -628,7 +660,9 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { } } } else { - // this procedure treats all tables as single group + /* + * 1. super table projection query, 2. group-by on normal columns query, 3. ts-comp query + */ assert(pSupporter->meterIdx >= 0); /* @@ -647,18 +681,10 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { if (pSupporter->meterIdx >= pSids->numOfSids) { return; } - - for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) { - SOutputRes *pOneRes = &pRuntimeEnv->pResult[i]; - clearGroupResultBuf(pOneRes, pQuery->numOfOutputCols); - } - - pRuntimeEnv->usedIndex = 0; - taosCleanUpIntHash(pRuntimeEnv->hashList); - - int32_t primeHashSlot = 10039; - pRuntimeEnv->hashList = taosInitIntHash(primeHashSlot, POINTER_BYTES, taosHashInt); - + + resetCtxOutputBuf(pRuntimeEnv); + resetSlidingWindowInfo(pRuntimeEnv, &pRuntimeEnv->swindowResInfo); + while (pSupporter->meterIdx < pSupporter->numOfMeters) { int32_t k = pSupporter->meterIdx; @@ -666,6 +692,12 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); return; } + + + TSKEY skey = pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[k]->key; + if (skey > 0) { + pQuery->skey = skey; + } bool dataInDisk = true; bool dataInCache = true; @@ -684,7 +716,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { #endif SPointInterpoSupporter pointInterpSupporter = {0}; - if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter) == false) { + if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter, NULL) == false) { pQuery->skey = pSupporter->rawSKey; pQuery->ekey = pSupporter->rawEKey; @@ -705,9 +737,6 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { } } - 
vnodeUpdateQueryColumnIndex(pQuery, pRuntimeEnv->pMeterObj); - vnodeUpdateFilterColumnIndex(pQuery); - vnodeScanAllData(pRuntimeEnv); pQuery->pointsRead = getNumOfResult(pRuntimeEnv); @@ -718,7 +747,10 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { pSupporter->meterIdx = pSupporter->pSidSet->numOfSids; break; } - + + // enable execution for next table, when handling the projection query + enableExecutionForNextTable(pRuntimeEnv); + if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) { /* * query range is identical in terms of all meters involved in query, @@ -730,14 +762,15 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { pQuery->ekey = pSupporter->rawEKey; pSupporter->meterIdx++; + pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[k]->key = pQuery->lastKey; + // if the buffer is full or group by each table, we need to jump out of the loop if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL) || isGroupbyEachTable(pQuery->pGroupbyExpr, pSupporter->pSidSet)) { break; } - } else { - // forward query range + } else { // forward query range pQuery->skey = pQuery->lastKey; // all data in the result buffer are skipped due to the offset, continue to retrieve data from current meter @@ -745,6 +778,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { assert(!Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)); continue; } else { + pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[k]->key = pQuery->lastKey; // buffer is full, wait for the next round to retrieve data from current meter assert(Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)); break; @@ -753,7 +787,18 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { } } - if (!isGroupbyNormalCol(pQuery->pGroupbyExpr) && !isFirstLastRowQuery(pQuery)) { + /* + * 1. super table projection query, group-by on normal columns query, ts-comp query + * 2. 
point interpolation query, last row query + * + * group-by on normal columns query and last_row query do NOT invoke the finalizer here, + * since the finalize stage will be done at the client side. + * + * projection query, point interpolation query do not need the finalizer. + * + * Only the ts-comp query requires the finalizer function to be executed here. + */ + if (isTSCompQuery(pQuery)) { doFinalizeResult(pRuntimeEnv); } @@ -761,9 +806,14 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { pRuntimeEnv->cur = pRuntimeEnv->pTSBuf->cur; } + // todo refactor if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) { - SOutputRes *buf = &pRuntimeEnv->pResult[i]; + SSlidingWindowInfo* pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo; + + for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) { + SOutputRes *buf = &pSlidingWindowInfo->pResult[i]; + pSlidingWindowInfo->pStatus[i].closed = true; // enable return all results for group by normal columns + for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { buf->numOfRows = MAX(buf->numOfRows, buf->resultInfo[j].numOfRes); } @@ -771,18 +821,16 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { pQInfo->pMeterQuerySupporter->subgroupIdx = 0; pQuery->pointsRead = 0; - copyFromGroupBuf(pQInfo, pRuntimeEnv->pResult); + copyFromGroupBuf(pQInfo, pSlidingWindowInfo->pResult); } pQInfo->pointsRead += pQuery->pointsRead; pQuery->pointsOffset = pQuery->pointsToRead; - moveDescOrderResultsToFront(pRuntimeEnv); - dTrace( "QInfo %p vid:%d, numOfMeters:%d, index:%d, numOfGroups:%d, %d points returned, totalRead:%d totalReturn:%d," - "next skey:%lld, offset:%lld", - pQInfo, pOneMeter->vnode, pSids->numOfSids, pSupporter->meterIdx, pSids->numOfSubSet, pQuery->pointsRead, + "next skey:%" PRId64 ", offset:%" PRId64, + pQInfo, vid, pSids->numOfSids, pSupporter->meterIdx, pSids->numOfSubSet, pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsReturned, 
pQuery->skey, pQuery->limit.offset); } @@ -791,26 +839,28 @@ static void doOrderedScan(SQInfo *pQInfo) { SQuery * pQuery = &pQInfo->query; if (QUERY_IS_ASC_QUERY(pQuery)) { - pSupporter->pMeterDataInfo = queryOnMultiDataFiles(pQInfo, pSupporter->pMeterDataInfo); + queryOnMultiDataFiles(pQInfo, pSupporter->pMeterDataInfo); if (pQInfo->code != TSDB_CODE_SUCCESS) { return; } - pSupporter->pMeterDataInfo = queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo); + queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo); } else { - pSupporter->pMeterDataInfo = queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo); + queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo); if (pQInfo->code != TSDB_CODE_SUCCESS) { return; } - pSupporter->pMeterDataInfo = queryOnMultiDataFiles(pQInfo, pSupporter->pMeterDataInfo); + queryOnMultiDataFiles(pQInfo, pSupporter->pMeterDataInfo); } } static void setupMeterQueryInfoForSupplementQuery(SMeterQuerySupportObj *pSupporter) { for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) { SMeterQueryInfo *pMeterQueryInfo = pSupporter->pMeterDataInfo[i].pMeterQInfo; - changeMeterQueryInfoForSuppleQuery(pMeterQueryInfo, pSupporter->rawSKey, pSupporter->rawEKey); + SQueryResultBuf* pResultBuf = pSupporter->runtimeEnv.pResultBuf; + + changeMeterQueryInfoForSuppleQuery(pResultBuf, pMeterQueryInfo, pSupporter->rawSKey, pSupporter->rawEKey); } } @@ -887,10 +937,11 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { pSupporter->pMeterDataInfo = (SMeterDataInfo *)calloc(1, sizeof(SMeterDataInfo) * pSupporter->numOfMeters); if (pSupporter->pMeterDataInfo == NULL) { dError("QInfo:%p failed to allocate memory, %s", pQInfo, strerror(errno)); + pQInfo->code = -TSDB_CODE_SERV_OUT_OF_MEMORY; return; } - dTrace("QInfo:%p query start, qrange:%lld-%lld, order:%d, group:%d", pQInfo, pSupporter->rawSKey, pSupporter->rawEKey, + dTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", order:%d, group:%d", pQInfo, pSupporter->rawSKey, 
pSupporter->rawEKey, pQuery->order.order, pSupporter->pSidSet->numOfSubSet); dTrace("QInfo:%p main query scan start", pQInfo); @@ -900,7 +951,12 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { dTrace("QInfo:%p main scan completed, elapsed time: %lldms, supplementary scan start, order:%d", pQInfo, et - st, pQuery->order.order ^ 1); - doCloseAllOpenedResults(pSupporter); + // failed to save all intermediate results into disk, abort further query processing + if (doCloseAllOpenedResults(pSupporter) != TSDB_CODE_SUCCESS) { + dError("QInfo:%p failed to save intermediate results, abort further query processing", pQInfo); + return; + } + doMultiMeterSupplementaryScan(pQInfo); if (isQueryKilled(pQuery)) { @@ -908,15 +964,16 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { return; } - if (pQuery->nAggTimeInterval > 0) { + if (pQuery->nAggTimeInterval > 0 || isSumAvgRateQuery(pQuery)) { assert(pSupporter->subgroupIdx == 0 && pSupporter->numOfGroupResultPages == 0); - mergeMetersResultToOneGroups(pSupporter); - copyResToQueryResultBuf(pSupporter, pQuery); - + if (mergeMetersResultToOneGroups(pSupporter) == TSDB_CODE_SUCCESS) { + copyResToQueryResultBuf(pSupporter, pQuery); + #ifdef _DEBUG_VIEW - displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len); + displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len); #endif + } } else { // not a interval query copyFromGroupBuf(pQInfo, pSupporter->pResult); } @@ -933,7 +990,7 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { * select count(*)/top(field,k)/avg(field name) from table_name [where ts>now-1a]; * select count(*) from table_name group by status_column; */ -static void vnodeSingleMeterFixedOutputProcessor(SQInfo *pQInfo) { +static void vnodeSingleTableFixedOutputProcessor(SQInfo *pQInfo) { SQuery * pQuery = &pQInfo->query; SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->pMeterQuerySupporter->runtimeEnv; @@ -956,20 +1013,13 @@ static void 
vnodeSingleMeterFixedOutputProcessor(SQInfo *pQInfo) { assert(isTopBottomQuery(pQuery)); } - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - pQInfo->pMeterQuerySupporter->subgroupIdx = 0; - pQuery->pointsRead = 0; - copyFromGroupBuf(pQInfo, pRuntimeEnv->pResult); - } - doSkipResults(pRuntimeEnv); doRevisedResultsByLimit(pQInfo); - moveDescOrderResultsToFront(pRuntimeEnv); pQInfo->pointsRead = pQuery->pointsRead; } -static void vnodeSingleMeterMultiOutputProcessor(SQInfo *pQInfo) { +static void vnodeSingleTableMultiOutputProcessor(SQInfo *pQInfo) { SQuery * pQuery = &pQInfo->query; SMeterObj *pMeterObj = pQInfo->pObj; @@ -1004,22 +1054,20 @@ static void vnodeSingleMeterMultiOutputProcessor(SQInfo *pQInfo) { TSKEY nextTimestamp = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos); assert(nextTimestamp > 0 || ((nextTimestamp < 0) && Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK))); - dTrace("QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%lld, next qrange:%lld-%lld", pQInfo, + dTrace("QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%" PRId64 ", next qrange:%" PRId64 "-%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->limit.offset, pQuery->lastKey, pQuery->ekey); resetCtxOutputBuf(pRuntimeEnv); } doRevisedResultsByLimit(pQInfo); - moveDescOrderResultsToFront(pRuntimeEnv); - pQInfo->pointsRead += pQuery->pointsRead; if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)) { TSKEY nextTimestamp = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos); assert(nextTimestamp > 0 || ((nextTimestamp < 0) && Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK))); - dTrace("QInfo:%p vid:%d sid:%d id:%s, query abort due to buffer limitation, next qrange:%lld-%lld", pQInfo, + dTrace("QInfo:%p vid:%d sid:%d id:%s, query abort due to buffer limitation, next qrange:%" PRId64 "-%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->lastKey, pQuery->ekey); } @@ -1040,7 +1088,8 @@ static 
void vnodeSingleMeterIntervalMainLooper(SMeterQuerySupportObj *pSupporter (pQuery->skey >= pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))); initCtxOutputBuf(pRuntimeEnv); - + clearCompletedSlidingWindows(pRuntimeEnv); + vnodeScanAllData(pRuntimeEnv); if (isQueryKilled(pQuery)) { return; @@ -1071,7 +1120,7 @@ static void vnodeSingleMeterIntervalMainLooper(SMeterQuerySupportObj *pSupporter } forwardIntervalQueryRange(pSupporter, pRuntimeEnv); - if (Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED)) { + if (Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED|QUERY_RESBUF_FULL)) { break; } @@ -1089,7 +1138,7 @@ static void vnodeSingleMeterIntervalMainLooper(SMeterQuerySupportObj *pSupporter } /* handle time interval query on single table */ -static void vnodeSingleMeterIntervalProcessor(SQInfo *pQInfo) { +static void vnodeSingleTableIntervalProcessor(SQInfo *pQInfo) { SQuery * pQuery = &(pQInfo->query); SMeterObj *pMeterObj = pQInfo->pObj; @@ -1110,17 +1159,8 @@ static void vnodeSingleMeterIntervalProcessor(SQInfo *pQInfo) { taosInterpoSetStartInfo(&pRuntimeEnv->interpoInfo, pQuery->pointsRead, pQuery->interpoType); SData **pInterpoBuf = pRuntimeEnv->pInterpoBuf; - if (QUERY_IS_ASC_QUERY(pQuery)) { - for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - memcpy(pInterpoBuf[i]->data, pQuery->sdata[i]->data, pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes); - } - } else { - int32_t size = pMeterObj->pointsPerFileBlock; - for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { - memcpy(pInterpoBuf[i]->data, - pQuery->sdata[i]->data + (size - pQuery->pointsRead) * pQuery->pSelectExpr[i].resBytes, - pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes); - } + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + memcpy(pInterpoBuf[i]->data, pQuery->sdata[i]->data, pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes); } numOfInterpo = 0; @@ -1137,18 +1177,22 @@ static void vnodeSingleMeterIntervalProcessor(SQInfo *pQInfo) { pQuery->pointsRead = 0; } } + + if 
(isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0)) { + pQInfo->pMeterQuerySupporter->subgroupIdx = 0; + pQuery->pointsRead = 0; + copyFromGroupBuf(pQInfo, pRuntimeEnv->swindowResInfo.pResult); + } pQInfo->pointsRead += pQuery->pointsRead; pQInfo->pointsInterpo += numOfInterpo; - moveDescOrderResultsToFront(pRuntimeEnv); - dTrace("%p vid:%d sid:%d id:%s, %d points returned %d points interpo, totalRead:%d totalInterpo:%d totalReturn:%d", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead, numOfInterpo, pQInfo->pointsRead - pQInfo->pointsInterpo, pQInfo->pointsInterpo, pQInfo->pointsReturned); } -void vnodeSingleMeterQuery(SSchedMsg *pMsg) { +void vnodeSingleTableQuery(SSchedMsg *pMsg) { SQInfo *pQInfo = (SQInfo *)pMsg->ahandle; if (pQInfo == NULL || pQInfo->pMeterQuerySupporter == NULL) { @@ -1157,12 +1201,13 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { } if (pQInfo->killed) { - TSDB_QINFO_RESET_SIG(pQInfo); - dTrace("QInfo:%p it is already killed, reset signature and abort", pQInfo); + dTrace("QInfo:%p it is already killed, abort", pQInfo); + vnodeDecRefCount(pQInfo); + return; } - assert(pQInfo->signature == TSDB_QINFO_QUERY_FLAG); + assert(pQInfo->refCount >= 1); SQuery * pQuery = &pQInfo->query; SMeterObj *pMeterObj = pQInfo->pObj; @@ -1185,7 +1230,6 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { (tFilePage **)pRuntimeEnv->pInterpoBuf, remain, &numOfInterpo); doRevisedResultsByLimit(pQInfo); - moveDescOrderResultsToFront(pRuntimeEnv); pQInfo->pointsInterpo += numOfInterpo; pQInfo->pointsRead += pQuery->pointsRead; @@ -1196,10 +1240,8 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead, numOfInterpo, pQInfo->pointsRead, pQInfo->pointsInterpo, pQInfo->pointsReturned); - dTrace("QInfo:%p reset signature", pQInfo); - - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); + 
vnodeDecRefCount(pQInfo); return; } @@ -1218,23 +1260,22 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsInterpo, pQInfo->pointsReturned); - dTrace("QInfo:%p reset signature", pQInfo); - - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); + vnodeDecRefCount(pQInfo); + return; } } } pQInfo->over = 1; - dTrace("QInfo:%p vid:%d sid:%d id:%s, query over, %d points are returned, reset signature", pQInfo, + dTrace("QInfo:%p vid:%d sid:%d id:%s, query over, %d points are returned", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQInfo->pointsRead); vnodePrintQueryStatistics(pQInfo->pMeterQuerySupporter); - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); - + + vnodeDecRefCount(pQInfo); return; } @@ -1244,16 +1285,17 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { int64_t st = taosGetTimestampUs(); - if (pQuery->nAggTimeInterval != 0) { // interval (down sampling operation) + // group by normal column, sliding window query, interval query are handled by interval query processor + if (pQuery->nAggTimeInterval != 0 || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // interval (down sampling operation) assert(pQuery->checkBufferInLoop == 0 && pQuery->pointsOffset == pQuery->pointsToRead); - vnodeSingleMeterIntervalProcessor(pQInfo); + vnodeSingleTableIntervalProcessor(pQInfo); } else { if (isFixedOutputQuery(pQuery)) { assert(pQuery->checkBufferInLoop == 0); - vnodeSingleMeterFixedOutputProcessor(pQInfo); + vnodeSingleTableFixedOutputProcessor(pQInfo); } else { // diff/add/multiply/subtract/division assert(pQuery->checkBufferInLoop == 1); - vnodeSingleMeterMultiOutputProcessor(pQInfo); + vnodeSingleTableMultiOutputProcessor(pQInfo); } } @@ -1262,15 +1304,15 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) { /* check if query is killed or not */ if (isQueryKilled(pQuery)) { - dTrace("QInfo:%p query is killed, reset signature", 
pQInfo); + dTrace("QInfo:%p query is killed", pQInfo); pQInfo->over = 1; } else { - dTrace("QInfo:%p vid:%d sid:%d id:%s, meter query thread completed, %d points are returned, reset signature", + dTrace("QInfo:%p vid:%d sid:%d id:%s, meter query thread completed, %d points are returned", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead); } - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); + vnodeDecRefCount(pQInfo); } void vnodeMultiMeterQuery(SSchedMsg *pMsg) { @@ -1281,12 +1323,12 @@ void vnodeMultiMeterQuery(SSchedMsg *pMsg) { } if (pQInfo->killed) { - TSDB_QINFO_RESET_SIG(pQInfo); - dTrace("QInfo:%p it is already killed, reset signature and abort", pQInfo); + vnodeDecRefCount(pQInfo); + dTrace("QInfo:%p it is already killed, abort", pQInfo); return; } - assert(pQInfo->signature == TSDB_QINFO_QUERY_FLAG); + assert(pQInfo->refCount >= 1); SQuery *pQuery = &pQInfo->query; pQuery->pointsRead = 0; @@ -1300,14 +1342,13 @@ void vnodeMultiMeterQuery(SSchedMsg *pMsg) { assert((pQuery->checkBufferInLoop == 1 && pQuery->nAggTimeInterval == 0) || isPointInterpoQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)); - vnodeMultiMeterMultiOutputProcessor(pQInfo); + vnodeSTableSeqProcessor(pQInfo); } /* record the total elapsed time */ pQInfo->useconds += (taosGetTimestampUs() - st); pQInfo->over = isQueryKilled(pQuery) ? 
1 : 0; - dTrace("QInfo:%p reset signature", pQInfo); taosInterpoSetStartInfo(&pQInfo->pMeterQuerySupporter->runtimeEnv.interpoInfo, pQuery->pointsRead, pQInfo->query.interpoType); @@ -1315,11 +1356,11 @@ void vnodeMultiMeterQuery(SSchedMsg *pMsg) { if (pQuery->pointsRead == 0) { pQInfo->over = 1; - dTrace("QInfo:%p over, %d meters queried, %d points are returned, reset signature", pQInfo, pSupporter->numOfMeters, + dTrace("QInfo:%p over, %d meters queried, %d points are returned", pQInfo, pSupporter->numOfMeters, pQInfo->pointsRead); vnodePrintQueryStatistics(pSupporter); } - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); + vnodeDecRefCount(pQInfo); } diff --git a/src/system/detail/src/vnodeRead.c b/src/system/detail/src/vnodeRead.c index 81e4f6e370ae0a8591ec7c63fcdabc53732df2ec..71dd088ae97b8110c85cc3733c6500ddc52e06f4 100644 --- a/src/system/detail/src/vnodeRead.c +++ b/src/system/detail/src/vnodeRead.c @@ -25,8 +25,8 @@ #include "vnode.h" #include "vnodeRead.h" #include "vnodeUtil.h" - -#pragma GCC diagnostic ignored "-Wint-conversion" +#include "hash.h" +#include "hashutil.h" int (*pQueryFunc[])(SMeterObj *, SQuery *) = {vnodeQueryFromCache, vnodeQueryFromFile}; @@ -195,8 +195,6 @@ static SQInfo *vnodeAllocateQInfoCommon(SQueryMeterMsg *pQueryMsg, SMeterObj *pM } else { pQuery->colList[i].data.filters = NULL; } - - pQuery->dataRowSize += colList[i].bytes; } vnodeUpdateQueryColumnIndex(pQuery, pMeterObj); @@ -269,6 +267,7 @@ static SQInfo *vnodeAllocateQInfoEx(SQueryMeterMsg *pQueryMsg, SSqlGroupbyExpr * pQuery->pGroupbyExpr = pGroupbyExpr; pQuery->nAggTimeInterval = pQueryMsg->nAggTimeInterval; + pQuery->slidingTime = pQueryMsg->slidingTime; pQuery->interpoType = pQueryMsg->interpoType; pQuery->intervalTimeUnit = pQueryMsg->intervalTimeUnit; @@ -394,26 +393,15 @@ __clean_memory: return NULL; } -static void vnodeFreeQInfoInQueueImpl(SSchedMsg *pMsg) { - SQInfo *pQInfo = (SQInfo *)pMsg->ahandle; - vnodeFreeQInfo(pQInfo, true); -} - void 
vnodeFreeQInfoInQueue(void *param) { SQInfo *pQInfo = (SQInfo *)param; if (!vnodeIsQInfoValid(pQInfo)) return; pQInfo->killed = 1; - - dTrace("QInfo:%p set kill flag and add to queue, stop query ASAP", pQInfo); - SSchedMsg schedMsg = {0}; - schedMsg.fp = vnodeFreeQInfoInQueueImpl; - - schedMsg.msg = NULL; - schedMsg.thandle = (void *)1; - schedMsg.ahandle = param; - taosScheduleTask(queryQhandle, &schedMsg); + dTrace("QInfo:%p set kill flag to free QInfo"); + + vnodeDecRefCount(pQInfo); } void vnodeFreeQInfo(void *param, bool decQueryRef) { @@ -421,8 +409,6 @@ void vnodeFreeQInfo(void *param, bool decQueryRef) { if (!vnodeIsQInfoValid(param)) return; pQInfo->killed = 1; - TSDB_WAIT_TO_SAFE_DROP_QINFO(pQInfo); - SMeterObj *pObj = pQInfo->pObj; dTrace("QInfo:%p start to free SQInfo", pQInfo); @@ -501,7 +487,30 @@ bool vnodeIsQInfoValid(void *param) { * into local variable, then compare by using local variable */ uint64_t sig = pQInfo->signature; - return (sig == (uint64_t)pQInfo) || (sig == TSDB_QINFO_QUERY_FLAG); + return (sig == (uint64_t)pQInfo); +} + +void vnodeDecRefCount(void *param) { + SQInfo *pQInfo = (SQInfo*) param; + + assert(vnodeIsQInfoValid(pQInfo)); + + int32_t ref = atomic_sub_fetch_32(&pQInfo->refCount, 1); + assert(ref >= 0); + + dTrace("QInfo:%p decrease obj refcount, %d", pQInfo, ref); + if (ref == 0) { + vnodeFreeQInfo(pQInfo, true); + } +} + +void vnodeAddRefCount(void *param) { + SQInfo *pQInfo = (SQInfo*) param; + + assert(vnodeIsQInfoValid(pQInfo)); + + int32_t ref = atomic_add_fetch_32(&pQInfo->refCount, 1); + dTrace("QInfo:%p add refcount, %d", pQInfo, ref); } void vnodeQueryData(SSchedMsg *pMsg) { @@ -511,12 +520,11 @@ void vnodeQueryData(SSchedMsg *pMsg) { pQInfo = (SQInfo *)pMsg->ahandle; if (pQInfo->killed) { - TSDB_QINFO_RESET_SIG(pQInfo); - dTrace("QInfo:%p it is already killed, reset signature and abort", pQInfo); + dTrace("QInfo:%p it is already killed, abort", pQInfo); + vnodeDecRefCount(pQInfo); return; } - 
assert(pQInfo->signature == TSDB_QINFO_QUERY_FLAG); pQuery = &(pQInfo->query); SMeterObj *pObj = pQInfo->pObj; @@ -562,13 +570,13 @@ void vnodeQueryData(SSchedMsg *pMsg) { pQuery->slot = -1; // reset the handle pQuery->over = 0; - dTrace("vid:%d sid:%d id:%s, query in other media, order:%d, skey:%lld query:%p", pObj->vnode, pObj->sid, + dTrace("vid:%d sid:%d id:%s, query in other media, order:%d, skey:%" PRId64 " query:%p", pObj->vnode, pObj->sid, pObj->meterId, pQuery->order.order, pQuery->skey, pQuery); } pQInfo->pointsRead += pQuery->pointsRead; - dTrace("vid:%d sid:%d id:%s, %d points returned, totalRead:%d totalReturn:%d last key:%lld, query:%p", pObj->vnode, + dTrace("vid:%d sid:%d id:%s, %d points returned, totalRead:%d totalReturn:%d last key:%" PRId64 ", query:%p", pObj->vnode, pObj->sid, pObj->meterId, pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsReturned, pQuery->lastKey, pQuery); @@ -584,13 +592,11 @@ void vnodeQueryData(SSchedMsg *pMsg) { tclose(pQInfo->query.lfd); } - /* reset QInfo signature */ - dTrace("QInfo:%p reset signature", pQInfo); - TSDB_QINFO_RESET_SIG(pQInfo); sem_post(&pQInfo->dataReady); + vnodeDecRefCount(pQInfo); } -void *vnodeQueryInTimeRange(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *pSqlExprs, +void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *pSqlExprs, SQueryMeterMsg *pQueryMsg, int32_t *code) { SQInfo *pQInfo; SQuery *pQuery; @@ -599,7 +605,7 @@ void *vnodeQueryInTimeRange(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExp bool isProjQuery = vnodeIsProjectionQuery(pSqlExprs, pQueryMsg->numOfOutputCols); // todo pass the correct error code - if (isProjQuery) { + if (isProjQuery && pQueryMsg->tsLen == 0) { pQInfo = vnodeAllocateQInfo(pQueryMsg, pMeterObj, pSqlExprs); } else { pQInfo = vnodeAllocateQInfoEx(pQueryMsg, pGroupbyExpr, pSqlExprs, pMetersObj[0]); @@ -613,12 +619,17 @@ void *vnodeQueryInTimeRange(SMeterObj **pMetersObj, 
SSqlGroupbyExpr *pGroupbyExp pQuery = &(pQInfo->query); dTrace("qmsg:%p create QInfo:%p, QInfo created", pQueryMsg, pQInfo); - pQuery->skey = pQueryMsg->skey; + SMeterSidExtInfo** pSids = (SMeterSidExtInfo**)pQueryMsg->pSidExtInfo; + if (pSids != NULL && pSids[0]->key > 0) { + pQuery->skey = pSids[0]->key; + } else { + pQuery->skey = pQueryMsg->skey; + } + pQuery->ekey = pQueryMsg->ekey; pQuery->lastKey = pQuery->skey; pQInfo->fp = pQueryFunc[pQueryMsg->order]; - pQInfo->num = pQueryMsg->num; if (sem_init(&(pQInfo->dataReady), 0, 0) != 0) { dError("QInfo:%p vid:%d sid:%d meterId:%s, init dataReady sem failed, reason:%s", pQInfo, pMeterObj->vnode, @@ -629,7 +640,9 @@ void *vnodeQueryInTimeRange(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExp SSchedMsg schedMsg = {0}; - if (!isProjQuery) { + if (isProjQuery && pQueryMsg->tsLen == 0) { + schedMsg.fp = vnodeQueryData; + } else { if (vnodeParametersSafetyCheck(pQuery) == false) { *code = TSDB_CODE_APP_ERROR; goto _error; @@ -638,8 +651,9 @@ void *vnodeQueryInTimeRange(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExp SMeterQuerySupportObj *pSupporter = (SMeterQuerySupportObj *)calloc(1, sizeof(SMeterQuerySupportObj)); pSupporter->numOfMeters = 1; - pSupporter->pMeterObj = taosInitIntHash(pSupporter->numOfMeters, POINTER_BYTES, taosHashInt); - taosAddIntHash(pSupporter->pMeterObj, pMetersObj[0]->sid, (char *)&pMetersObj[0]); + pSupporter->pMetersHashTable = taosInitHashTable(pSupporter->numOfMeters, taosIntHash_32, false); + taosAddToHashTable(pSupporter->pMetersHashTable, (const char*) &pMetersObj[0]->sid, sizeof(pMeterObj[0].sid), + (char *)&pMetersObj[0], POINTER_BYTES); pSupporter->pSidSet = NULL; pSupporter->subgroupIdx = -1; @@ -661,23 +675,27 @@ void *vnodeQueryInTimeRange(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExp } if (pQInfo->over == 1) { + vnodeAddRefCount(pQInfo); // for retrieve procedure return pQInfo; } - schedMsg.fp = vnodeSingleMeterQuery; - } else { - schedMsg.fp = vnodeQueryData; + 
schedMsg.fp = vnodeSingleTableQuery; } - // set in query flag - pQInfo->signature = TSDB_QINFO_QUERY_FLAG; - + /* + * The reference count, which is 2, is for both the current query thread and the future retrieve request, + * which will always be issued by client to acquire data or free SQInfo struct. + */ + vnodeAddRefCount(pQInfo); + vnodeAddRefCount(pQInfo); + schedMsg.msg = NULL; schedMsg.thandle = (void *)1; schedMsg.ahandle = pQInfo; - dTrace("QInfo:%p set query flag and prepare runtime environment completed, wait for schedule", pQInfo); - + dTrace("QInfo:%p set query flag and prepare runtime environment completed, ref:%d, wait for schedule", pQInfo, + pQInfo->refCount); + taosScheduleTask(queryQhandle, &schedMsg); return pQInfo; @@ -711,7 +729,6 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE pQuery->ekey = pQueryMsg->ekey; pQInfo->fp = pQueryFunc[pQueryMsg->order]; - pQInfo->num = pQueryMsg->num; if (sem_init(&(pQInfo->dataReady), 0, 0) != 0) { dError("QInfo:%p vid:%d sid:%d id:%s, init dataReady sem failed, reason:%s", pQInfo, pMetersObj[0]->vnode, @@ -725,12 +742,12 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE SMeterQuerySupportObj *pSupporter = (SMeterQuerySupportObj *)calloc(1, sizeof(SMeterQuerySupportObj)); pSupporter->numOfMeters = pQueryMsg->numOfSids; - pSupporter->pMeterObj = taosInitIntHash(pSupporter->numOfMeters, POINTER_BYTES, taosHashInt); + pSupporter->pMetersHashTable = taosInitHashTable(pSupporter->numOfMeters, taosIntHash_32, false); for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) { - taosAddIntHash(pSupporter->pMeterObj, pMetersObj[i]->sid, (char *)&pMetersObj[i]); + taosAddToHashTable(pSupporter->pMetersHashTable, (const char*) &pMetersObj[i]->sid, sizeof(pMetersObj[i]->sid), (char *)&pMetersObj[i], + POINTER_BYTES); } - pSupporter->pMeterSidExtInfo = (SMeterSidExtInfo **)pQueryMsg->pSidExtInfo; int32_t sidElemLen = pQueryMsg->tagLength + 
sizeof(SMeterSidExtInfo); int32_t size = POINTER_BYTES * pQueryMsg->numOfSids + sidElemLen * pQueryMsg->numOfSids; @@ -744,12 +761,16 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE char *px = ((char *)pSupporter->pMeterSidExtInfo) + POINTER_BYTES * pQueryMsg->numOfSids; for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) { - pSupporter->pMeterSidExtInfo[i] = (SMeterSidExtInfo *)px; - pSupporter->pMeterSidExtInfo[i]->sid = ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]->sid; + SMeterSidExtInfo* pSrc = ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]; + SMeterSidExtInfo* pDst = (SMeterSidExtInfo *)px; + + pSupporter->pMeterSidExtInfo[i] = pDst; + pDst->sid = pSrc->sid; + pDst->uid = pSrc->uid; + pDst->key = pSrc->key; if (pQueryMsg->tagLength > 0) { - memcpy(pSupporter->pMeterSidExtInfo[i]->tags, ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]->tags, - pQueryMsg->tagLength); + memcpy(pDst->tags, pSrc->tags, pQueryMsg->tagLength); } px += sidElemLen; } @@ -777,12 +798,13 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE goto _error; } + vnodeAddRefCount(pQInfo); if (pQInfo->over == 1) { return pQInfo; } - pQInfo->signature = TSDB_QINFO_QUERY_FLAG; - + vnodeAddRefCount(pQInfo); + schedMsg.msg = NULL; schedMsg.thandle = (void *)1; schedMsg.ahandle = pQInfo; @@ -824,11 +846,11 @@ int vnodeRetrieveQueryInfo(void *handle, int *numOfRows, int *rowSize, int16_t * } if (pQInfo->killed) { - dTrace("QInfo:%p it is already killed, %p, code:%d", pQInfo, pQuery, pQInfo->code); + dTrace("QInfo:%p query is killed, %p, code:%d", pQInfo, pQuery, pQInfo->code); if (pQInfo->code == TSDB_CODE_SUCCESS) { return TSDB_CODE_QUERY_CANCELLED; } else { // in case of not TSDB_CODE_SUCCESS, return the code to client - return pQInfo->code; + return abs(pQInfo->code); } } @@ -837,8 +859,13 @@ int vnodeRetrieveQueryInfo(void *handle, int *numOfRows, int *rowSize, int16_t * *rowSize = pQuery->rowSize; *timePrec = 
vnodeList[pQInfo->pObj->vnode].cfg.precision; - - if (pQInfo->code < 0) return -pQInfo->code; + + dTrace("QInfo:%p, retrieve data info completed, precision:%d, rowsize:%d, rows:%d, code:%d", pQInfo, *timePrec, + *rowSize, *numOfRows, pQInfo->code); + + if (pQInfo->code < 0) { // less than 0 means there are error existed. + return -pQInfo->code; + } return TSDB_CODE_SUCCESS; } @@ -857,25 +884,23 @@ int vnodeSaveQueryResult(void *handle, char *data, int32_t *size) { pQInfo->pointsRead); if (pQInfo->over == 0) { - //dTrace("QInfo:%p set query flag, oldSig:%p, func:%s", pQInfo, pQInfo->signature, __FUNCTION__); - dTrace("QInfo:%p set query flag, oldSig:%p", pQInfo, pQInfo->signature); - uint64_t oldSignature = TSDB_QINFO_SET_QUERY_FLAG(pQInfo); - - /* - * If SQInfo has been released, the value of signature cannot be equalled to the address of pQInfo, - * since in release function, the original value has been destroyed. However, this memory area may be reused - * by another function. It may be 0 or any value, but it is rarely still be equalled to the address of SQInfo. 
- */ - if (oldSignature == 0 || oldSignature != (uint64_t)pQInfo) { - dTrace("%p freed or killed, old sig:%p abort query", pQInfo, oldSignature); + #ifdef _TD_ARM_ + dTrace("QInfo:%p set query flag, sig:%" PRIu64 ", func:vnodeSaveQueryResult", pQInfo, pQInfo->signature); + #else + dTrace("QInfo:%p set query flag, sig:%" PRIu64 ", func:%s", pQInfo, pQInfo->signature, __FUNCTION__); + #endif + + if (pQInfo->killed == 1) { + dTrace("%p freed or killed, abort query", pQInfo); } else { + vnodeAddRefCount(pQInfo); dTrace("%p add query into task queue for schedule", pQInfo); - - SSchedMsg schedMsg; + + SSchedMsg schedMsg = {0}; if (pQInfo->pMeterQuerySupporter != NULL) { if (pQInfo->pMeterQuerySupporter->pSidSet == NULL) { - schedMsg.fp = vnodeSingleMeterQuery; + schedMsg.fp = vnodeSingleTableQuery; } else { // group by tag schedMsg.fp = vnodeMultiMeterQuery; } @@ -896,27 +921,27 @@ int vnodeSaveQueryResult(void *handle, char *data, int32_t *size) { static int32_t validateQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { if (pQueryMsg->nAggTimeInterval < 0) { - dError("qmsg:%p illegal value of aggTimeInterval %ld", pQueryMsg, pQueryMsg->nAggTimeInterval); + dError("qmsg:%p illegal value of aggTimeInterval %" PRId64 "", pQueryMsg, pQueryMsg->nAggTimeInterval); return -1; } if (pQueryMsg->numOfTagsCols < 0 || pQueryMsg->numOfTagsCols > TSDB_MAX_TAGS + 1) { - dError("qmsg:%p illegal value of numOfTagsCols %ld", pQueryMsg, pQueryMsg->numOfTagsCols); + dError("qmsg:%p illegal value of numOfTagsCols %d", pQueryMsg, pQueryMsg->numOfTagsCols); return -1; } if (pQueryMsg->numOfCols <= 0 || pQueryMsg->numOfCols > TSDB_MAX_COLUMNS) { - dError("qmsg:%p illegal value of numOfCols %ld", pQueryMsg, pQueryMsg->numOfCols); + dError("qmsg:%p illegal value of numOfCols %d", pQueryMsg, pQueryMsg->numOfCols); return -1; } if (pQueryMsg->numOfSids <= 0) { - dError("qmsg:%p illegal value of numOfSids %ld", pQueryMsg, pQueryMsg->numOfSids); + dError("qmsg:%p illegal value of numOfSids %d", pQueryMsg, 
pQueryMsg->numOfSids); return -1; } if (pQueryMsg->numOfGroupCols < 0) { - dError("qmsg:%p illegal value of numOfGroupbyCols %ld", pQueryMsg, pQueryMsg->numOfGroupCols); + dError("qmsg:%p illegal value of numOfGroupbyCols %d", pQueryMsg, pQueryMsg->numOfGroupCols); return -1; } @@ -945,14 +970,14 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { pQueryMsg->ekey = htobe64(pQueryMsg->ekey); #endif - pQueryMsg->num = htonl(pQueryMsg->num); - pQueryMsg->order = htons(pQueryMsg->order); pQueryMsg->orderColId = htons(pQueryMsg->orderColId); pQueryMsg->queryType = htons(pQueryMsg->queryType); pQueryMsg->nAggTimeInterval = htobe64(pQueryMsg->nAggTimeInterval); + pQueryMsg->slidingTime = htobe64(pQueryMsg->slidingTime); + pQueryMsg->numOfTagsCols = htons(pQueryMsg->numOfTagsCols); pQueryMsg->numOfCols = htons(pQueryMsg->numOfCols); pQueryMsg->numOfOutputCols = htons(pQueryMsg->numOfOutputCols); @@ -998,8 +1023,9 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { if (pDestFilterInfo->filterOnBinary) { pDestFilterInfo->len = htobe64(pFilterInfo->len); - pDestFilterInfo->pz = calloc(1, pDestFilterInfo->len + 1); - memcpy(pDestFilterInfo->pz, pMsg, pDestFilterInfo->len + 1); + + pDestFilterInfo->pz = (int64_t)calloc(1, pDestFilterInfo->len + 1); + memcpy((void*)pDestFilterInfo->pz, pMsg, pDestFilterInfo->len + 1); pMsg += (pDestFilterInfo->len + 1); } else { pDestFilterInfo->lowerBndi = htobe64(pFilterInfo->lowerBndi); @@ -1017,8 +1043,7 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { * 1. simple projection query on meters, we only record the pSqlFuncExprs[i].colIdx value * 2. for complex queries, whole SqlExprs object is required. 
*/ - pQueryMsg->pSqlFuncExprs = malloc(POINTER_BYTES * pQueryMsg->numOfOutputCols); - + pQueryMsg->pSqlFuncExprs = (int64_t)malloc(POINTER_BYTES * pQueryMsg->numOfOutputCols); SSqlFuncExprMsg *pExprMsg = (SSqlFuncExprMsg *)pMsg; for (int32_t i = 0; i < pQueryMsg->numOfOutputCols; ++i) { @@ -1065,7 +1090,7 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { pQueryMsg->colNameLen = htonl(pQueryMsg->colNameLen); if (hasArithmeticFunction) { // column name array assert(pQueryMsg->colNameLen > 0); - pQueryMsg->colNameList = pMsg; + pQueryMsg->colNameList = (int64_t)pMsg; pMsg += pQueryMsg->colNameLen; } @@ -1074,10 +1099,14 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { pSids[0] = (SMeterSidExtInfo *)pMsg; pSids[0]->sid = htonl(pSids[0]->sid); - + pSids[0]->uid = htobe64(pSids[0]->uid); + pSids[0]->key = htobe64(pSids[0]->key); + for (int32_t j = 1; j < pQueryMsg->numOfSids; ++j) { pSids[j] = (SMeterSidExtInfo *)((char *)pSids[j - 1] + sizeof(SMeterSidExtInfo) + pQueryMsg->tagLength); pSids[j]->sid = htonl(pSids[j]->sid); + pSids[j]->uid = htobe64(pSids[j]->uid); + pSids[j]->key = htobe64(pSids[j]->key); } pMsg = (char *)pSids[pQueryMsg->numOfSids - 1]; @@ -1112,9 +1141,9 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { } } - dTrace("qmsg:%p query on %d meter(s), qrange:%lld-%lld, numOfGroupbyTagCols:%d, numOfTagCols:%d, timestamp order:%d, " - "tags order:%d, tags order col:%d, numOfOutputCols:%d, numOfCols:%d, interval:%lld, fillType:%d, comptslen:%d, limit:%lld, " - "offset:%lld", + dTrace("qmsg:%p query on %d meter(s), qrange:%" PRId64 "-%" PRId64 ", numOfGroupbyTagCols:%d, numOfTagCols:%d, timestamp order:%d, " + "tags order:%d, tags order col:%d, numOfOutputCols:%d, numOfCols:%d, interval:%" PRId64 ", fillType:%d, comptslen:%d, limit:%" PRId64 ", " + "offset:%" PRId64, pQueryMsg, pQueryMsg->numOfSids, pQueryMsg->skey, pQueryMsg->ekey, pQueryMsg->numOfGroupCols, pQueryMsg->numOfTagsCols, pQueryMsg->order, 
pQueryMsg->orderType, pQueryMsg->orderByIdx, pQueryMsg->numOfOutputCols, pQueryMsg->numOfCols, pQueryMsg->nAggTimeInterval, pQueryMsg->interpoType, diff --git a/src/system/detail/src/vnodeShell.c b/src/system/detail/src/vnodeShell.c index 015e95e6650b14cc6a3b53d599492bf3b3accfce..3cf1c91ee2ab9e022e85ce6f36886dc2a00ac9ee 100644 --- a/src/system/detail/src/vnodeShell.c +++ b/src/system/detail/src/vnodeShell.c @@ -28,9 +28,8 @@ #include "vnodeRead.h" #include "vnodeUtil.h" #include "vnodeStore.h" -#include "tstatus.h" +#include "vnodeStatus.h" -#pragma GCC diagnostic ignored "-Wint-conversion" extern int tsMaxQueues; void * pShellServer = NULL; @@ -146,11 +145,9 @@ int vnodeInitShell() { if (numOfThreads < 1) numOfThreads = 1; memset(&rpcInit, 0, sizeof(rpcInit)); -#ifdef CLUSTER - rpcInit.localIp = tsInternalIp; -#else - rpcInit.localIp = "0.0.0.0"; -#endif + + rpcInit.localIp = tsAnyIp ? "0.0.0.0" : tsPrivateIp; + rpcInit.localPort = tsVnodeShellPort; rpcInit.label = "DND-shell"; rpcInit.numOfThreads = numOfThreads; @@ -218,7 +215,10 @@ void vnodeCloseShellVnode(int vnode) { if (shellList[vnode] == NULL) return; for (int i = 0; i < vnodeList[vnode].cfg.maxSessions; ++i) { - vnodeFreeQInfo(shellList[vnode][i].qhandle, true); + void* qhandle = shellList[vnode][i].qhandle; + if (qhandle != NULL) { + vnodeDecRefCount(qhandle); + } } int32_t* v = malloc(sizeof(int32_t)); @@ -301,7 +301,7 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { } if (pQueryMsg->vnode >= TSDB_MAX_VNODES || pQueryMsg->vnode < 0) { - dTrace("qmsg:%p,vid:%d is out of range", pQueryMsg, pQueryMsg->vnode); + dError("qmsg:%p,vid:%d is out of range", pQueryMsg, pQueryMsg->vnode); code = TSDB_CODE_INVALID_TABLE_ID; goto _query_over; } @@ -311,33 +311,32 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { if (pVnode->cfg.maxSessions == 0) { dError("qmsg:%p,vid:%d is not activated yet", pQueryMsg, pQueryMsg->vnode); vnodeSendVpeerCfgMsg(pQueryMsg->vnode); - 
code = TSDB_CODE_NOT_ACTIVE_TABLE; + code = TSDB_CODE_NOT_ACTIVE_VNODE; goto _query_over; } if (!(pVnode->accessState & TSDB_VN_READ_ACCCESS)) { + dError("qmsg:%p,vid:%d access not allowed", pQueryMsg, pQueryMsg->vnode); code = TSDB_CODE_NO_READ_ACCESS; goto _query_over; } - - if (pQueryMsg->pSidExtInfo == 0) { - dTrace("qmsg:%p,SQueryMeterMsg wrong format", pQueryMsg); - code = TSDB_CODE_INVALID_QUERY_MSG; - goto _query_over; - } - + if (pVnode->meterList == NULL) { dError("qmsg:%p,vid:%d has been closed", pQueryMsg, pQueryMsg->vnode); code = TSDB_CODE_NOT_ACTIVE_VNODE; goto _query_over; } + if (pQueryMsg->pSidExtInfo == 0) { + dError("qmsg:%p,SQueryMeterMsg wrong format", pQueryMsg); + code = TSDB_CODE_INVALID_QUERY_MSG; + goto _query_over; + } + pSids = (SMeterSidExtInfo **)pQueryMsg->pSidExtInfo; for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) { if (pSids[i]->sid >= pVnode->cfg.maxSessions || pSids[i]->sid < 0) { - dTrace("qmsg:%p sid:%d is out of range, valid range:[%d,%d]", pQueryMsg, pSids[i]->sid, 0, - pVnode->cfg.maxSessions); - + dError("qmsg:%p sid:%d out of range, valid range:[%d,%d]", pQueryMsg, pSids[i]->sid, 0, pVnode->cfg.maxSessions); code = TSDB_CODE_INVALID_TABLE_ID; goto _query_over; } @@ -356,7 +355,7 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { assert(incNumber <= pQueryMsg->numOfSids); pthread_mutex_unlock(&pVnode->vmutex); - if (code != TSDB_CODE_SUCCESS) { + if (code != TSDB_CODE_SUCCESS || pQueryMsg->numOfSids == 0) { // all the meters may have been dropped. 
goto _query_over; } @@ -373,14 +372,16 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { if (pObj->qhandle) { dTrace("QInfo:%p %s free qhandle", pObj->qhandle, __FUNCTION__); - vnodeFreeQInfo(pObj->qhandle, true); + void* qHandle = pObj->qhandle; pObj->qhandle = NULL; + + vnodeDecRefCount(qHandle); } if (QUERY_IS_STABLE_QUERY(pQueryMsg->queryType)) { pObj->qhandle = vnodeQueryOnMultiMeters(pMeterObjList, pGroupbyExpr, pExprs, pQueryMsg, &code); } else { - pObj->qhandle = vnodeQueryInTimeRange(pMeterObjList, pGroupbyExpr, pExprs, pQueryMsg, &code); + pObj->qhandle = vnodeQueryOnSingleTable(pMeterObjList, pGroupbyExpr, pExprs, pQueryMsg, &code); } _query_over: @@ -393,7 +394,7 @@ _query_over: tfree(pMeterObjList); ret = vnodeSendQueryRspMsg(pObj, code, pObj->qhandle); - free(pQueryMsg->pSidExtInfo); + tfree(pQueryMsg->pSidExtInfo); for(int32_t i = 0; i < pQueryMsg->numOfCols; ++i) { vnodeFreeColumnInfo(&pQueryMsg->colList[i]); } @@ -416,6 +417,7 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { int code = 0; pRetrieve = (SRetrieveMeterMsg *)pMsg; + SQInfo* pQInfo = (SQInfo*)pRetrieve->qhandle; pRetrieve->free = htons(pRetrieve->free); if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) { @@ -440,14 +442,22 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { if (code == TSDB_CODE_SUCCESS) { size = vnodeGetResultSize((void *)(pRetrieve->qhandle), &numOfRows); + + // buffer size for progress information, including meter count, + // and for each meter, including 'uid' and 'TSKEY'. 
+ int progressSize = 0; + if (pQInfo->pMeterQuerySupporter != NULL) + progressSize = pQInfo->pMeterQuerySupporter->numOfMeters * (sizeof(int64_t) + sizeof(TSKEY)) + sizeof(int32_t); + else if (pQInfo->pObj != NULL) + progressSize = sizeof(int64_t) + sizeof(TSKEY) + sizeof(int32_t); + + pStart = taosBuildRspMsgWithSize(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, progressSize + size + 100); + if (pStart == NULL) { + taosSendSimpleRsp(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, TSDB_CODE_SERV_OUT_OF_MEMORY); + goto _exit; + } } - pStart = taosBuildRspMsgWithSize(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, size + 100); - if (pStart == NULL) { - taosSendSimpleRsp(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, TSDB_CODE_SERV_OUT_OF_MEMORY); - goto _exit; - } - pMsg = pStart; *pMsg = code; @@ -458,7 +468,7 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { pRsp->precision = htons(timePrec); if (code == TSDB_CODE_SUCCESS) { - pRsp->offset = htobe64(vnodeGetOffsetVal(pRetrieve->qhandle)); + pRsp->offset = htobe64(vnodeGetOffsetVal((void*)pRetrieve->qhandle)); pRsp->useconds = htobe64(((SQInfo *)(pRetrieve->qhandle))->useconds); } else { pRsp->offset = 0; @@ -472,11 +482,41 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { } pMsg += size; + + // write the progress information of each meter to response + // this is required by subscriptions + if (pQInfo != NULL ) { + if (pQInfo->pMeterQuerySupporter != NULL && pQInfo->pMeterQuerySupporter->pMeterSidExtInfo != NULL) { + *((int32_t *)pMsg) = htonl(pQInfo->pMeterQuerySupporter->numOfMeters); + pMsg += sizeof(int32_t); + for (int32_t i = 0; i < pQInfo->pMeterQuerySupporter->numOfMeters; i++) { + *((int64_t *)pMsg) = htobe64(pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[i]->uid); + pMsg += sizeof(int64_t); + *((TSKEY *)pMsg) = htobe64(pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[i]->key); + pMsg += sizeof(TSKEY); + } + } else if (pQInfo->pObj != NULL) { + *((int32_t *)pMsg) = htonl(1); + pMsg += sizeof(int32_t); + *((int64_t 
*)pMsg) = htobe64(pQInfo->pObj->uid); + pMsg += sizeof(int64_t); + if (pQInfo->pointsRead > 0) { + *((TSKEY *)pMsg) = htobe64(pQInfo->query.lastKey + 1); + } else { + *((TSKEY *)pMsg) = htobe64(pQInfo->query.lastKey); + } + pMsg += sizeof(TSKEY); + } + } + msgLen = pMsg - pStart; - if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS)) { + assert(code != TSDB_CODE_ACTION_IN_PROGRESS); + + if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS) && + pRetrieve->qhandle != 0) { dTrace("QInfo:%p %s free qhandle code:%d", pObj->qhandle, __FUNCTION__, code); - vnodeFreeQInfoInQueue(pObj->qhandle); + vnodeDecRefCount(pObj->qhandle); pObj->qhandle = NULL; } @@ -484,8 +524,6 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { _exit: free(pSched->msg); - - return; } int vnodeProcessRetrieveRequest(char *pMsg, int msgLen, SShellObj *pObj) { @@ -518,7 +556,7 @@ static int vnodeCheckSubmitBlockContext(SShellSubmitBlock *pBlocks, SVnodeObj *p } if (pMeterObj->uid != uid) { - dError("vid:%d sid:%d id:%s, uid:%lld, uid in msg:%lld, uid mismatch", pVnode->vnode, sid, pMeterObj->meterId, + dError("vid:%d sid:%d id:%s, uid:%" PRIu64 ", uid in msg:%" PRIu64 ", uid mismatch", pVnode->vnode, sid, pMeterObj->meterId, pMeterObj->uid, uid); return TSDB_CODE_INVALID_SUBMIT_MSG; } @@ -588,6 +626,7 @@ int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj) { SShellSubmitMsg *pSubmit = &shellSubmit; SShellSubmitBlock *pBlocks = NULL; + pSubmit->import = htons(pSubmit->import); pSubmit->vnode = htons(pSubmit->vnode); pSubmit->numOfSid = htonl(pSubmit->numOfSid); @@ -618,7 +657,7 @@ int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj) { if (tsAvailDataDirGB < tsMinimalDataDirGB) { dError("server disk space remain %.3f GB, need at least %.3f GB, stop writing", tsAvailDataDirGB, tsMinimalDataDirGB); - code = TSDB_CODE_SERVER_NO_SPACE; + code 
= TSDB_CODE_SERV_NO_DISKSPACE; goto _submit_over; } diff --git a/src/util/src/tstatus.c b/src/system/detail/src/vnodeStatus.c similarity index 76% rename from src/util/src/tstatus.c rename to src/system/detail/src/vnodeStatus.c index 21dbd8c67de65b0d6fa5b45570f4f14627166df0..d78f3633fbb2ab23b5e2f0179eaa7bc98de79813 100644 --- a/src/util/src/tstatus.c +++ b/src/system/detail/src/vnodeStatus.c @@ -15,9 +15,9 @@ #include "taosmsg.h" #include "tsdb.h" -#include "tstatus.h" +#include "vnodeStatus.h" -const char* taosGetVgroupStatusStr(int vgroupStatus) { +const char* taosGetVgroupStatusStr(int32_t vgroupStatus) { switch (vgroupStatus) { case TSDB_VG_STATUS_READY: return tsError[vgroupStatus]; case TSDB_VG_STATUS_IN_PROGRESS: return tsError[vgroupStatus]; @@ -30,7 +30,7 @@ const char* taosGetVgroupStatusStr(int vgroupStatus) { } } -const char* taosGetDbStatusStr(int dbStatus) { +const char* taosGetDbStatusStr(int32_t dbStatus) { switch (dbStatus) { case TSDB_DB_STATUS_READY: return "ready"; case TSDB_DB_STATUS_DROPPING: return "dropping"; @@ -39,7 +39,7 @@ const char* taosGetDbStatusStr(int dbStatus) { } } -const char* taosGetVnodeStatusStr(int vnodeStatus) { +const char* taosGetVnodeStatusStr(int32_t vnodeStatus) { switch (vnodeStatus) { case TSDB_VN_STATUS_OFFLINE: return "offline"; case TSDB_VN_STATUS_CREATING: return "creating"; @@ -52,9 +52,9 @@ const char* taosGetVnodeStatusStr(int vnodeStatus) { } } -const char* taosGetVnodeSyncStatusStr(int vnodeSyncStatus) { +const char* taosGetVnodeSyncStatusStr(int32_t vnodeSyncStatus) { switch (vnodeSyncStatus) { - case TSDB_VN_SYNC_STATUS_INIT: return "init"; + case TSDB_VN_SYNC_STATUS_INIT: return "ready"; case TSDB_VN_SYNC_STATUS_SYNCING: return "syncing"; case TSDB_VN_SYNC_STATUS_SYNC_CACHE: return "sync_cache"; case TSDB_VN_SYNC_STATUS_SYNC_FILE: return "sync_file"; @@ -62,7 +62,7 @@ const char* taosGetVnodeSyncStatusStr(int vnodeSyncStatus) { } } -const char* taosGetVnodeDropStatusStr(int dropping) { +const char* 
taosGetVnodeDropStatusStr(int32_t dropping) { switch (dropping) { case TSDB_VN_DROP_STATUS_READY: return "ready"; case TSDB_VN_DROP_STATUS_DROPPING: return "dropping"; @@ -70,7 +70,7 @@ const char* taosGetVnodeDropStatusStr(int dropping) { } } -const char* taosGetDnodeStatusStr(int dnodeStatus) { +const char* taosGetDnodeStatusStr(int32_t dnodeStatus) { switch (dnodeStatus) { case TSDB_DN_STATUS_OFFLINE: return "offline"; case TSDB_DN_STATUS_READY: return "ready"; @@ -78,7 +78,7 @@ const char* taosGetDnodeStatusStr(int dnodeStatus) { } } -const char* taosGetDnodeLbStatusStr(int dnodeBalanceStatus) { +const char* taosGetDnodeLbStatusStr(int32_t dnodeBalanceStatus) { switch (dnodeBalanceStatus) { case TSDB_DN_LB_STATUS_BALANCED: return "balanced"; case TSDB_DN_LB_STATUS_BALANCING: return "balancing"; @@ -88,7 +88,7 @@ const char* taosGetDnodeLbStatusStr(int dnodeBalanceStatus) { } } -const char* taosGetVgroupLbStatusStr(int vglbStatus) { +const char* taosGetVgroupLbStatusStr(int32_t vglbStatus) { switch (vglbStatus) { case TSDB_VG_LB_STATUS_READY: return "ready"; case TSDB_VG_LB_STATUS_UPDATE: return "updating"; @@ -96,10 +96,22 @@ const char* taosGetVgroupLbStatusStr(int vglbStatus) { } } -const char* taosGetVnodeStreamStatusStr(int vnodeStreamStatus) { +const char* taosGetVnodeStreamStatusStr(int32_t vnodeStreamStatus) { switch (vnodeStreamStatus) { case TSDB_VN_STREAM_STATUS_START: return "start"; case TSDB_VN_STREAM_STATUS_STOP: return "stop"; default: return "undefined"; } } + +const char* taosGetTableStatusStr(int32_t tableStatus) { + switch(tableStatus) { + case TSDB_METER_STATE_INSERTING: return "inserting"; + case TSDB_METER_STATE_IMPORTING:return "importing"; + case TSDB_METER_STATE_UPDATING: return "updating"; + case TSDB_METER_STATE_DROPPING: return "deleting"; + case TSDB_METER_STATE_DROPPED: return "dropped"; + case TSDB_METER_STATE_READY: return "ready"; + default:return "undefined"; + } +} diff --git a/src/system/detail/src/vnodeStore.c 
b/src/system/detail/src/vnodeStore.c index 0edd182dcad140f8b985e303675317a90acd1bf1..5949b1636d1e5d48991df1ed06f63ca354a79d9a 100644 --- a/src/system/detail/src/vnodeStore.c +++ b/src/system/detail/src/vnodeStore.c @@ -22,10 +22,7 @@ #include "vnode.h" #include "vnodeStore.h" #include "vnodeUtil.h" -#include "tstatus.h" - -#pragma GCC diagnostic push -#pragma GCC diagnostic warning "-Woverflow" +#include "vnodeStatus.h" int tsMaxVnode = -1; int tsOpenVnodes = 0; @@ -59,7 +56,7 @@ static int vnodeInitStoreVnode(int vnode) { } pthread_mutex_init(&(pVnode->vmutex), NULL); - dPrint("vid:%d, storage initialized, version:%ld fileId:%d numOfFiles:%d", vnode, pVnode->version, pVnode->fileId, + dPrint("vid:%d, storage initialized, version:%" PRIu64 " fileId:%d numOfFiles:%d", vnode, pVnode->version, pVnode->fileId, pVnode->numOfFiles); return TSDB_CODE_SUCCESS; @@ -121,7 +118,7 @@ static int32_t vnodeMarkAllMetersDropped(SVnodeObj* pVnode) { } else { // set the meter is to be deleted SMeterObj* pObj = pVnode->meterList[sid]; if (pObj != NULL) { - pObj->state = TSDB_METER_STATE_DELETED; + pObj->state = TSDB_METER_STATE_DROPPED; } } } @@ -191,7 +188,7 @@ int vnodeCreateVnode(int vnode, SVnodeCfg *pCfg, SVPeerDesc *pDesc) { if (errno == EACCES) { return TSDB_CODE_NO_DISK_PERMISSIONS; } else if (errno == ENOSPC) { - return TSDB_CODE_SERVER_NO_SPACE; + return TSDB_CODE_SERV_NO_DISKSPACE; } else if (errno == EEXIST) { } else { return TSDB_CODE_VG_INIT_FAILED; @@ -204,7 +201,7 @@ int vnodeCreateVnode(int vnode, SVnodeCfg *pCfg, SVPeerDesc *pDesc) { if (errno == EACCES) { return TSDB_CODE_NO_DISK_PERMISSIONS; } else if (errno == ENOSPC) { - return TSDB_CODE_SERVER_NO_SPACE; + return TSDB_CODE_SERV_NO_DISKSPACE; } else if (errno == EEXIST) { } else { return TSDB_CODE_VG_INIT_FAILED; @@ -410,6 +407,3 @@ void vnodeCalcOpenVnodes() { void vnodeUpdateHeadFile(int vnode, int oldTables, int newTables) { //todo rewrite the head file with newTables } - -#pragma GCC diagnostic pop - diff 
--git a/src/system/detail/src/vnodeStream.c b/src/system/detail/src/vnodeStream.c index 6b5f82a687be6c163fa323ff4cb04ef25a787965..7ee20a2e59562f63903fb2ea13228070f14f0932 100644 --- a/src/system/detail/src/vnodeStream.c +++ b/src/system/detail/src/vnodeStream.c @@ -17,7 +17,7 @@ #include "taosmsg.h" #include "vnode.h" #include "vnodeUtil.h" -#include "tstatus.h" +#include "vnodeStatus.h" /* static TAOS *dbConn = NULL; */ void vnodeCloseStreamCallback(void *param); @@ -86,7 +86,7 @@ void vnodeOpenStreams(void *param, void *tmrId) { for (int sid = 0; sid < pVnode->cfg.maxSessions; ++sid) { pObj = pVnode->meterList[sid]; - if (pObj == NULL || pObj->sqlLen == 0 || vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETING)) continue; + if (pObj == NULL || pObj->sqlLen == 0 || vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) continue; dTrace("vid:%d sid:%d id:%s, open stream:%s", pObj->vnode, sid, pObj->meterId, pObj->pSql); diff --git a/src/system/detail/src/vnodeSystem.c b/src/system/detail/src/vnodeSystem.c index 631e258b2589aa548062e5661e937d7917665403..b23050ab03eeeeb9de329d7259da352d48fda23b 100644 --- a/src/system/detail/src/vnodeSystem.c +++ b/src/system/detail/src/vnodeSystem.c @@ -36,8 +36,14 @@ void vnodeCleanUpSystem() { bool vnodeInitQueryHandle() { int numOfThreads = tsRatioOfQueryThreads * tsNumOfCores * tsNumOfThreadsPerCore; - if (numOfThreads < 1) numOfThreads = 1; - queryQhandle = taosInitScheduler(tsNumOfVnodesPerCore * tsNumOfCores * tsSessionsPerVnode, numOfThreads, "query"); + if (numOfThreads < 1) { + numOfThreads = 1; + } + + int32_t maxQueueSize = tsNumOfVnodesPerCore * tsNumOfCores * tsSessionsPerVnode; + dTrace("query task queue initialized, max slot:%d, task threads:%d", maxQueueSize,numOfThreads); + + queryQhandle = taosInitSchedulerWithInfo(maxQueueSize, numOfThreads, "query", vnodeTmrCtrl); return true; } @@ -52,15 +58,15 @@ bool vnodeInitTmrCtl() { int vnodeInitSystem() { - if (!vnodeInitQueryHandle()) { - dError("failed to init query qhandle, 
exit"); - return -1; - } - if (!vnodeInitTmrCtl()) { dError("failed to init timer, exit"); return -1; } + + if (!vnodeInitQueryHandle()) { + dError("failed to init query qhandle, exit"); + return -1; + } if (vnodeInitStore() < 0) { dError("failed to init vnode storage"); diff --git a/src/system/detail/src/vnodeTagMgmt.c b/src/system/detail/src/vnodeTagMgmt.c index adf4e544bbf9efea877aa3f688afc981437815e6..5585813ec377571f8415ba6b949bb158285b1d3e 100644 --- a/src/system/detail/src/vnodeTagMgmt.c +++ b/src/system/detail/src/vnodeTagMgmt.c @@ -24,10 +24,10 @@ #include "tast.h" #include "vnodeTagMgmt.h" -#define GET_TAG_VAL_POINTER(s, col, sc, t) ((t *)(&((s)->tags[(sc)->colOffset[(col)]]))) +#define GET_TAG_VAL_POINTER(s, col, sc, t) ((t *)(&((s)->tags[getColumnModelOffset(sc, col)]))) #define GET_TAG_VAL(s, col, sc, t) (*GET_TAG_VAL_POINTER(s, col, sc, t)) -static void tTagsPrints(SMeterSidExtInfo *pMeterInfo, tTagSchema *pSchema, tOrderIdx *pOrder); +static void tTagsPrints(SMeterSidExtInfo *pMeterInfo, SColumnModel *pSchema, SColumnOrderInfo *pOrder); static void tSidSetDisplay(tSidSet *pSets); @@ -65,7 +65,7 @@ int32_t meterSidComparator(const void *p1, const void *p2, void *param) { SMeterSidExtInfo *s1 = (SMeterSidExtInfo *)p1; SMeterSidExtInfo *s2 = (SMeterSidExtInfo *)p2; - for (int32_t i = 0; i < pOrderDesc->orderIdx.numOfOrderedCols; ++i) { + for (int32_t i = 0; i < pOrderDesc->orderIdx.numOfCols; ++i) { int32_t colIdx = pOrderDesc->orderIdx.pData[i]; char * f1 = NULL; @@ -79,9 +79,9 @@ int32_t meterSidComparator(const void *p1, const void *p2, void *param) { type = TSDB_DATA_TYPE_BINARY; bytes = TSDB_METER_NAME_LEN; } else { - f1 = GET_TAG_VAL_POINTER(s1, colIdx, pOrderDesc->pTagSchema, char); - f2 = GET_TAG_VAL_POINTER(s2, colIdx, pOrderDesc->pTagSchema, char); - SSchema *pSchema = &pOrderDesc->pTagSchema->pSchema[colIdx]; + f1 = GET_TAG_VAL_POINTER(s1, colIdx, pOrderDesc->pColumnModel, char); + f2 = GET_TAG_VAL_POINTER(s2, colIdx, 
pOrderDesc->pColumnModel, char); + SSchema *pSchema = getColumnModelSchema(pOrderDesc->pColumnModel, colIdx); type = pSchema->type; bytes = pSchema->bytes; } @@ -116,9 +116,9 @@ static void median(void **pMeterSids, size_t size, int32_t s1, int32_t s2, tOrde compareFn(pMeterSids[s1], pMeterSids[s2], pOrderDesc) <= 0); #ifdef _DEBUG_VIEW - tTagsPrints(pMeterSids[s1], pOrderDesc->pTagSchema, &pOrderDesc->orderIdx); - tTagsPrints(pMeterSids[midIdx], pOrderDesc->pTagSchema, &pOrderDesc->orderIdx); - tTagsPrints(pMeterSids[s2], pOrderDesc->pTagSchema, &pOrderDesc->orderIdx); + tTagsPrints(pMeterSids[s1], pOrderDesc->pColumnModel, &pOrderDesc->orderIdx); + tTagsPrints(pMeterSids[midIdx], pOrderDesc->pColumnModel, &pOrderDesc->orderIdx); + tTagsPrints(pMeterSids[s2], pOrderDesc->pColumnModel, &pOrderDesc->orderIdx); #endif } @@ -241,25 +241,6 @@ int32_t *calculateSubGroup(void **pSids, int32_t numOfMeters, int32_t *numOfSubs return starterPos; } -tTagSchema *tCreateTagSchema(SSchema *pSchema, int32_t numOfTagCols) { - if (numOfTagCols == 0 || pSchema == NULL) { - return NULL; - } - - tTagSchema *pTagSchema = - (tTagSchema *)calloc(1, sizeof(tTagSchema) + numOfTagCols * sizeof(int32_t) + sizeof(SSchema) * numOfTagCols); - - pTagSchema->colOffset[0] = 0; - pTagSchema->numOfCols = numOfTagCols; - for (int32_t i = 1; i < numOfTagCols; ++i) { - pTagSchema->colOffset[i] = (pTagSchema->colOffset[i - 1] + pSchema[i - 1].bytes); - } - - pTagSchema->pSchema = (SSchema *)&(pTagSchema->colOffset[numOfTagCols]); - memcpy(pTagSchema->pSchema, pSchema, sizeof(SSchema) * numOfTagCols); - return pTagSchema; -} - tSidSet *tSidSetCreate(struct SMeterSidExtInfo **pMeterSidExtInfo, int32_t numOfMeters, SSchema *pSchema, int32_t numOfTags, SColIndexEx *colList, int32_t numOfCols) { tSidSet *pSidSet = (tSidSet *)calloc(1, sizeof(tSidSet) + numOfCols * sizeof(int16_t)); @@ -269,8 +250,8 @@ tSidSet *tSidSetCreate(struct SMeterSidExtInfo **pMeterSidExtInfo, int32_t numOf pSidSet->numOfSids = 
numOfMeters; pSidSet->pSids = pMeterSidExtInfo; - pSidSet->pTagSchema = tCreateTagSchema(pSchema, numOfTags); - pSidSet->orderIdx.numOfOrderedCols = numOfCols; + pSidSet->pColumnModel = createColumnModel(pSchema, numOfTags, 1); + pSidSet->orderIdx.numOfCols = numOfCols; /* * in case of "group by tbname,normal_col", the normal_col is ignored @@ -282,7 +263,7 @@ tSidSet *tSidSetCreate(struct SMeterSidExtInfo **pMeterSidExtInfo, int32_t numOf } } - pSidSet->orderIdx.numOfOrderedCols = numOfTagCols; + pSidSet->orderIdx.numOfCols = numOfTagCols; pSidSet->starterPos = NULL; return pSidSet; @@ -291,19 +272,19 @@ tSidSet *tSidSetCreate(struct SMeterSidExtInfo **pMeterSidExtInfo, int32_t numOf void tSidSetDestroy(tSidSet **pSets) { if ((*pSets) != NULL) { tfree((*pSets)->starterPos); - tfree((*pSets)->pTagSchema)(*pSets)->pSids = NULL; + tfree((*pSets)->pColumnModel)(*pSets)->pSids = NULL; tfree(*pSets); } } -void tTagsPrints(SMeterSidExtInfo *pMeterInfo, tTagSchema *pSchema, tOrderIdx *pOrder) { +void tTagsPrints(SMeterSidExtInfo *pMeterInfo, SColumnModel *pSchema, SColumnOrderInfo *pOrder) { if (pSchema == NULL) { return; } printf("sid: %-5d tags(", pMeterInfo->sid); - for (int32_t i = 0; i < pOrder->numOfOrderedCols; ++i) { + for (int32_t i = 0; i < pOrder->numOfCols; ++i) { int32_t colIndex = pOrder->pData[i]; // it is the tbname column @@ -312,7 +293,9 @@ void tTagsPrints(SMeterSidExtInfo *pMeterInfo, tTagSchema *pSchema, tOrderIdx *p continue; } - switch (pSchema->pSchema[colIndex].type) { + SSchema* s = getColumnModelSchema(pSchema, colIndex); + + switch (s->type) { case TSDB_DATA_TYPE_INT: printf("%d, ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, int32_t)); break; @@ -323,7 +306,7 @@ void tTagsPrints(SMeterSidExtInfo *pMeterInfo, tTagSchema *pSchema, tOrderIdx *p printf("%f, ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, float)); break; case TSDB_DATA_TYPE_BIGINT: - printf("%ld, ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, int64_t)); + printf("%" PRId64 ", ", 
GET_TAG_VAL(pMeterInfo, colIndex, pSchema, int64_t)); break; case TSDB_DATA_TYPE_SMALLINT: printf("%d, ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, int16_t)); @@ -336,9 +319,9 @@ void tTagsPrints(SMeterSidExtInfo *pMeterInfo, tTagSchema *pSchema, tOrderIdx *p break; case TSDB_DATA_TYPE_NCHAR: { char *data = GET_TAG_VAL_POINTER(pMeterInfo, colIndex, pSchema, char); + char buffer[512] = {0}; - - taosUcs4ToMbs(data, pSchema->pSchema[colIndex].bytes, buffer); + taosUcs4ToMbs(data, s->bytes, buffer); printf("%s, ", buffer); break; } @@ -370,16 +353,16 @@ static void UNUSED_FUNC tSidSetDisplay(tSidSet *pSets) { printf("the %d-th subgroup: \n", i + 1); for (int32_t j = s; j < e; ++j) { - tTagsPrints(pSets->pSids[j], pSets->pTagSchema, &pSets->orderIdx); + tTagsPrints(pSets->pSids[j], pSets->pColumnModel, &pSets->orderIdx); } } } void tSidSetSort(tSidSet *pSets) { pTrace("number of meters in sort: %d", pSets->numOfSids); - tOrderIdx *pOrderIdx = &pSets->orderIdx; + SColumnOrderInfo *pOrderIdx = &pSets->orderIdx; - if (pOrderIdx->numOfOrderedCols == 0 || pSets->numOfSids <= 1 || pSets->pTagSchema == NULL) { // no group by tags clause + if (pOrderIdx->numOfCols == 0 || pSets->numOfSids <= 1 || pSets->pColumnModel == NULL) { // no group by tags clause pSets->numOfSubSet = 1; pSets->starterPos = (int32_t *)malloc(sizeof(int32_t) * (pSets->numOfSubSet + 1)); pSets->starterPos[0] = 0; @@ -390,11 +373,11 @@ void tSidSetSort(tSidSet *pSets) { #endif } else { tOrderDescriptor *descriptor = - (tOrderDescriptor *)calloc(1, sizeof(tOrderDescriptor) + sizeof(int16_t) * pSets->orderIdx.numOfOrderedCols); - descriptor->pTagSchema = pSets->pTagSchema; + (tOrderDescriptor *)calloc(1, sizeof(tOrderDescriptor) + sizeof(int16_t) * pSets->orderIdx.numOfCols); + descriptor->pColumnModel = pSets->pColumnModel; descriptor->orderIdx = pSets->orderIdx; - memcpy(descriptor->orderIdx.pData, pOrderIdx->pData, sizeof(int16_t) * pSets->orderIdx.numOfOrderedCols); + memcpy(descriptor->orderIdx.pData, 
pOrderIdx->pData, sizeof(int16_t) * pSets->orderIdx.numOfCols); tQSortEx((void **)pSets->pSids, POINTER_BYTES, 0, pSets->numOfSids - 1, descriptor, meterSidComparator); pSets->starterPos = diff --git a/src/system/detail/src/vnodeUtil.c b/src/system/detail/src/vnodeUtil.c index 68e1e428c0e34a56e3bcbba80d11aaece758910c..6f25d3a8b12c5cf730b8cbfadfaaa9e2837c5a63 100644 --- a/src/system/detail/src/vnodeUtil.c +++ b/src/system/detail/src/vnodeUtil.c @@ -22,8 +22,7 @@ #include "vnode.h" #include "vnodeDataFilterFunc.h" #include "vnodeUtil.h" - -#pragma GCC diagnostic ignored "-Wint-conversion" +#include "vnodeStatus.h" int vnodeCheckFileIntegrity(FILE* fp) { /* @@ -196,9 +195,9 @@ static int32_t vnodeBuildExprFromArithmeticStr(SSqlFunctionExpr* pExpr, SQueryMe num = i + 1; pBinaryExprInfo->pReqColumns = malloc(sizeof(SColIndexEx) * num); - for (int32_t i = 0; i < num; ++i) { - SColIndexEx* pColIndex = &pBinaryExprInfo->pReqColumns[i]; - pColIndex->colId = ids[i]; + for (int32_t k = 0; k < num; ++k) { + SColIndexEx* pColIndex = &pBinaryExprInfo->pReqColumns[k]; + pColIndex->colId = ids[k]; } pBinaryExprInfo->numOfCols = num; @@ -248,12 +247,12 @@ SSqlFunctionExpr* vnodeCreateSqlFunctionExpr(SQueryMeterMsg* pQueryMsg, int32_t* SColIndexEx* pColumnIndexExInfo = &pExprs[i].pBase.colInfo; - // tag column schema is kept in pQueryMsg->pTagSchema + // tag column schema is kept in pQueryMsg->pColumnModel if (TSDB_COL_IS_TAG(pColumnIndexExInfo->flag)) { if (pColumnIndexExInfo->colIdx >= pQueryMsg->numOfTagsCols) { *code = TSDB_CODE_INVALID_QUERY_MSG; tfree(pExprs); - break; + return NULL; } type = pTagSchema[pColumnIndexExInfo->colIdx].type; @@ -265,7 +264,7 @@ SSqlFunctionExpr* vnodeCreateSqlFunctionExpr(SQueryMeterMsg* pQueryMsg, int32_t* if (*code != TSDB_CODE_SUCCESS) { tfree(pExprs); - break; + return NULL; } type = TSDB_DATA_TYPE_DOUBLE; @@ -540,52 +539,72 @@ bool vnodeIsProjectionQuery(SSqlFunctionExpr* pExpr, int32_t numOfOutput) { * 3. 
insert has nothing to do with the query processing. */ int32_t vnodeIncQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterSidExtInfo** pSids, SMeterObj** pMeterObjList, - int32_t* numOfInc) { + int32_t* numOfIncTables) { SVnodeObj* pVnode = &vnodeList[pQueryMsg->vnode]; int32_t num = 0; + int32_t index = 0; + int32_t code = TSDB_CODE_SUCCESS; for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) { SMeterObj* pMeter = pVnode->meterList[pSids[i]->sid]; - if (pMeter == NULL || (pMeter->state > TSDB_METER_STATE_INSERT)) { - if (pMeter == NULL || vnodeIsMeterState(pMeter, TSDB_METER_STATE_DELETING)) { - code = TSDB_CODE_NOT_ACTIVE_TABLE; - dError("qmsg:%p, vid:%d sid:%d, not there or will be dropped", pQueryMsg, pQueryMsg->vnode, pSids[i]->sid); - vnodeSendMeterCfgMsg(pQueryMsg->vnode, pSids[i]->sid); - } else {//update or import - code = TSDB_CODE_ACTION_IN_PROGRESS; - dTrace("qmsg:%p, vid:%d sid:%d id:%s, it is in state:%d, wait!", pQueryMsg, pQueryMsg->vnode, pSids[i]->sid, - pMeter->meterId, pMeter->state); - } - } else { - /* - * vnodeIsSafeToDeleteMeter will wait for this function complete, and then it can - * check if the numOfQueries is 0 or not. - */ - pMeterObjList[(*numOfInc)++] = pMeter; - atomic_fetch_add_32(&pMeter->numOfQueries, 1); - - // output for meter more than one query executed - if (pMeter->numOfQueries > 1) { - dTrace("qmsg:%p, vid:%d sid:%d id:%s, inc query ref, numOfQueries:%d", pQueryMsg, pMeter->vnode, pMeter->sid, - pMeter->meterId, pMeter->numOfQueries); - num++; - } + /* + * If table is missing or is in dropping status, config it from management node, and ignore it + * during query processing. The error code of TSDB_CODE_NOT_ACTIVE_TABLE will never return to client. 
+ * The missing table needs to be removed from pSids list + */ + if (pMeter == NULL || vnodeIsMeterState(pMeter, TSDB_METER_STATE_DROPPING)) { + dWarn("qmsg:%p, vid:%d sid:%d, not there or will be dropped, ignore this table in query", pQueryMsg, + pQueryMsg->vnode, pSids[i]->sid); + + vnodeSendMeterCfgMsg(pQueryMsg->vnode, pSids[i]->sid); + continue; + } else if (pMeter->uid != pSids[i]->uid || pMeter->sid != pSids[i]->sid) { + code = TSDB_CODE_TABLE_ID_MISMATCH; + dError("qmsg:%p, vid:%d sid:%d id:%s uid:%" PRIu64 ", id mismatch. sid:%d uid:%" PRId64 " in msg", pQueryMsg, + pQueryMsg->vnode, pMeter->sid, pMeter->meterId, pMeter->uid, pSids[i]->sid, pSids[i]->uid); + + vnodeSendMeterCfgMsg(pQueryMsg->vnode, pSids[i]->sid); + continue; + } else if (pMeter->state > TSDB_METER_STATE_INSERTING) { //update or import + code = TSDB_CODE_ACTION_IN_PROGRESS; + dTrace("qmsg:%p, vid:%d sid:%d id:%s, it is in state:%s, wait!", pQueryMsg, pQueryMsg->vnode, pSids[i]->sid, + pMeter->meterId, taosGetTableStatusStr(pMeter->state)); + continue; + } + + /* + * vnodeIsSafeToDeleteMeter will wait for this function complete, and then it can + * check if the numOfQueries is 0 or not. 
+ */ + pMeterObjList[(*numOfIncTables)++] = pMeter; + atomic_fetch_add_32(&pMeter->numOfQueries, 1); + + pSids[index++] = pSids[i]; + + // output for meter more than one query executed + if (pMeter->numOfQueries > 1) { + dTrace("qmsg:%p, vid:%d sid:%d id:%s, inc query ref, numOfQueries:%d", pQueryMsg, pMeter->vnode, pMeter->sid, + pMeter->meterId, pMeter->numOfQueries); + num++; } } - dTrace("qmsg:%p, query meters: %d, inc query ref %d, numOfQueries on %d meters are 1", pQueryMsg, - pQueryMsg->numOfSids, *numOfInc, (*numOfInc) - num); + dTrace("qmsg:%p, query meters: %d, inc query ref %d, numOfQueries on %d meters are 1, queried meters:%d after " + "filter missing meters", pQueryMsg, pQueryMsg->numOfSids, *numOfIncTables, (*numOfIncTables) - num, index); + assert(pQueryMsg->numOfSids >= (*numOfIncTables) && pQueryMsg->numOfSids >= index); + + pQueryMsg->numOfSids = index; return code; } -void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList, int32_t numOfInc) { +void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList, int32_t numOfIncTables) { int32_t num = 0; - for (int32_t i = 0; i < numOfInc; ++i) { + for (int32_t i = 0; i < numOfIncTables; ++i) { SMeterObj* pMeter = pMeterObjList[i]; if (pMeter != NULL) { // here, do not need to lock to perform operations @@ -599,7 +618,7 @@ void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList, } } - dTrace("qmsg:%p, dec query ref for %d meters, numOfQueries on %d meters are 0", pQueryMsg, numOfInc, numOfInc - num); + dTrace("qmsg:%p, dec query ref for %d meters, numOfQueries on %d meters are 0", pQueryMsg, numOfIncTables, numOfIncTables - num); } void vnodeUpdateQueryColumnIndex(SQuery* pQuery, SMeterObj* pMeterObj) { @@ -654,7 +673,7 @@ void vnodeClearMeterState(SMeterObj* pMeterObj, int32_t state) { bool vnodeIsMeterState(SMeterObj* pMeterObj, int32_t state) { if (state == TSDB_METER_STATE_READY) { return pMeterObj->state == TSDB_METER_STATE_READY; 
- } else if (state == TSDB_METER_STATE_DELETING) { + } else if (state == TSDB_METER_STATE_DROPPING) { return pMeterObj->state >= state; } else { return (((pMeterObj->state) & state) == state); @@ -666,7 +685,7 @@ void vnodeSetMeterDeleting(SMeterObj* pMeterObj) { return; } - pMeterObj->state |= TSDB_METER_STATE_DELETING; + pMeterObj->state |= TSDB_METER_STATE_DROPPING; } int32_t vnodeSetMeterInsertImportStateEx(SMeterObj* pObj, int32_t st) { @@ -674,7 +693,7 @@ int32_t vnodeSetMeterInsertImportStateEx(SMeterObj* pObj, int32_t st) { int32_t state = vnodeSetMeterState(pObj, st); if (state != TSDB_METER_STATE_READY) {//return to denote import is not performed - if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETING)) { + if (vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPING)) { dTrace("vid:%d sid:%d id:%s, meter is deleted, state:%d", pObj->vnode, pObj->sid, pObj->meterId, pObj->state); code = TSDB_CODE_NOT_ACTIVE_TABLE; @@ -692,17 +711,17 @@ int32_t vnodeSetMeterInsertImportStateEx(SMeterObj* pObj, int32_t st) { bool vnodeIsSafeToDeleteMeter(SVnodeObj* pVnode, int32_t sid) { SMeterObj* pObj = pVnode->meterList[sid]; - if (pObj == NULL || vnodeIsMeterState(pObj, TSDB_METER_STATE_DELETED)) { + if (pObj == NULL || vnodeIsMeterState(pObj, TSDB_METER_STATE_DROPPED)) { return true; } - int32_t prev = vnodeSetMeterState(pObj, TSDB_METER_STATE_DELETING); + int32_t prev = vnodeSetMeterState(pObj, TSDB_METER_STATE_DROPPING); /* * if the meter is not in ready/deleting state, it must be in insert/import/update, * set the deleting state and wait the procedure to be completed */ - if (prev != TSDB_METER_STATE_READY && prev < TSDB_METER_STATE_DELETING) { + if (prev != TSDB_METER_STATE_READY && prev < TSDB_METER_STATE_DROPPING) { vnodeSetMeterDeleting(pObj); dWarn("vid:%d sid:%d id:%s, can not be deleted, state:%d, wait", pObj->vnode, pObj->sid, pObj->meterId, prev); @@ -712,7 +731,7 @@ bool vnodeIsSafeToDeleteMeter(SVnodeObj* pVnode, int32_t sid) { bool ready = true; /* - * the 
query will be stopped ASAP, since the state of meter is set to TSDB_METER_STATE_DELETING, + * the query will be stopped ASAP, since the state of meter is set to TSDB_METER_STATE_DROPPING, * and new query will abort since the meter is deleted. */ pthread_mutex_lock(&pVnode->vmutex); diff --git a/src/system/lite/CMakeLists.txt b/src/system/lite/CMakeLists.txt index a22ed60563e5a237d060abfc46dc9fa87e546a87..8c648747e7bb243e06b8c9167b7a85e549d2ad9c 100644 --- a/src/system/lite/CMakeLists.txt +++ b/src/system/lite/CMakeLists.txt @@ -13,5 +13,5 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(./src SRC) - ADD_LIBRARY(taosd_lite ${SRC}) + ADD_LIBRARY(taosd_edge ${SRC}) ENDIF () diff --git a/src/system/lite/src/dnodeMgmt.spec.c b/src/system/lite/src/dnodeMgmt.spec.c index e40b21ab8340aace1198d3e8b632b61f78602adb..576dbcb337b329571296d331e043e2ab1bc8d92c 100644 --- a/src/system/lite/src/dnodeMgmt.spec.c +++ b/src/system/lite/src/dnodeMgmt.spec.c @@ -50,7 +50,7 @@ char *taosBuildReqMsgToMnode(SMgmtObj *pObj, char type) { } int taosSendMsgToMnode(SMgmtObj *pObj, char *msg, int msgLen) { - dTrace("msg:%s is sent to mnode", taosMsg[*(msg-1)]); + dTrace("msg:%s is sent to mnode", taosMsg[(uint8_t)(*(msg-1))]); /* * Lite version has no message header, so minus one @@ -81,7 +81,7 @@ void vnodeProcessMsgFromMgmtSpec(SSchedMsg *sched) { char msgType = *sched->msg; char *content = sched->msg + 1; - dTrace("msg:%s is received from mgmt", taosMsg[msgType]); + dTrace("msg:%s is received from mgmt", taosMsg[(uint8_t)msgType]); vnodeProcessMsgFromMgmt(content, 0, msgType, 0); diff --git a/src/system/lite/src/mgmtBalance.spec.c b/src/system/lite/src/mgmtBalance.spec.c index 33fe4502a4ae6451905106f55b580f69164abdb4..cf3e999e4f48c09df5da03d55b8084b0d1169168 100644 --- a/src/system/lite/src/mgmtBalance.spec.c +++ b/src/system/lite/src/mgmtBalance.spec.c @@ -15,9 +15,9 @@ #define _DEFAULT_SOURCE #include "mgmtBalance.h" -#include "tstatus.h" 
+#include "vnodeStatus.h" -void mgmtStartBalanceTimer(int mseconds) {} +void mgmtStartBalanceTimer(int64_t mseconds) {} int mgmtInitBalance() { return 0; } diff --git a/src/system/lite/src/mgmtDnode.spec.c b/src/system/lite/src/mgmtDnode.spec.c index dc7dd7d4725e8176daf358feefc14a4467791606..7fd9e7a2dfac9fa57969f393b367d668e1c419fa 100644 --- a/src/system/lite/src/mgmtDnode.spec.c +++ b/src/system/lite/src/mgmtDnode.spec.c @@ -15,7 +15,7 @@ #define _DEFAULT_SOURCE #include "mgmt.h" -#include "tstatus.h" +#include "vnodeStatus.h" SDnodeObj dnodeObj; extern uint32_t tsRebootTime; @@ -27,7 +27,7 @@ int mgmtUpdateDnode(SDnodeObj *pDnode) { return 0; } void mgmtCleanUpDnodes() {} int mgmtInitDnodes() { - dnodeObj.privateIp = inet_addr(tsInternalIp);; + dnodeObj.privateIp = inet_addr(tsPrivateIp);; dnodeObj.createdTime = (int64_t)tsRebootTime * 1000; dnodeObj.lastReboot = tsRebootTime; dnodeObj.numOfCores = (uint16_t)tsNumOfCores; diff --git a/src/system/lite/src/mgmtDnodeInt.spec.c b/src/system/lite/src/mgmtDnodeInt.spec.c index b99815844d6fe4e2e50a0bb1d9d6a5f1bc5411af..734fa630c5303faddb510093c20de21aafc521b9 100644 --- a/src/system/lite/src/mgmtDnodeInt.spec.c +++ b/src/system/lite/src/mgmtDnodeInt.spec.c @@ -23,7 +23,7 @@ #include "tutil.h" #include "vnode.h" #include "tsystem.h" -#include "tstatus.h" +#include "vnodeStatus.h" extern void *dmQhandle; void * mgmtStatusTimer = NULL; @@ -61,7 +61,7 @@ char *taosBuildReqMsgToDnode(SDnodeObj *pObj, char type) { int taosSendSimpleRspToDnode(SDnodeObj *pObj, char rsptype, char code) { return 0; } int taosSendMsgToDnode(SDnodeObj *pObj, char *msg, int msgLen) { - mTrace("msg:%s is sent to dnode", taosMsg[*(msg-1)]); + mTrace("msg:%s is sent to dnode", taosMsg[(uint8_t)(*(msg-1))]); /* * Lite version has no message header, so minus one @@ -142,7 +142,7 @@ void mgmtProcessDnodeStatus(void *handle, void *tmrId) { void mgmtProcessMsgFromDnodeSpec(SSchedMsg *sched) { char msgType = *sched->msg; char *content = sched->msg + 1; - 
mTrace("msg:%s is received from dnode", taosMsg[msgType]); + mTrace("msg:%s is received from dnode", taosMsg[(uint8_t)msgType]); mgmtProcessMsgFromDnode(content, 0, msgType, mgmtGetDnode(0)); free(sched->msg); diff --git a/src/system/lite/src/mgmtShell.spec.c b/src/system/lite/src/mgmtShell.spec.c index 5195010b4129fb2677526aa62f3d40f53d9bdded..a1d8e6a34a4e2cc2d7df7c1acc6cdf75a796fc1a 100644 --- a/src/system/lite/src/mgmtShell.spec.c +++ b/src/system/lite/src/mgmtShell.spec.c @@ -24,7 +24,7 @@ int mgmtProcessAlterAcctMsg(char *pMsg, int msgLen, SConnObj *pConn) { } int mgmtProcessCreateDnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) { - return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_PNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT); + return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_DNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT); } int mgmtProcessCfgMnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) { @@ -36,7 +36,7 @@ int mgmtProcessDropMnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) { } int mgmtProcessDropDnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) { - return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_PNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT); + return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_DNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT); } int mgmtProcessDropAcctMsg(char *pMsg, int msgLen, SConnObj *pConn) { diff --git a/src/system/lite/src/vnodeFile.spec.c b/src/system/lite/src/vnodeFile.spec.c index 7b26ed6c9f22ed563e6ed529ba51d9711bccb852..4ad624d2ad74b7ded307c014586552ef4f0a6e4e 100644 --- a/src/system/lite/src/vnodeFile.spec.c +++ b/src/system/lite/src/vnodeFile.spec.c @@ -24,7 +24,7 @@ char* vnodeGetDataDir(int vnode, int fileId) { return dataDir; } void vnodeAdustVnodeFile(SVnodeObj *pVnode) { // Retention policy here int fileId = pVnode->fileId - pVnode->numOfFiles + 1; - int cfile = taosGetTimestamp(pVnode->cfg.precision)/pVnode->cfg.daysPerFile/tsMsPerDay[pVnode->cfg.precision]; + int cfile = 
taosGetTimestamp(pVnode->cfg.precision)/pVnode->cfg.daysPerFile/tsMsPerDay[(uint8_t)pVnode->cfg.precision]; while (fileId <= cfile - pVnode->maxFiles) { vnodeRemoveFile(pVnode->vnode, fileId); pVnode->numOfFiles--; diff --git a/src/system/lite/src/vnodePeer.spec.c b/src/system/lite/src/vnodePeer.spec.c index d7da8b66f44fb0b7e8e7ccc22ce36117a1b9e2d2..34400d4051729a8b0d8e50ec01a8d5ae877c9622 100644 --- a/src/system/lite/src/vnodePeer.spec.c +++ b/src/system/lite/src/vnodePeer.spec.c @@ -15,7 +15,7 @@ #define _DEFAULT_SOURCE #include "vnode.h" -#include "tstatus.h" +#include "vnodeStatus.h" int vnodeInitPeer(int numOfThreads) { return 0; } diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 5e84f3feadbf4f8cee66c334ac62465ed081936e..d8f74f46f4ef47ea4184c8dc3e915fccd5034a4d 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -34,6 +34,7 @@ ELSEIF (TD_WINDOWS_64) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/iconv) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/regex) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) + LIST(APPEND SRC ./src/hash.c) LIST(APPEND SRC ./src/ihash.c) LIST(APPEND SRC ./src/lz4.c) LIST(APPEND SRC ./src/shash.c) @@ -54,10 +55,10 @@ ELSEIF (TD_WINDOWS_64) LIST(APPEND SRC ./src/tmempool.c) LIST(APPEND SRC ./src/tmodule.c) LIST(APPEND SRC ./src/tnote.c) + LIST(APPEND SRC ./src/tpercentile.c) LIST(APPEND SRC ./src/tsched.c) LIST(APPEND SRC ./src/tskiplist.c) LIST(APPEND SRC ./src/tsocket.c) - LIST(APPEND SRC ./src/tstatus.c) LIST(APPEND SRC ./src/tstrbuild.c) LIST(APPEND SRC ./src/ttime.c) LIST(APPEND SRC ./src/ttimer.c) @@ -69,9 +70,39 @@ ELSEIF (TD_WINDOWS_64) TARGET_LINK_LIBRARIES(tutil iconv regex pthread os winmm IPHLPAPI ws2_32) ELSEIF(TD_DARWIN_64) ADD_DEFINITIONS(-DUSE_LIBICONV) - AUX_SOURCE_DIRECTORY(src SRC) - LIST(REMOVE_ITEM SRC ./src/tcrc32c.c) - LIST(REMOVE_ITEM SRC ./src/tdes.c) + LIST(APPEND SRC ./src/hash.c) + LIST(APPEND SRC ./src/ihash.c) + LIST(APPEND SRC ./src/lz4.c) + LIST(APPEND SRC 
./src/shash.c) + LIST(APPEND SRC ./src/tbase64.c) + LIST(APPEND SRC ./src/tcache.c) + LIST(APPEND SRC ./src/tcompression.c) + LIST(APPEND SRC ./src/textbuffer.c) + LIST(APPEND SRC ./src/tglobalcfg.c) + LIST(APPEND SRC ./src/thash.c) + LIST(APPEND SRC ./src/thashutil.c) + LIST(APPEND SRC ./src/thistogram.c) + LIST(APPEND SRC ./src/tidpool.c) + LIST(APPEND SRC ./src/tinterpolation.c) + LIST(APPEND SRC ./src/tlog.c) + LIST(APPEND SRC ./src/tlosertree.c) + LIST(APPEND SRC ./src/tmd5.c) + LIST(APPEND SRC ./src/tmem.c) + LIST(APPEND SRC ./src/tmempool.c) + LIST(APPEND SRC ./src/tmodule.c) + LIST(APPEND SRC ./src/tnote.c) + LIST(APPEND SRC ./src/tpercentile.c) + LIST(APPEND SRC ./src/tsched.c) + LIST(APPEND SRC ./src/tskiplist.c) + LIST(APPEND SRC ./src/tsocket.c) + LIST(APPEND SRC ./src/tstrbuild.c) + LIST(APPEND SRC ./src/ttime.c) + LIST(APPEND SRC ./src/ttimer.c) + LIST(APPEND SRC ./src/ttokenizer.c) + LIST(APPEND SRC ./src/ttypes.c) + LIST(APPEND SRC ./src/tutil.c) + LIST(APPEND SRC ./src/version.c) + LIST(APPEND SRC ./src/hash.c) ADD_LIBRARY(tutil ${SRC}) TARGET_LINK_LIBRARIES(tutil iconv pthread os) ENDIF() diff --git a/src/util/src/hash.c b/src/util/src/hash.c new file mode 100644 index 0000000000000000000000000000000000000000..99643c92cc68d2964db90fdbf259b37c174bb5f9 --- /dev/null +++ b/src/util/src/hash.c @@ -0,0 +1,553 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "os.h" + +#include "hash.h" +#include "tlog.h" +#include "ttime.h" +#include "tutil.h" + +static FORCE_INLINE void __wr_lock(void *lock) { +#if defined LINUX + pthread_rwlock_wrlock(lock); +#else + pthread_mutex_lock(lock); +#endif +} + +static FORCE_INLINE void __rd_lock(void *lock) { +#if defined LINUX + pthread_rwlock_rdlock(lock); +#else + pthread_mutex_lock(lock); +#endif +} + +static FORCE_INLINE void __unlock(void *lock) { +#if defined LINUX + pthread_rwlock_unlock(lock); +#else + pthread_mutex_unlock(lock); +#endif +} + +static FORCE_INLINE int32_t __lock_init(void *lock) { +#if defined LINUX + return pthread_rwlock_init(lock, NULL); +#else + return pthread_mutex_init(lock, NULL); +#endif +} + +static FORCE_INLINE void __lock_destroy(void *lock) { +#if defined LINUX + pthread_rwlock_destroy(lock); +#else + pthread_mutex_destroy(lock); +#endif +} + +static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { + int32_t len = MIN(length, HASH_MAX_CAPACITY); + + uint32_t i = 4; + while (i < len) i = (i << 1U); + return i; +} + +/** + * hash key function + * + * @param key key string + * @param len length of key + * @return hash value + */ +static FORCE_INLINE uint32_t taosHashKey(const char *key, uint32_t len) { return MurmurHash3_32(key, len); } + +/** + * inplace update node in hash table + * @param pObj hash table object + * @param pNode data node + */ +static void doUpdateHashTable(HashObj *pObj, SHashNode *pNode) { + if (pNode->prev1) { + pNode->prev1->next = pNode; + } + + if (pNode->next) { + (pNode->next)->prev = pNode; + } + + pTrace("key:%s %p update hash table", pNode->key, pNode); +} + +/** + * get SHashNode from hashlist, nodes from trash are not included. 
+ * @param pObj Cache objection + * @param key key for hash + * @param keyLen key length + * @return + */ +static SHashNode *doGetNodeFromHashTable(HashObj *pObj, const char *key, uint32_t keyLen, uint32_t *hashVal) { + uint32_t hash = (*pObj->hashFp)(key, keyLen); + + int32_t slot = HASH_INDEX(hash, pObj->capacity); + SHashEntry *pEntry = pObj->hashList[slot]; + + SHashNode *pNode = pEntry->next; + while (pNode) { + if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + break; + } + + pNode = pNode->next; + } + + if (pNode) { + assert(HASH_INDEX(pNode->hashVal, pObj->capacity) == slot); + } + + // return the calculated hash value, to avoid calculating it again in other functions + if (hashVal != NULL) { + *hashVal = hash; + } + + return pNode; +} + +/** + * resize the hash list if the threshold is reached + * + * @param pObj + */ +static void taosHashTableResize(HashObj *pObj) { + if (pObj->size < pObj->capacity * HASH_DEFAULT_LOAD_FACTOR) { + return; + } + + // double the original capacity + SHashNode *pNode = NULL; + SHashNode *pNext = NULL; + + int32_t newSize = pObj->capacity << 1U; + if (newSize > HASH_MAX_CAPACITY) { + pTrace("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached", pObj->capacity, + HASH_MAX_CAPACITY); + return; + } + + int64_t st = taosGetTimestampUs(); + + SHashEntry **pNewEntry = realloc(pObj->hashList, sizeof(SHashEntry*) * newSize); + if (pNewEntry == NULL) { + pTrace("cache resize failed due to out of memory, capacity remain:%d", pObj->capacity); + return; + } + + pObj->hashList = pNewEntry; + for(int32_t i = pObj->capacity; i < newSize; ++i) { + pObj->hashList[i] = calloc(1, sizeof(SHashEntry)); + } + + pObj->capacity = newSize; + + for (int32_t i = 0; i < pObj->capacity; ++i) { + SHashEntry *pEntry = pObj->hashList[i]; + + pNode = pEntry->next; + if (pNode != NULL) { + assert(pNode->prev1 == pEntry && pEntry->num > 0); + } + + while (pNode) { + int32_t j = 
HASH_INDEX(pNode->hashVal, pObj->capacity); + if (j == i) { // this key resides in the same slot, no need to relocate it + pNode = pNode->next; + } else { + pNext = pNode->next; + + // remove from current slot + assert(pNode->prev1 != NULL); + + if (pNode->prev1 == pEntry) { // first node of the overflow linked list + pEntry->next = pNode->next; + } else { + pNode->prev->next = pNode->next; + } + + pEntry->num--; + assert(pEntry->num >= 0); + + if (pNode->next != NULL) { + (pNode->next)->prev = pNode->prev; + } + + // added into new slot + pNode->next = NULL; + pNode->prev1 = NULL; + + SHashEntry *pNewIndexEntry = pObj->hashList[j]; + + if (pNewIndexEntry->next != NULL) { + assert(pNewIndexEntry->next->prev1 == pNewIndexEntry); + + pNewIndexEntry->next->prev = pNode; + } + + pNode->next = pNewIndexEntry->next; + pNode->prev1 = pNewIndexEntry; + + pNewIndexEntry->next = pNode; + pNewIndexEntry->num++; + + // continue + pNode = pNext; + } + } + } + + int64_t et = taosGetTimestampUs(); + + pTrace("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pObj->capacity, + ((double)pObj->size) / pObj->capacity, (et - st) / 1000.0); +} + +/** + * @param capacity maximum slots available for hash elements + * @param fn hash function + * @return + */ +void *taosInitHashTable(uint32_t capacity, _hash_fn_t fn, bool multithreadSafe) { + if (capacity == 0 || fn == NULL) { + return NULL; + } + + HashObj *pObj = (HashObj *)calloc(1, sizeof(HashObj)); + if (pObj == NULL) { + pError("failed to allocate memory, reason:%s", strerror(errno)); + return NULL; + } + + // the max slots is not defined by user + pObj->capacity = taosHashCapacity(capacity); + assert((pObj->capacity & (pObj->capacity - 1)) == 0); + + pObj->hashFp = fn; + + pObj->hashList = (SHashEntry **)calloc(pObj->capacity, sizeof(SHashEntry*)); + if (pObj->hashList == NULL) { + free(pObj); + pError("failed to allocate memory, reason:%s", strerror(errno)); + return NULL; + } + + for(int32_t i = 0; 
i < pObj->capacity; ++i) { + pObj->hashList[i] = calloc(1, sizeof(SHashEntry)); + } + + if (multithreadSafe && (__lock_init(pObj) != 0)) { + free(pObj->hashList); + free(pObj); + + pError("failed to init lock, reason:%s", strerror(errno)); + return NULL; + } + + return (void *)pObj; +} + +/** + * @param key key of object for hash, usually a null-terminated string + * @param keyLen length of key + * @param pData actually data. required a consecutive memory block, no pointer is allowed + * in pData. Pointer copy causes memory access error. + * @param size size of block + * @return SHashNode + */ +static SHashNode *doCreateHashNode(const char *key, uint32_t keyLen, const char *pData, size_t dataSize, + uint32_t hashVal) { + size_t totalSize = dataSize + sizeof(SHashNode) + keyLen; + + SHashNode *pNewNode = calloc(1, totalSize); + if (pNewNode == NULL) { + pError("failed to allocate memory, reason:%s", strerror(errno)); + return NULL; + } + + memcpy(pNewNode->data, pData, dataSize); + + pNewNode->key = pNewNode->data + dataSize; + memcpy(pNewNode->key, key, keyLen); + pNewNode->keyLen = keyLen; + + pNewNode->hashVal = hashVal; + + return pNewNode; +} + +static SHashNode *doUpdateHashNode(SHashNode *pNode, const char *key, uint32_t keyLen, const char *pData, + size_t dataSize) { + size_t size = dataSize + sizeof(SHashNode) + keyLen; + + SHashNode *pNewNode = (SHashNode *)realloc(pNode, size); + if (pNewNode == NULL) { + return NULL; + } + + memcpy(pNewNode->data, pData, dataSize); + + pNewNode->key = pNewNode->data + dataSize; + + assert(memcmp(pNewNode->key, key, keyLen) == 0 && keyLen == pNewNode->keyLen); + + memcpy(pNewNode->key, key, keyLen); + return pNewNode; +} + +/** + * insert the hash node at the front of the linked list + * + * @param pObj + * @param pNode + */ +static void doAddToHashTable(HashObj *pObj, SHashNode *pNode) { + assert(pNode != NULL); + + int32_t index = HASH_INDEX(pNode->hashVal, pObj->capacity); + SHashEntry *pEntry = pObj->hashList[index]; 
+ + pNode->next = pEntry->next; + + if (pEntry->next) { + pEntry->next->prev = pNode; + } + + pEntry->next = pNode; + pNode->prev1 = pEntry; + + pEntry->num++; + pObj->size++; + +// char key[512] = {0}; +// memcpy(key, pNode->key, MIN(512, pNode->keyLen)); +// pTrace("key:%s %p add to hash table", key, pNode); +} + +int32_t taosNumElemsInHashTable(HashObj *pObj) { + if (pObj == NULL) { + return 0; + } + + return pObj->size; +} + +/** + * add data node into hash table + * @param pObj hash object + * @param pNode hash node + */ +int32_t taosAddToHashTable(HashObj *pObj, const char *key, uint32_t keyLen, void *data, uint32_t size) { + if (pObj->multithreadSafe) { + __wr_lock(&pObj->lock); + } + + uint32_t hashVal = 0; + SHashNode *pNode = doGetNodeFromHashTable(pObj, key, keyLen, &hashVal); + + if (pNode == NULL) { // no data in hash table with the specified key, add it into hash table + taosHashTableResize(pObj); + + SHashNode *pNewNode = doCreateHashNode(key, keyLen, data, size, hashVal); + if (pNewNode == NULL) { + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + } + + return -1; + } + + doAddToHashTable(pObj, pNewNode); + } else { + SHashNode *pNewNode = doUpdateHashNode(pNode, key, keyLen, data, size); + if (pNewNode == NULL) { + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + } + + return -1; + } + + doUpdateHashTable(pObj, pNewNode); + } + + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + } + + return 0; +} + +char *taosGetDataFromHashTable(HashObj *pObj, const char *key, uint32_t keyLen) { + if (pObj->multithreadSafe) { + __rd_lock(&pObj->lock); + } + + uint32_t hashVal = 0; + SHashNode *pNode = doGetNodeFromHashTable(pObj, key, keyLen, &hashVal); + + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + } + + if (pNode != NULL) { + assert(pNode->hashVal == hashVal); + + return pNode->data; + } else { + return NULL; + } +} + +/** + * remove node in hash list + * @param pObj + * @param pNode + */ +void taosDeleteFromHashTable(HashObj 
*pObj, const char *key, uint32_t keyLen) { + if (pObj->multithreadSafe) { + __wr_lock(&pObj->lock); + } + + uint32_t val = 0; + SHashNode *pNode = doGetNodeFromHashTable(pObj, key, keyLen, &val); + if (pNode == NULL) { + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + } + + return; + } + + SHashNode *pNext = pNode->next; + if (pNode->prev != NULL) { + int32_t slot = HASH_INDEX(val, pObj->capacity); + if (pObj->hashList[slot]->next == pNode) { + pObj->hashList[slot]->next = pNext; + } else { + pNode->prev->next = pNext; + } + } + + if (pNext != NULL) { + pNext->prev = pNode->prev; + } + + uint32_t index = HASH_INDEX(pNode->hashVal, pObj->capacity); + SHashEntry *pEntry = pObj->hashList[index]; + pEntry->num--; + + pObj->size--; + + pNode->next = NULL; + pNode->prev = NULL; + + pTrace("key:%s %p remove from hash table", pNode->key, pNode); + tfree(pNode); + + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + } +} + +void taosCleanUpHashTable(void *handle) { + HashObj *pObj = (HashObj *)handle; + if (pObj == NULL || pObj->capacity <= 0) return; + + SHashNode *pNode, *pNext; + + if (pObj->multithreadSafe) { + __wr_lock(&pObj->lock); + } + + if (pObj->hashList) { + for (int32_t i = 0; i < pObj->capacity; ++i) { + SHashEntry *pEntry = pObj->hashList[i]; + pNode = pEntry->next; + + while (pNode) { + pNext = pNode->next; + free(pNode); + pNode = pNext; + } + + tfree(pEntry); + } + + free(pObj->hashList); + } + + if (pObj->multithreadSafe) { + __unlock(&pObj->lock); + __lock_destroy(&pObj->lock); + } + + memset(pObj, 0, sizeof(HashObj)); + free(pObj); +} + +// for profile only +int32_t taosGetHashMaxOverflowLength(HashObj* pObj) { + if (pObj == NULL || pObj->size == 0) { + return 0; + } + + int32_t num = 0; + + for(int32_t i = 0; i < pObj->size; ++i) { + SHashEntry *pEntry = pObj->hashList[i]; + if (num < pEntry->num) { + num = pEntry->num; + } + } + + return num; +} + +int32_t taosCheckHashTable(HashObj *pObj) { + for(int32_t i = 0; i < pObj->capacity; ++i) { 
+ SHashEntry *pEntry = pObj->hashList[i]; + + SHashNode* pNode = pEntry->next; + if (pNode != NULL) { + assert(pEntry == pNode->prev1); + int32_t num = 1; + + SHashNode* pNext = pNode->next; + + while(pNext) { + assert(pNext->prev == pNode); + + pNode = pNext; + pNext = pNext->next; + num ++; + } + + assert(num == pEntry->num); + } + } + + return 0; +} diff --git a/src/util/src/tbase64.c b/src/util/src/tbase64.c index 02ec756e04469bc4c1716f20b6530d29f817ac37..937adfde5cd68040bdaa330ad43cbcd31a724a71 100644 --- a/src/util/src/tbase64.c +++ b/src/util/src/tbase64.c @@ -98,7 +98,7 @@ unsigned char *base64_decode(const char *value, int inlen, int *outlen) { base64_decode_error: free(result); - *result = 0; + result = 0; *outlen = 0; return result; diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 8a2f1347df9e7eb7d7fb29623eab4120b5484aeb..e6213c45a41f104e6d948c2b46e281e9909ea2b8 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -20,6 +20,7 @@ #include "ttime.h" #include "ttimer.h" #include "tutil.h" +#include "hashutil.h" #define HASH_MAX_CAPACITY (1024*1024*16) #define HASH_VALUE_IN_TRASH (-1) @@ -587,8 +588,8 @@ void *taosAddDataIntoCache(void *handle, char *key, char *pData, int dataSize, i pNode = taosAddToCacheImpl(pObj, key, keyLen, pData, dataSize, keepTime * 1000L); if (NULL != pNode) { pTrace( - "key:%s %p added into cache, slot:%d, addTime:%lld, expireTime:%lld, cache total:%d, " - "size:%lldbytes, collision:%d", + "key:%s %p added into cache, slot:%d, addTime:%" PRIu64 ", expireTime:%" PRIu64 ", cache total:%d, " + "size:%" PRId64 " bytes, collision:%d", pNode->key, pNode, HASH_INDEX(pNode->hashVal, pObj->capacity), pNode->addTime, pNode->time, pObj->size, pObj->totalSize, pObj->statistics.numOfCollision); } @@ -711,7 +712,7 @@ void *taosUpdateDataFromCache(void *handle, char *key, char *pData, int size, in pObj->totalSize); } else { pNew = taosUpdateCacheImpl(pObj, pNode, key, keyLen, pData, size, duration * 1000L); - 
pTrace("key:%s updated.expireTime:%lld.refCnt:%d", key, pNode->time, pNode->refCount); + pTrace("key:%s updated.expireTime:%" PRIu64 ".refCnt:%d", key, pNode->time, pNode->refCount); } __cache_unlock(pObj); @@ -901,5 +902,46 @@ void taosCleanUpDataCache(void *handle) { } pObj->deleting = 1; - return; +} + +void* taosGetDataFromExists(void* handle, void* data) { + SCacheObj *pObj = (SCacheObj *)handle; + if (pObj == NULL || data == NULL) return NULL; + + size_t offset = offsetof(SDataNode, data); + SDataNode *ptNode = (SDataNode *)((char *)data - offset); + + if (ptNode->signature != (uint64_t) ptNode) { + pError("key: %p the data from cache is invalid", ptNode); + return NULL; + } + + int32_t ref = atomic_add_fetch_32(&ptNode->refCount, 1); + pTrace("%p add ref data in cache, refCnt:%d", data, ref) + + // the data if referenced by at least one object, so the reference count must be greater than the value of 2. + assert(ref >= 2); + return data; +} + +void* taosTransferDataInCache(void* handle, void** data) { + SCacheObj *pObj = (SCacheObj *)handle; + if (pObj == NULL || data == NULL) return NULL; + + size_t offset = offsetof(SDataNode, data); + SDataNode *ptNode = (SDataNode *)((char *)(*data) - offset); + + if (ptNode->signature != (uint64_t) ptNode) { + pError("key: %p the data from cache is invalid", ptNode); + return NULL; + } + + assert(ptNode->refCount >= 1); + + char* d = *data; + + // clear its reference to old area + *data = NULL; + + return d; } diff --git a/src/util/src/tcompression.c b/src/util/src/tcompression.c index 1ac42377fb62c9f9da1186d068da6ce603a77eea..0f0c7bed347fce36ea8054ce523619998a1f6327 100644 --- a/src/util/src/tcompression.c +++ b/src/util/src/tcompression.c @@ -355,16 +355,16 @@ int tsCompressINTImp(const char *const input, const int nelements, char *const o tmp_bit = (LONG_BYTES * BITS_PER_BYTE) - BUILDIN_CLZL(zigzag_value); } - if (elems + 1 <= selector_to_elems[selector] && elems + 1 <= selector_to_elems[bit_to_selector[tmp_bit]]) { 
+ if (elems + 1 <= selector_to_elems[(int)selector] && elems + 1 <= selector_to_elems[(int)(bit_to_selector[(int)tmp_bit])]) { // If can hold another one. - selector = selector > bit_to_selector[tmp_bit] ? selector : bit_to_selector[tmp_bit]; + selector = selector > bit_to_selector[(int)tmp_bit] ? selector : bit_to_selector[(int)tmp_bit]; elems++; - bit = bit_per_integer[selector]; + bit = bit_per_integer[(int)selector]; } else { // if cannot hold another one. - while (elems < selector_to_elems[selector]) selector++; - elems = selector_to_elems[selector]; - bit = bit_per_integer[selector]; + while (elems < selector_to_elems[(int)selector]) selector++; + elems = selector_to_elems[(int)selector]; + bit = bit_per_integer[(int)selector]; break; } prev_value_tmp = curr_value; @@ -455,8 +455,8 @@ int tsDecompressINTImp(const char *const input, const int nelements, char *const memcpy(&w, ip, LONG_BYTES); char selector = (char)(w & INT64MASK(4)); // selector = 4 - char bit = bit_per_integer[selector]; // bit = 3 - int elems = selector_to_elems[selector]; + char bit = bit_per_integer[(int)selector]; // bit = 3 + int elems = selector_to_elems[(int)selector]; for (int i = 0; i < elems; i++) { uint64_t zigzag_value; diff --git a/src/util/src/tcrc32c.c b/src/util/src/tcrc32c.c index 705ca5872ae5e9ab0b0d826c12a52ea5a4634a36..546693c4bbc82750b7bbc1a117469fb816fd0362 100644 --- a/src/util/src/tcrc32c.c +++ b/src/util/src/tcrc32c.c @@ -25,8 +25,6 @@ #include #include "tcrc32c.h" -//todo : use the original source code -#pragma GCC diagnostic ignored "-Wunused-function" #define POLY 0x82f63b78 #define LONG_SHIFT 8192 @@ -1093,6 +1091,7 @@ static uint32_t short_shifts[4][256] = { 0xe1a734e7, 0xc41cc13c, 0x140cd014, 0x31b725cf, 0x5f7b3ba2, 0x7ac0ce79, 0x82e30778, 0xa758f2a3, 0xc994ecce, 0xec2f1915}}; +#if 0 static uint32_t append_trivial(uint32_t crc, crc_stream input, size_t length) { for (size_t i = 0; i < length; ++i) { crc = crc ^ input[i]; @@ -1130,6 +1129,7 @@ static uint32_t 
append_adler_table(uint32_t crci, crc_stream input, } return (uint32_t)(crc ^ 0xffffffff); } +#endif /* Table-driven software version as a fall-back. This is about 15 times slower than using the hardware instructions. This assumes little-endian integers, diff --git a/src/util/src/textbuffer.c b/src/util/src/textbuffer.c index 5652c0aca67998cdad8bdbaa44be8baec20906b8..860de6782be97ce83032cf60d3d2f303af18c795 100644 --- a/src/util/src/textbuffer.c +++ b/src/util/src/textbuffer.c @@ -12,7 +12,6 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ - #include "os.h" #include "taos.h" #include "taosmsg.h" @@ -23,10 +22,8 @@ #include "ttypes.h" #include "tutil.h" -#pragma GCC diagnostic ignored "-Wformat" - #define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \ - (data + (schema)->colOffset[colId] * (allrow) + (rowId) * (schema)->pFields[colId].bytes) + (data + (schema)->pFields[colId].offset * (allrow) + (rowId) * (schema)->pFields[colId].field.bytes) int32_t tmpFileSerialNum = 0; @@ -52,46 +49,50 @@ void getTmpfilePath(const char *fileNamePrefix, char *dstPath) { } /* - * tColModel is deeply copy + * SColumnModel is deeply copy */ -void tExtMemBufferCreate(tExtMemBuffer **pMemBuffer, int32_t nBufferSize, int32_t elemSize, const char *tmpDataFilePath, - tColModel *pModel) { - (*pMemBuffer) = (tExtMemBuffer *)calloc(1, sizeof(tExtMemBuffer)); - - (*pMemBuffer)->nPageSize = DEFAULT_PAGE_SIZE; - (*pMemBuffer)->nMaxSizeInPages = ALIGN8(nBufferSize) / (*pMemBuffer)->nPageSize; - (*pMemBuffer)->nElemSize = elemSize; +tExtMemBuffer* createExtMemBuffer(int32_t inMemSize, int32_t elemSize, SColumnModel *pModel) { + tExtMemBuffer* pMemBuffer = (tExtMemBuffer *)calloc(1, sizeof(tExtMemBuffer)); - (*pMemBuffer)->numOfElemsPerPage = ((*pMemBuffer)->nPageSize - sizeof(tFilePage)) / (*pMemBuffer)->nElemSize; + pMemBuffer->pageSize = DEFAULT_PAGE_SIZE; + pMemBuffer->inMemCapacity = ALIGN8(inMemSize) / 
pMemBuffer->pageSize; + pMemBuffer->nElemSize = elemSize; - strcpy((*pMemBuffer)->dataFilePath, tmpDataFilePath); + pMemBuffer->numOfElemsPerPage = (pMemBuffer->pageSize - sizeof(tFilePage)) / pMemBuffer->nElemSize; + + char name[MAX_TMPFILE_PATH_LENGTH] = {0}; + getTmpfilePath("extbuf", name); + + pMemBuffer->path = strdup(name); + pTrace("create tmp file:%s", pMemBuffer->path); + + SFileInfo *pFMeta = &pMemBuffer->fileMeta; - tFileMeta *pFMeta = &(*pMemBuffer)->fileMeta; - - pFMeta->numOfElemsInFile = 0; - pFMeta->nFileSize = 0; - pFMeta->nPageSize = DEFAULT_PAGE_SIZE; + pFMeta->pageSize = DEFAULT_PAGE_SIZE; pFMeta->flushoutData.nAllocSize = 4; pFMeta->flushoutData.nLength = 0; pFMeta->flushoutData.pFlushoutInfo = (tFlushoutInfo *)calloc(4, sizeof(tFlushoutInfo)); - (*pMemBuffer)->pColModel = tColModelCreate(pModel->pFields, pModel->numOfCols, (*pMemBuffer)->numOfElemsPerPage); + pMemBuffer->pColumnModel = cloneColumnModel(pModel); + pMemBuffer->pColumnModel->capacity = pMemBuffer->numOfElemsPerPage; + + return pMemBuffer; } -void tExtMemBufferDestroy(tExtMemBuffer **pMemBuffer) { - if ((*pMemBuffer) == NULL) { - return; +void* destoryExtMemBuffer(tExtMemBuffer *pMemBuffer) { + if (pMemBuffer == NULL) { + return NULL; } // release flush out info link - tFileMeta *pFileMeta = &(*pMemBuffer)->fileMeta; + SFileInfo *pFileMeta = &pMemBuffer->fileMeta; if (pFileMeta->flushoutData.nAllocSize != 0 && pFileMeta->flushoutData.pFlushoutInfo != NULL) { tfree(pFileMeta->flushoutData.pFlushoutInfo); } // release all in-memory buffer pages - tFilePagesItem *pFilePages = (*pMemBuffer)->pHead; + tFilePagesItem *pFilePages = pMemBuffer->pHead; while (pFilePages != NULL) { tFilePagesItem *pTmp = pFilePages; pFilePages = pFilePages->pNext; @@ -99,23 +100,27 @@ void tExtMemBufferDestroy(tExtMemBuffer **pMemBuffer) { } // close temp file - if ((*pMemBuffer)->dataFile != 0) { - int32_t ret = fclose((*pMemBuffer)->dataFile); - if (ret != 0) { - pError("failed to close file:%s, 
reason:%s", (*pMemBuffer)->dataFilePath, strerror(errno)); + if (pMemBuffer->file != 0) { + if (fclose(pMemBuffer->file) != 0) { + pError("failed to close file:%s, reason:%s", pMemBuffer->path, strerror(errno)); } - unlink((*pMemBuffer)->dataFilePath); + + pTrace("remove temp file:%s for external buffer", pMemBuffer->path); + unlink(pMemBuffer->path); } - tColModelDestroy((*pMemBuffer)->pColModel); + destroyColumnModel(pMemBuffer->pColumnModel); - tfree(*pMemBuffer); + tfree(pMemBuffer->path); + tfree(pMemBuffer); + + return NULL; } /* * alloc more memory for flush out info entries. */ -static bool allocFlushoutInfoEntries(tFileMeta *pFileMeta) { +static bool allocFlushoutInfoEntries(SFileInfo *pFileMeta) { pFileMeta->flushoutData.nAllocSize = pFileMeta->flushoutData.nAllocSize << 1; tFlushoutInfo *tmp = (tFlushoutInfo *)realloc(pFileMeta->flushoutData.pFlushoutInfo, @@ -129,12 +134,12 @@ static bool allocFlushoutInfoEntries(tFileMeta *pFileMeta) { return true; } -bool tExtMemBufferAlloc(tExtMemBuffer *pMemBuffer) { - if (pMemBuffer->numOfPagesInMem > 0 && pMemBuffer->numOfPagesInMem == pMemBuffer->nMaxSizeInPages) { - /* - * the in-mem buffer is full. - * To flush data to disk to accommodate more data - */ +static bool tExtMemBufferAlloc(tExtMemBuffer *pMemBuffer) { + /* + * the in-mem buffer is full. + * To flush data to disk to accommodate more data + */ + if (pMemBuffer->numOfInMemPages > 0 && pMemBuffer->numOfInMemPages == pMemBuffer->inMemCapacity) { if (!tExtMemBufferFlush(pMemBuffer)) { return false; } @@ -142,12 +147,12 @@ bool tExtMemBufferAlloc(tExtMemBuffer *pMemBuffer) { /* * We do not recycle the file page structure. And in flush data operations, all - * filepage that are full of data are destroyed after data being flushed to disk. + * file page that are full of data are destroyed after data being flushed to disk. * * The memory buffer pages may be recycle in order to avoid unnecessary memory * allocation later. 
*/ - tFilePagesItem *item = (tFilePagesItem *)calloc(1, pMemBuffer->nPageSize + sizeof(tFilePagesItem)); + tFilePagesItem *item = (tFilePagesItem *)calloc(1, pMemBuffer->pageSize + sizeof(tFilePagesItem)); if (item == NULL) { return false; } @@ -163,8 +168,7 @@ bool tExtMemBufferAlloc(tExtMemBuffer *pMemBuffer) { pMemBuffer->pHead = item; } - pMemBuffer->numOfPagesInMem += 1; - + pMemBuffer->numOfInMemPages += 1; return true; } @@ -173,7 +177,7 @@ bool tExtMemBufferAlloc(tExtMemBuffer *pMemBuffer) { */ int16_t tExtMemBufferPut(tExtMemBuffer *pMemBuffer, void *data, int32_t numOfRows) { if (numOfRows == 0) { - return pMemBuffer->numOfPagesInMem; + return pMemBuffer->numOfInMemPages; } tFilePagesItem *pLast = pMemBuffer->pTail; @@ -185,24 +189,23 @@ int16_t tExtMemBufferPut(tExtMemBuffer *pMemBuffer, void *data, int32_t numOfRow pLast = pMemBuffer->pTail; } - if (pLast->item.numOfElems + numOfRows <= pMemBuffer->numOfElemsPerPage) { - // enough space for records - tColModelAppend(pMemBuffer->pColModel, &pLast->item, data, 0, numOfRows, numOfRows); + if (pLast->item.numOfElems + numOfRows <= pMemBuffer->numOfElemsPerPage) { // enough space for records + tColModelAppend(pMemBuffer->pColumnModel, &pLast->item, data, 0, numOfRows, numOfRows); + pMemBuffer->numOfElemsInBuffer += numOfRows; - pMemBuffer->numOfAllElems += numOfRows; + pMemBuffer->numOfTotalElems += numOfRows; } else { int32_t numOfRemainEntries = pMemBuffer->numOfElemsPerPage - pLast->item.numOfElems; - tColModelAppend(pMemBuffer->pColModel, &pLast->item, data, 0, numOfRemainEntries, numOfRows); + tColModelAppend(pMemBuffer->pColumnModel, &pLast->item, data, 0, numOfRemainEntries, numOfRows); pMemBuffer->numOfElemsInBuffer += numOfRemainEntries; - pMemBuffer->numOfAllElems += numOfRemainEntries; + pMemBuffer->numOfTotalElems += numOfRemainEntries; int32_t hasWritten = numOfRemainEntries; int32_t remain = numOfRows - numOfRemainEntries; while (remain > 0) { - if (!tExtMemBufferAlloc(pMemBuffer)) { - // 
failed to allocate memory buffer + if (!tExtMemBufferAlloc(pMemBuffer)) { // failed to allocate memory buffer return -1; } @@ -213,10 +216,10 @@ int16_t tExtMemBufferPut(tExtMemBuffer *pMemBuffer, void *data, int32_t numOfRow numOfWriteElems = remain; } - pMemBuffer->numOfAllElems += numOfWriteElems; + pMemBuffer->numOfTotalElems += numOfWriteElems; pLast = pMemBuffer->pTail; - tColModelAppend(pMemBuffer->pColModel, &pLast->item, data, hasWritten, numOfWriteElems, numOfRows); + tColModelAppend(pMemBuffer->pColumnModel, &pLast->item, data, hasWritten, numOfWriteElems, numOfRows); remain -= numOfWriteElems; pMemBuffer->numOfElemsInBuffer += numOfWriteElems; @@ -224,11 +227,11 @@ int16_t tExtMemBufferPut(tExtMemBuffer *pMemBuffer, void *data, int32_t numOfRow } } - return pMemBuffer->numOfPagesInMem; + return pMemBuffer->numOfInMemPages; } static bool tExtMemBufferUpdateFlushoutInfo(tExtMemBuffer *pMemBuffer) { - tFileMeta *pFileMeta = &pMemBuffer->fileMeta; + SFileInfo *pFileMeta = &pMemBuffer->fileMeta; if (pMemBuffer->flushModel == MULTIPLE_APPEND_MODEL) { if (pFileMeta->flushoutData.nLength == pFileMeta->flushoutData.nAllocSize && !allocFlushoutInfoEntries(pFileMeta)) { @@ -245,46 +248,47 @@ static bool tExtMemBufferUpdateFlushoutInfo(tExtMemBuffer *pMemBuffer) { } // only the page still in buffer is flushed out to disk - pFlushoutInfo->numOfPages = pMemBuffer->numOfPagesInMem; + pFlushoutInfo->numOfPages = pMemBuffer->numOfInMemPages; pFileMeta->flushoutData.nLength += 1; } else { - // always update the first flushout array in single_flush_model + // always update the first flush out array in single_flush_model pFileMeta->flushoutData.nLength = 1; tFlushoutInfo *pFlushoutInfo = &pFileMeta->flushoutData.pFlushoutInfo[0]; - pFlushoutInfo->numOfPages += pMemBuffer->numOfPagesInMem; + pFlushoutInfo->numOfPages += pMemBuffer->numOfInMemPages; } return true; } static void tExtMemBufferClearFlushoutInfo(tExtMemBuffer *pMemBuffer) { - tFileMeta *pFileMeta = 
&pMemBuffer->fileMeta; + SFileInfo *pFileMeta = &pMemBuffer->fileMeta; pFileMeta->flushoutData.nLength = 0; memset(pFileMeta->flushoutData.pFlushoutInfo, 0, sizeof(tFlushoutInfo) * pFileMeta->flushoutData.nAllocSize); } bool tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) { - if (pMemBuffer->numOfAllElems == 0) { + if (pMemBuffer->numOfTotalElems == 0) { return true; } - if (pMemBuffer->dataFile == NULL) { - if ((pMemBuffer->dataFile = fopen(pMemBuffer->dataFilePath, "wb+")) == NULL) { + if (pMemBuffer->file == NULL) { + if ((pMemBuffer->file = fopen(pMemBuffer->path, "wb+")) == NULL) { return false; } } + /* all data has been flushed to disk, ignore flush operation */ if (pMemBuffer->numOfElemsInBuffer == 0) { - /* all data has been flushed to disk, ignore flush operation */ return true; } - bool ret = true; + bool ret = true; + tFilePagesItem *first = pMemBuffer->pHead; while (first != NULL) { - size_t retVal = fwrite((char *)&(first->item), pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file); if (retVal <= 0) { // failed to write to buffer, may be not enough space ret = false; } @@ -298,12 +302,12 @@ bool tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) { tfree(ptmp); // release all data in memory buffer } - fflush(pMemBuffer->dataFile); // flush to disk + fflush(pMemBuffer->file); // flush to disk tExtMemBufferUpdateFlushoutInfo(pMemBuffer); pMemBuffer->numOfElemsInBuffer = 0; - pMemBuffer->numOfPagesInMem = 0; + pMemBuffer->numOfInMemPages = 0; pMemBuffer->pHead = NULL; pMemBuffer->pTail = NULL; @@ -311,11 +315,11 @@ bool tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) { } void tExtMemBufferClear(tExtMemBuffer *pMemBuffer) { - if (pMemBuffer == NULL || pMemBuffer->numOfAllElems == 0) return; + if (pMemBuffer == NULL || pMemBuffer->numOfTotalElems == 0) { + return; + } - /* - * release all data in memory buffer - */ + //release all data in memory buffer tFilePagesItem *first = 
pMemBuffer->pHead; while (first != NULL) { tFilePagesItem *ptmp = first; @@ -327,15 +331,16 @@ void tExtMemBufferClear(tExtMemBuffer *pMemBuffer) { pMemBuffer->fileMeta.nFileSize = 0; pMemBuffer->numOfElemsInBuffer = 0; - pMemBuffer->numOfPagesInMem = 0; + pMemBuffer->numOfInMemPages = 0; + pMemBuffer->pHead = NULL; pMemBuffer->pTail = NULL; tExtMemBufferClearFlushoutInfo(pMemBuffer); - if (pMemBuffer->dataFile != NULL) { - // reset the write pointer to the header - fseek(pMemBuffer->dataFile, 0, SEEK_SET); + // reset the write pointer to the header + if (pMemBuffer->file != NULL) { + fseek(pMemBuffer->file, 0, SEEK_SET); } } @@ -349,8 +354,8 @@ bool tExtMemBufferLoadData(tExtMemBuffer *pMemBuffer, tFilePage *pFilePage, int3 return false; } - size_t ret = fseek(pMemBuffer->dataFile, (pInfo->startPageId + pageIdx) * pMemBuffer->nPageSize, SEEK_SET); - ret = fread(pFilePage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + size_t ret = fseek(pMemBuffer->file, (pInfo->startPageId + pageIdx) * pMemBuffer->pageSize, SEEK_SET); + ret = fread(pFilePage, pMemBuffer->pageSize, 1, pMemBuffer->file); return (ret > 0); } @@ -358,474 +363,11 @@ bool tExtMemBufferLoadData(tExtMemBuffer *pMemBuffer, tFilePage *pFilePage, int3 bool tExtMemBufferIsAllDataInMem(tExtMemBuffer *pMemBuffer) { return (pMemBuffer->fileMeta.nFileSize == 0); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// TODO safty check in result -void tBucketBigIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) { - int64_t v = *(int64_t *)value; - - if (pBucket->nRange.i64MaxVal == INT64_MIN) { - if (v >= 0) { - *segIdx = ((v >> (64 - 9)) >> 6) + 8; - *slotIdx = (v >> (64 - 9)) & 0x3F; - } else { // v<0 - *segIdx = ((-v) >> (64 - 9)) >> 6; - *slotIdx = ((-v) >> (64 - 9)) & 0x3F; - *segIdx = 7 - (*segIdx); - } - } else { - // todo hash for bigint and float and double - int64_t span = pBucket->nRange.i64MaxVal - 
pBucket->nRange.i64MinVal; - if (span < pBucket->nTotalSlots) { - int32_t delta = (int32_t)(v - pBucket->nRange.i64MinVal); - *segIdx = delta / pBucket->nSlotsOfSeg; - *slotIdx = delta % pBucket->nSlotsOfSeg; - } else { - double x = (double)span / pBucket->nTotalSlots; - double posx = (v - pBucket->nRange.i64MinVal) / x; - if (v == pBucket->nRange.i64MaxVal) { - posx -= 1; - } - - *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; - *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; - } - } -} - -// todo refactor to more generic -void tBucketIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) { - int32_t v = *(int32_t *)value; - - if (pBucket->nRange.iMaxVal == INT32_MIN) { - /* - * taking negative integer into consideration, - * there is only half of pBucket->segs available for non-negative integer - */ - // int32_t numOfSlots = pBucket->nTotalSlots>>1; - // int32_t bits = bitsOfNumber(numOfSlots)-1; - - if (v >= 0) { - *segIdx = ((v >> (32 - 9)) >> 6) + 8; - *slotIdx = (v >> (32 - 9)) & 0x3F; - } else { // v<0 - *segIdx = ((-v) >> (32 - 9)) >> 6; - *slotIdx = ((-v) >> (32 - 9)) & 0x3F; - *segIdx = 7 - (*segIdx); - } - } else { - // divide a range of [iMinVal, iMaxVal] into 1024 buckets - int32_t span = pBucket->nRange.iMaxVal - pBucket->nRange.iMinVal; - if (span < pBucket->nTotalSlots) { - int32_t delta = v - pBucket->nRange.iMinVal; - *segIdx = delta / pBucket->nSlotsOfSeg; - *slotIdx = delta % pBucket->nSlotsOfSeg; - } else { - double x = (double)span / pBucket->nTotalSlots; - double posx = (v - pBucket->nRange.iMinVal) / x; - if (v == pBucket->nRange.iMaxVal) { - posx -= 1; - } - *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; - *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; - } - } -} - -void tBucketDoubleHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) { - //double v = *(double *)value; - double v = GET_DOUBLE_VAL(value); - - if (pBucket->nRange.dMinVal == DBL_MAX) { - /* - * taking negative integer 
into consideration, - * there is only half of pBucket->segs available for non-negative integer - */ - double x = DBL_MAX / (pBucket->nTotalSlots >> 1); - double posx = (v + DBL_MAX) / x; - *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; - *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; - } else { - // divide a range of [dMinVal, dMaxVal] into 1024 buckets - double span = pBucket->nRange.dMaxVal - pBucket->nRange.dMinVal; - if (span < pBucket->nTotalSlots) { - int32_t delta = (int32_t)(v - pBucket->nRange.dMinVal); - *segIdx = delta / pBucket->nSlotsOfSeg; - *slotIdx = delta % pBucket->nSlotsOfSeg; - } else { - double x = span / pBucket->nTotalSlots; - double posx = (v - pBucket->nRange.dMinVal) / x; - if (v == pBucket->nRange.dMaxVal) { - posx -= 1; - } - *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; - *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; - } - - if (*segIdx < 0 || *segIdx > 16 || *slotIdx < 0 || *slotIdx > 64) { - pError("error in hash process. segment is: %d, slot id is: %d\n", *segIdx, *slotIdx); - } - } -} - -tMemBucket* tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nElemSize, int16_t dataType, tOrderDescriptor *pDesc) { - tMemBucket* pBucket = (tMemBucket *)malloc(sizeof(tMemBucket)); - - pBucket->nTotalSlots = totalSlots; - pBucket->nSlotsOfSeg = 1 << 6; // 64 Segments, 16 slots each seg. 
- pBucket->dataType = dataType; - pBucket->nElemSize = nElemSize; - pBucket->nPageSize = DEFAULT_PAGE_SIZE; - - pBucket->numOfElems = 0; - pBucket->numOfSegs = pBucket->nTotalSlots / pBucket->nSlotsOfSeg; - - pBucket->nTotalBufferSize = nBufferSize; - - pBucket->maxElemsCapacity = pBucket->nTotalBufferSize / pBucket->nElemSize; - - pBucket->numOfTotalPages = pBucket->nTotalBufferSize / pBucket->nPageSize; - pBucket->numOfAvailPages = pBucket->numOfTotalPages; - - pBucket->pOrderDesc = pDesc; - - switch (pBucket->dataType) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_TINYINT: { - pBucket->nRange.iMinVal = INT32_MAX; - pBucket->nRange.iMaxVal = INT32_MIN; - pBucket->HashFunc = tBucketIntHash; - break; - }; - case TSDB_DATA_TYPE_DOUBLE: - case TSDB_DATA_TYPE_FLOAT: { - pBucket->nRange.dMinVal = DBL_MAX; - pBucket->nRange.dMaxVal = -DBL_MAX; - pBucket->HashFunc = tBucketDoubleHash; - break; - }; - case TSDB_DATA_TYPE_BIGINT: { - pBucket->nRange.i64MinVal = INT64_MAX; - pBucket->nRange.i64MaxVal = INT64_MIN; - pBucket->HashFunc = tBucketBigIntHash; - break; - }; - default: { - pError("MemBucket:%p,not support data type %d,failed", *pBucket, pBucket->dataType); - tfree(pBucket); - return NULL; - } - } - - if (pDesc->pSchema->numOfCols != 1 || pDesc->pSchema->colOffset[0] != 0) { - pError("MemBucket:%p,only consecutive data is allowed,invalid numOfCols:%d or offset:%d", - *pBucket, pDesc->pSchema->numOfCols, pDesc->pSchema->colOffset[0]); - tfree(pBucket); - return NULL; - } - - if (pDesc->pSchema->pFields[0].type != dataType) { - pError("MemBucket:%p,data type is not consistent,%d in schema, %d in param", *pBucket, - pDesc->pSchema->pFields[0].type, dataType); - tfree(pBucket); - return NULL; - } - - if (pBucket->numOfTotalPages < pBucket->nTotalSlots) { - pWarn("MemBucket:%p,total buffer pages %d are not enough for all slots", *pBucket, pBucket->numOfTotalPages); - } - - pBucket->pSegs = (tMemBucketSegment 
*)malloc(pBucket->numOfSegs * sizeof(tMemBucketSegment)); - - for (int32_t i = 0; i < pBucket->numOfSegs; ++i) { - pBucket->pSegs[i].numOfSlots = pBucket->nSlotsOfSeg; - pBucket->pSegs[i].pBuffer = NULL; - pBucket->pSegs[i].pBoundingEntries = NULL; - } - - pTrace("MemBucket:%p,created,buffer size:%d,elem size:%d", *pBucket, pBucket->numOfTotalPages * DEFAULT_PAGE_SIZE, - pBucket->nElemSize); - - return pBucket; -} - -void tMemBucketDestroy(tMemBucket *pBucket) { - if (pBucket == NULL) { - return; - } - - if (pBucket->pSegs) { - for (int32_t i = 0; i < pBucket->numOfSegs; ++i) { - tMemBucketSegment *pSeg = &(pBucket->pSegs[i]); - tfree(pSeg->pBoundingEntries); - - if (pSeg->pBuffer == NULL || pSeg->numOfSlots == 0) { - continue; - } - - for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { - if (pSeg->pBuffer[j] != NULL) { - tExtMemBufferDestroy(&pSeg->pBuffer[j]); - } - } - tfree(pSeg->pBuffer); - } - } - - tfree(pBucket->pSegs); - tfree(pBucket); -} - -/* - * find the slots which accounts for largest proportion of total in-memory buffer - */ -static void tBucketGetMaxMemSlot(tMemBucket *pBucket, int16_t *segIdx, int16_t *slotIdx) { - *segIdx = -1; - *slotIdx = -1; - - int32_t val = 0; - for (int32_t k = 0; k < pBucket->numOfSegs; ++k) { - tMemBucketSegment *pSeg = &pBucket->pSegs[k]; - for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { - if (pSeg->pBuffer == NULL || pSeg->pBuffer[i] == NULL) { - continue; - } - - if (val < pSeg->pBuffer[i]->numOfPagesInMem) { - val = pSeg->pBuffer[i]->numOfPagesInMem; - *segIdx = k; - *slotIdx = i; - } - } - } -} - -static void resetBoundingBox(tMemBucketSegment *pSeg, int32_t type) { - switch (type) { - case TSDB_DATA_TYPE_BIGINT: { - for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { - pSeg->pBoundingEntries[i].i64MaxVal = INT64_MIN; - pSeg->pBoundingEntries[i].i64MinVal = INT64_MAX; - } - break; - }; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_TINYINT: { - for (int32_t i = 0; i < pSeg->numOfSlots; 
++i) { - pSeg->pBoundingEntries[i].iMaxVal = INT32_MIN; - pSeg->pBoundingEntries[i].iMinVal = INT32_MAX; - } - break; - }; - case TSDB_DATA_TYPE_DOUBLE: - case TSDB_DATA_TYPE_FLOAT: { - for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { - pSeg->pBoundingEntries[i].dMaxVal = -DBL_MAX; - pSeg->pBoundingEntries[i].dMinVal = DBL_MAX; - } - break; - } - } -} - -void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) { - switch (dataType) { - case TSDB_DATA_TYPE_INT: { - int32_t val = *(int32_t *)data; - if (r->iMinVal > val) { - r->iMinVal = val; - } - - if (r->iMaxVal < val) { - r->iMaxVal = val; - } - break; - }; - case TSDB_DATA_TYPE_BIGINT: { - int64_t val = *(int64_t *)data; - if (r->i64MinVal > val) { - r->i64MinVal = val; - } - - if (r->i64MaxVal < val) { - r->i64MaxVal = val; - } - break; - }; - case TSDB_DATA_TYPE_SMALLINT: { - int32_t val = *(int16_t *)data; - if (r->iMinVal > val) { - r->iMinVal = val; - } - - if (r->iMaxVal < val) { - r->iMaxVal = val; - } - break; - }; - case TSDB_DATA_TYPE_TINYINT: { - int32_t val = *(int8_t *)data; - if (r->iMinVal > val) { - r->iMinVal = val; - } - - if (r->iMaxVal < val) { - r->iMaxVal = val; - } - - break; - }; - case TSDB_DATA_TYPE_DOUBLE: { - //double val = *(double *)data; - double val = GET_DOUBLE_VAL(data); - if (r->dMinVal > val) { - r->dMinVal = val; - } - - if (r->dMaxVal < val) { - r->dMaxVal = val; - } - break; - }; - case TSDB_DATA_TYPE_FLOAT: { - //double val = *(float *)data; - double val = GET_FLOAT_VAL(data); - - if (r->dMinVal > val) { - r->dMinVal = val; - } - - if (r->dMaxVal < val) { - r->dMaxVal = val; - } - break; - }; - default: { assert(false); } - } -} - -/* - * in memory bucket, we only accept the simple data consecutive put in a row/column - * no column-model in this case. 
- */ -void tMemBucketPut(tMemBucket *pBucket, void *data, int32_t numOfRows) { - pBucket->numOfElems += numOfRows; - int16_t segIdx = 0, slotIdx = 0; - - for (int32_t i = 0; i < numOfRows; ++i) { - char *d = (char *)data + i * tDataTypeDesc[pBucket->dataType].nSize; - - switch (pBucket->dataType) { - case TSDB_DATA_TYPE_SMALLINT: { - int32_t val = *(int16_t *)d; - (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); - break; - } - case TSDB_DATA_TYPE_TINYINT: { - int32_t val = *(int8_t *)d; - (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); - break; - } - case TSDB_DATA_TYPE_INT: { - int32_t val = *(int32_t *)d; - (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - int64_t val = *(int64_t *)d; - (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - //double val = *(double *)d; - double val = GET_DOUBLE_VAL(d); - (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - //double val = *(float *)d; - double val = GET_FLOAT_VAL(d); - (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); - break; - } - } - - tMemBucketSegment *pSeg = &pBucket->pSegs[segIdx]; - if (pSeg->pBoundingEntries == NULL) { - pSeg->pBoundingEntries = (MinMaxEntry *)malloc(sizeof(MinMaxEntry) * pBucket->nSlotsOfSeg); - resetBoundingBox(pSeg, pBucket->dataType); - } - - if (pSeg->pBuffer == NULL) { - pSeg->pBuffer = (tExtMemBuffer **)calloc(pBucket->nSlotsOfSeg, sizeof(void *)); - } - - if (pSeg->pBuffer[slotIdx] == NULL) { - char name[MAX_TMPFILE_PATH_LENGTH] = {0}; - getTmpfilePath("tb_ex_bk_%lld_%lld_%d_%d", name); - - tExtMemBufferCreate(&pSeg->pBuffer[slotIdx], pBucket->numOfTotalPages * pBucket->nPageSize, pBucket->nElemSize, - name, pBucket->pOrderDesc->pSchema); - pSeg->pBuffer[slotIdx]->flushModel = SINGLE_APPEND_MODEL; - pBucket->pOrderDesc->pSchema->maxCapacity = pSeg->pBuffer[slotIdx]->numOfElemsPerPage; - } - - 
tMemBucketUpdateBoundingBox(&pSeg->pBoundingEntries[slotIdx], d, pBucket->dataType); - - // ensure available memory pages to allocate - int16_t cseg = 0, cslot = 0; - if (pBucket->numOfAvailPages == 0) { - pTrace("MemBucket:%p,max avail size:%d, no avail memory pages,", pBucket, pBucket->numOfTotalPages); - - tBucketGetMaxMemSlot(pBucket, &cseg, &cslot); - if (cseg == -1 || cslot == -1) { - pError("MemBucket:%p,failed to find appropriated avail buffer", pBucket); - return; - } - - if (cseg != segIdx || cslot != slotIdx) { - pBucket->numOfAvailPages += pBucket->pSegs[cseg].pBuffer[cslot]->numOfPagesInMem; - - int32_t avail = pBucket->pSegs[cseg].pBuffer[cslot]->numOfPagesInMem; - UNUSED(avail); - tExtMemBufferFlush(pBucket->pSegs[cseg].pBuffer[cslot]); - - pTrace("MemBucket:%p,seg:%d,slot:%d flushed to disk,new avail pages:%d", pBucket, cseg, cslot, - pBucket->numOfAvailPages); - } else { - pTrace("MemBucket:%p,failed to choose slot to flush to disk seg:%d,slot:%d", - pBucket, cseg, cslot); - } - } - int16_t consumedPgs = pSeg->pBuffer[slotIdx]->numOfPagesInMem; - - int16_t newPgs = tExtMemBufferPut(pSeg->pBuffer[slotIdx], d, 1); - /* - * trigger 1. page re-allocation, to reduce the available pages - * 2. page flushout, to increase the available pages - */ - pBucket->numOfAvailPages += (consumedPgs - newPgs); - } -} - -void releaseBucket(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) { - if (segIdx < 0 || segIdx > pMemBucket->numOfSegs || slotIdx < 0) { - return; - } - - tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx]; - if (slotIdx < 0 || slotIdx >= pSeg->numOfSlots || pSeg->pBuffer[slotIdx] == NULL) { - return; - } - - tExtMemBufferDestroy(&pSeg->pBuffer[slotIdx]); -} - static FORCE_INLINE int32_t primaryKeyComparator(int64_t f1, int64_t f2, int32_t colIdx, int32_t tsOrder) { if (f1 == f2) { return 0; } - + if (colIdx == 0 && tsOrder == TSQL_SO_DESC) { // primary column desc order return (f1 < f2) ? 
1 : -1; } else { // asc @@ -833,7 +375,6 @@ static FORCE_INLINE int32_t primaryKeyComparator(int64_t f1, int64_t f2, int32_t } } -// todo refactor static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) { switch (type) { case TSDB_DATA_TYPE_INT: { @@ -904,7 +445,7 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i return (ret < 0) ? -1 : 1; }; } - + return 0; } @@ -912,14 +453,14 @@ int32_t compare_a(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1, int32_t s2, char *data2) { assert(numOfRows1 == numOfRows2); - int32_t cmpCnt = pDescriptor->orderIdx.numOfOrderedCols; + int32_t cmpCnt = pDescriptor->orderIdx.numOfCols; for (int32_t i = 0; i < cmpCnt; ++i) { int32_t colIdx = pDescriptor->orderIdx.pData[i]; - char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pSchema, numOfRows1, s1, colIdx); - char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pSchema, numOfRows2, s2, colIdx); + char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pColumnModel, numOfRows1, s1, colIdx); + char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pColumnModel, numOfRows2, s2, colIdx); - if (pDescriptor->pSchema->pFields[colIdx].type == TSDB_DATA_TYPE_TIMESTAMP) { + if (pDescriptor->pColumnModel->pFields[colIdx].field.type == TSDB_DATA_TYPE_TIMESTAMP) { int32_t ret = primaryKeyComparator(*(int64_t *)f1, *(int64_t *)f2, colIdx, pDescriptor->tsOrder); if (ret == 0) { continue; @@ -927,7 +468,7 @@ int32_t compare_a(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1, return ret; } } else { - SSchema *pSchema = &pDescriptor->pSchema->pFields[colIdx]; + SSchema *pSchema = &pDescriptor->pColumnModel->pFields[colIdx]; int32_t ret = columnValueAscendingComparator(f1, f2, pSchema->type, pSchema->bytes); if (ret == 0) { continue; @@ -944,14 +485,14 @@ int32_t compare_d(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1, int32_t s2, char *data2) { assert(numOfRows1 == numOfRows2); - int32_t cmpCnt = 
pDescriptor->orderIdx.numOfOrderedCols; + int32_t cmpCnt = pDescriptor->orderIdx.numOfCols; for (int32_t i = 0; i < cmpCnt; ++i) { int32_t colIdx = pDescriptor->orderIdx.pData[i]; - char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pSchema, numOfRows1, s1, colIdx); - char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pSchema, numOfRows2, s2, colIdx); + char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pColumnModel, numOfRows1, s1, colIdx); + char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pColumnModel, numOfRows2, s2, colIdx); - if (pDescriptor->pSchema->pFields[colIdx].type == TSDB_DATA_TYPE_TIMESTAMP) { + if (pDescriptor->pColumnModel->pFields[colIdx].field.type == TSDB_DATA_TYPE_TIMESTAMP) { int32_t ret = primaryKeyComparator(*(int64_t *)f1, *(int64_t *)f2, colIdx, pDescriptor->tsOrder); if (ret == 0) { continue; @@ -959,7 +500,7 @@ int32_t compare_d(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1, return ret; } } else { - SSchema *pSchema = &pDescriptor->pSchema->pFields[colIdx]; + SSchema *pSchema = &pDescriptor->pColumnModel->pFields[colIdx]; int32_t ret = columnValueAscendingComparator(f1, f2, pSchema->type, pSchema->bytes); if (ret == 0) { continue; @@ -981,12 +522,13 @@ FORCE_INLINE int32_t compare_sd(tOrderDescriptor *pDescriptor, int32_t numOfRows return compare_d(pDescriptor, numOfRows, idx1, data, numOfRows, idx2, data); } -static void swap(tOrderDescriptor *pDescriptor, int32_t count, int32_t s1, char *data1, int32_t s2) { - for (int32_t i = 0; i < pDescriptor->pSchema->numOfCols; ++i) { - void *first = COLMODEL_GET_VAL(data1, pDescriptor->pSchema, count, s1, i); - void *second = COLMODEL_GET_VAL(data1, pDescriptor->pSchema, count, s2, i); +static void swap(SColumnModel *pColumnModel, int32_t count, int32_t s1, char *data1, int32_t s2) { + for (int32_t i = 0; i < pColumnModel->numOfCols; ++i) { + void *first = COLMODEL_GET_VAL(data1, pColumnModel, count, s1, i); + void *second = COLMODEL_GET_VAL(data1, pColumnModel, count, s2, i); - 
tsDataSwap(first, second, pDescriptor->pSchema->pFields[i].type, pDescriptor->pSchema->pFields[i].bytes); + SSchema* pSchema = &pColumnModel->pFields[i].field; + tsDataSwap(first, second, pSchema->type, pSchema->bytes); } } @@ -995,7 +537,7 @@ static void tColDataInsertSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, for (int32_t i = start + 1; i <= end; ++i) { for (int32_t j = i; j > start; --j) { if (compareFn(pDescriptor, numOfRows, j, j - 1, data) == -1) { - swap(pDescriptor, numOfRows, j - 1, data, j); + swap(pDescriptor->pColumnModel, numOfRows, j - 1, data, j); } else { break; } @@ -1016,7 +558,7 @@ static void UNUSED_FUNC tSortDataPrint(int32_t type, char *prefix, char *startx, break; case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%s:(%lld, %lld, %lld)\n", prefix, *(int64_t *)startx, *(int64_t *)midx, *(int64_t *)endx); + printf("%s:(%" PRId64 ", %" PRId64 ", %" PRId64 ")\n", prefix, *(int64_t *)startx, *(int64_t *)midx, *(int64_t *)endx); break; case TSDB_DATA_TYPE_FLOAT: printf("%s:(%f, %f, %f)\n", prefix, *(float *)startx, *(float *)midx, *(float *)endx); @@ -1037,33 +579,33 @@ static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta #if defined(_DEBUG_VIEW) int32_t f = pDescriptor->orderIdx.pData[0]; - char *midx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, midIdx, f); - char *startx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, start, f); - char *endx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, end, f); + char *midx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, midIdx, f); + char *startx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, start, f); + char *endx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, end, f); int32_t colIdx = pDescriptor->orderIdx.pData[0]; - tSortDataPrint(pDescriptor->pSchema->pFields[colIdx].type, "before", startx, midx, endx); + 
tSortDataPrint(pDescriptor->pColumnModel->pFields[colIdx].field.type, "before", startx, midx, endx); #endif if (compareFn(pDescriptor, numOfRows, midIdx, start, data) == 1) { - swap(pDescriptor, numOfRows, start, data, midIdx); + swap(pDescriptor->pColumnModel, numOfRows, start, data, midIdx); } if (compareFn(pDescriptor, numOfRows, midIdx, end, data) == 1) { - swap(pDescriptor, numOfRows, midIdx, data, start); - swap(pDescriptor, numOfRows, midIdx, data, end); + swap(pDescriptor->pColumnModel, numOfRows, midIdx, data, start); + swap(pDescriptor->pColumnModel, numOfRows, midIdx, data, end); } else if (compareFn(pDescriptor, numOfRows, start, end, data) == 1) { - swap(pDescriptor, numOfRows, start, data, end); + swap(pDescriptor->pColumnModel, numOfRows, start, data, end); } assert(compareFn(pDescriptor, numOfRows, midIdx, start, data) <= 0 && compareFn(pDescriptor, numOfRows, start, end, data) <= 0); #if defined(_DEBUG_VIEW) - midx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, midIdx, f); - startx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, start, f); - endx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, end, f); - tSortDataPrint(pDescriptor->pSchema->pFields[colIdx].type, "after", startx, midx, endx); + midx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, midIdx, f); + startx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, start, f); + endx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, end, f); + tSortDataPrint(pDescriptor->pColumnModel->pFields[colIdx].field.type, "after", startx, midx, endx); #endif } @@ -1071,9 +613,9 @@ static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t int32_t colIdx = pDescriptor->orderIdx.pData[0]; for (int32_t i = 0; i < len; ++i) { - char *startx = COLMODEL_GET_VAL(d, pDescriptor->pSchema, numOfRows, i, colIdx); + char *startx = COLMODEL_GET_VAL(d, pDescriptor->pColumnModel, numOfRows, i, colIdx); - switch 
(pDescriptor->pSchema->pFields[colIdx].type) { + switch (pDescriptor->pColumnModel->pFields[colIdx].field.type) { case TSDB_DATA_TYPE_DOUBLE: printf("%lf\t", *(double *)startx); break; @@ -1092,7 +634,7 @@ static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t break; case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%lld\t", *(int64_t *)startx); + printf("%" PRId64 "\t", *(int64_t *)startx); break; case TSDB_DATA_TYPE_BINARY: printf("%s\t", startx); @@ -1117,15 +659,15 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta } #ifdef _DEBUG_VIEW - printf("before sort:\n"); - tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); +// printf("before sort:\n"); +// tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); #endif int32_t s = start, e = end; median(pDescriptor, numOfRows, start, end, data, compareFn); #ifdef _DEBUG_VIEW - printf("%s called: %d\n", __FUNCTION__, qsort_call++); +// printf("%s called: %d\n", __FUNCTION__, qsort_call++); #endif UNUSED(qsort_call); @@ -1141,17 +683,17 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta } if (ret == 0 && e != end_same) { - swap(pDescriptor, numOfRows, e, data, end_same--); + swap(pDescriptor->pColumnModel, numOfRows, e, data, end_same--); } e--; } if (e != s) { - swap(pDescriptor, numOfRows, s, data, e); + swap(pDescriptor->pColumnModel, numOfRows, s, data, e); } #ifdef _DEBUG_VIEW - tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); +// tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); #endif while (s < e) { @@ -1161,16 +703,16 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta } if (ret == 0 && s != start_same) { - swap(pDescriptor, numOfRows, s, data, start_same++); + swap(pDescriptor->pColumnModel, numOfRows, s, data, start_same++); } s++; } if (s != e) { - swap(pDescriptor, numOfRows, s, data, e); + 
swap(pDescriptor->pColumnModel, numOfRows, s, data, e); } #ifdef _DEBUG_VIEW - tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); +// tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); #endif } @@ -1180,14 +722,14 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta int32_t right = end; while (right > end_same && left <= end_same) { - swap(pDescriptor, numOfRows, left++, data, right--); + swap(pDescriptor->pColumnModel, numOfRows, left++, data, right--); } // (pivotal+1) + steps of number that are identical pivotal rightx += (end - end_same); #ifdef _DEBUG_VIEW - tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); +// tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); #endif } @@ -1197,14 +739,14 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta int32_t right = e - 1; while (left < start_same && right >= start_same) { - swap(pDescriptor, numOfRows, left++, data, right--); + swap(pDescriptor->pColumnModel, numOfRows, left++, data, right--); } // (pivotal-1) - steps of number that are identical pivotal leftx -= (start_same - start); #ifdef _DEBUG_VIEW - tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); +// tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); #endif } @@ -1217,142 +759,50 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta } } -tExtMemBuffer *releaseBucketsExceptFor(tMemBucket *pMemBucket, int16_t segIdx, int16_t slotIdx) { - tExtMemBuffer *pBuffer = NULL; - - for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { - tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; - - for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { - if (i == segIdx && j == slotIdx) { - pBuffer = pSeg->pBuffer[j]; - } else { - if (pSeg->pBuffer && pSeg->pBuffer[j]) { - tExtMemBufferDestroy(&pSeg->pBuffer[j]); - } - } - } - } - - return pBuffer; -} - -static tFilePage *loadIntoBucketFromDisk(tMemBucket 
*pMemBucket, int32_t segIdx, int32_t slotIdx, - tOrderDescriptor *pDesc) { - // release all data in other slots - tExtMemBuffer *pMemBuffer = pMemBucket->pSegs[segIdx].pBuffer[slotIdx]; - tFilePage * buffer = (tFilePage *)calloc(1, pMemBuffer->nElemSize * pMemBuffer->numOfAllElems + sizeof(tFilePage)); - int32_t oldCapacity = pDesc->pSchema->maxCapacity; - pDesc->pSchema->maxCapacity = pMemBuffer->numOfAllElems; - - if (!tExtMemBufferIsAllDataInMem(pMemBuffer)) { - pMemBuffer = releaseBucketsExceptFor(pMemBucket, segIdx, slotIdx); - assert(pMemBuffer->numOfAllElems > 0); - - // load data in disk to memory - tFilePage *pPage = (tFilePage *)calloc(1, pMemBuffer->nPageSize); - - for (int32_t i = 0; i < pMemBuffer->fileMeta.flushoutData.nLength; ++i) { - tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[i]; - - int32_t ret = fseek(pMemBuffer->dataFile, pFlushInfo->startPageId * pMemBuffer->nPageSize, SEEK_SET); - UNUSED(ret); - - for (uint32_t j = 0; j < pFlushInfo->numOfPages; ++j) { - ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); - assert(pPage->numOfElems > 0); - - tColModelAppend(pDesc->pSchema, buffer, pPage->data, 0, pPage->numOfElems, pPage->numOfElems); - printf("id: %d count: %d\n", j, buffer->numOfElems); - } - } - tfree(pPage); - - assert(buffer->numOfElems == pMemBuffer->fileMeta.numOfElemsInFile); +/* + * deep copy of sschema + */ +SColumnModel *createColumnModel(SSchema *fields, int32_t numOfCols, int32_t blockCapacity) { + SColumnModel *pColumnModel = (SColumnModel *)calloc(1, sizeof(SColumnModel) + numOfCols * sizeof(SSchemaEx)); + if (pColumnModel == NULL) { + return NULL; } - // load data in pMemBuffer to buffer - tFilePagesItem *pListItem = pMemBuffer->pHead; - while (pListItem != NULL) { - tColModelAppend(pDesc->pSchema, buffer, pListItem->item.data, 0, pListItem->item.numOfElems, - pListItem->item.numOfElems); - pListItem = pListItem->pNext; + pColumnModel->pFields = (SSchemaEx *)(&pColumnModel[1]); 
+ + for(int32_t i = 0; i < numOfCols; ++i) { + SSchemaEx* pSchemaEx = &pColumnModel->pFields[i]; + pSchemaEx->field = fields[i]; + pSchemaEx->offset = pColumnModel->rowSize; + + pColumnModel->rowSize += pSchemaEx->field.bytes; } - tColDataQSort(pDesc, buffer->numOfElems, 0, buffer->numOfElems - 1, buffer->data, TSQL_SO_ASC); - - pDesc->pSchema->maxCapacity = oldCapacity; // restore value - return buffer; -} + pColumnModel->numOfCols = numOfCols; + pColumnModel->capacity = blockCapacity; -double findOnlyResult(tMemBucket *pMemBucket) { - assert(pMemBucket->numOfElems == 1); - - for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { - tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; - if (pSeg->pBuffer) { - for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { - tExtMemBuffer *pBuffer = pSeg->pBuffer[j]; - if (pBuffer) { - assert(pBuffer->numOfAllElems == 1); - tFilePage *pPage = &pBuffer->pHead->item; - if (pBuffer->numOfElemsInBuffer == 1) { - switch (pMemBucket->dataType) { - case TSDB_DATA_TYPE_INT: - return *(int32_t *)pPage->data; - case TSDB_DATA_TYPE_SMALLINT: - return *(int16_t *)pPage->data; - case TSDB_DATA_TYPE_TINYINT: - return *(int8_t *)pPage->data; - case TSDB_DATA_TYPE_BIGINT: - return (double)(*(int64_t *)pPage->data); - case TSDB_DATA_TYPE_DOUBLE: { - double dv = GET_DOUBLE_VAL(pPage->data); - //return *(double *)pPage->data; - return dv; - } - case TSDB_DATA_TYPE_FLOAT: { - float fv = GET_FLOAT_VAL(pPage->data); - //return *(float *)pPage->data; - return fv; - } - default: - return 0; - } - } - } - } - } - } - return 0; + return pColumnModel; } -/* - * deep copy of sschema - */ -tColModel *tColModelCreate(SSchema *field, int32_t numOfCols, int32_t maxCapacity) { - tColModel *pSchema = - (tColModel *)calloc(1, sizeof(tColModel) + numOfCols * sizeof(SSchema) + numOfCols * sizeof(int16_t)); - if (pSchema == NULL) { +SColumnModel *cloneColumnModel(SColumnModel *pSrc) { + if (pSrc == NULL) { return NULL; } - - pSchema->pFields = (SSchema *)(&pSchema[1]); - 
memcpy(pSchema->pFields, field, sizeof(SSchema) * numOfCols); - - pSchema->colOffset = (int16_t *)(&pSchema->pFields[numOfCols]); - pSchema->colOffset[0] = 0; - for (int32_t i = 1; i < numOfCols; ++i) { - pSchema->colOffset[i] = pSchema->colOffset[i - 1] + pSchema->pFields[i - 1].bytes; + + SColumnModel *pColumnModel = (SColumnModel *)calloc(1, sizeof(SColumnModel) + pSrc->numOfCols * sizeof(SSchemaEx)); + if (pColumnModel == NULL) { + return NULL; } - - pSchema->numOfCols = numOfCols; - pSchema->maxCapacity = maxCapacity; - - return pSchema; + + *pColumnModel = *pSrc; + + pColumnModel->pFields = (SSchemaEx*) (&pColumnModel[1]); + memcpy(pColumnModel->pFields, pSrc->pFields, pSrc->numOfCols * sizeof(SSchemaEx)); + + return pColumnModel; } -void tColModelDestroy(tColModel *pModel) { +void destroyColumnModel(SColumnModel *pModel) { if (pModel == NULL) { return; } @@ -1375,10 +825,16 @@ static void printBinaryData(char *data, int32_t len) { } if (len == 50) { // probably the avg intermediate result - printf("%lf,%d\t", *(double *)data, *(int64_t *)(data + sizeof(double))); + printf("%lf,%" PRId64 "\t", *(double *)data, *(int64_t *)(data + sizeof(double))); } else if (data[8] == ',') { // in TSDB_FUNC_FIRST_DST/TSDB_FUNC_LAST_DST, // the value is seperated by ',' - printf("%ld,%0x\t", *(int64_t *)data, data + sizeof(int64_t) + 1); + //printf("%" PRId64 ",%0x\t", *(int64_t *)data, data + sizeof(int64_t) + 1); + printf("%" PRId64 ", HEX: ", *(int64_t *)data); + int32_t tmp_len = len - sizeof(int64_t) - 1; + for (int32_t i = 0; i < tmp_len; ++i) { + printf("%0x ", *(data + sizeof(int64_t) + 1 + i)); + } + printf("\t"); } else if (isCharString) { printf("%s\t", data); } @@ -1388,26 +844,26 @@ static void printBinaryData(char *data, int32_t len) { static void printBinaryDataEx(char *data, int32_t len, SSrcColumnInfo *param) { if (param->functionId == TSDB_FUNC_LAST_DST) { switch (param->type) { - case TSDB_DATA_TYPE_TINYINT:printf("%lld,%d\t", *(int64_t *) data, *(int8_t *) 
(data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_TINYINT:printf("%" PRId64 ",%d\t", *(int64_t *) data, *(int8_t *) (data + TSDB_KEYSIZE + 1)); break; - case TSDB_DATA_TYPE_SMALLINT:printf("%lld,%d\t", *(int64_t *) data, *(int16_t *) (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_SMALLINT:printf("%" PRId64 ",%d\t", *(int64_t *) data, *(int16_t *) (data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_TIMESTAMP: - case TSDB_DATA_TYPE_BIGINT:printf("%lld,%lld\t", *(int64_t *) data, *(int64_t *) (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_BIGINT:printf("%" PRId64 ",%" PRId64 "\t", *(int64_t *) data, *(int64_t *) (data + TSDB_KEYSIZE + 1)); break; - case TSDB_DATA_TYPE_FLOAT:printf("%lld,%d\t", *(int64_t *) data, *(float *) (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_FLOAT:printf("%" PRId64 ",%f\t", *(int64_t *) data, *(float *) (data + TSDB_KEYSIZE + 1)); break; - case TSDB_DATA_TYPE_DOUBLE:printf("%lld,%d\t", *(int64_t *) data, *(double *) (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_DOUBLE:printf("%" PRId64 ",%f\t", *(int64_t *) data, *(double *) (data + TSDB_KEYSIZE + 1)); break; - case TSDB_DATA_TYPE_BINARY:printf("%lld,%s\t", *(int64_t *) data, (data + TSDB_KEYSIZE + 1)); + case TSDB_DATA_TYPE_BINARY:printf("%" PRId64 ",%s\t", *(int64_t *) data, (data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_INT: - default:printf("%lld,%d\t", *(int64_t *) data, *(int32_t *) (data + TSDB_KEYSIZE + 1)); + default:printf("%" PRId64 ",%d\t", *(int64_t *) data, *(int32_t *) (data + TSDB_KEYSIZE + 1)); break; } } else if (param->functionId == TSDB_FUNC_AVG) { - printf("%f,%lld\t", *(double *) data, *(int64_t *) (data + sizeof(double) + 1)); + printf("%f,%" PRId64 "\t", *(double *) data, *(int64_t *) (data + sizeof(double) + 1)); } else { // functionId == TSDB_FUNC_MAX_DST | TSDB_FUNC_TAG switch (param->type) { @@ -1419,13 +875,13 @@ static void printBinaryDataEx(char *data, int32_t len, SSrcColumnInfo *param) { break; case TSDB_DATA_TYPE_TIMESTAMP: case 
TSDB_DATA_TYPE_BIGINT: - printf("%lld\t", *(int64_t *)data); + printf("%" PRId64 "\t", *(int64_t *)data); break; case TSDB_DATA_TYPE_FLOAT: - printf("%d\t", *(float *)data); + printf("%f\t", *(float *)data); break; case TSDB_DATA_TYPE_DOUBLE: - printf("%d\t", *(double *)data); + printf("%f\t", *(double *)data); break; case TSDB_DATA_TYPE_BINARY: printf("%s\t", data); @@ -1433,41 +889,41 @@ static void printBinaryDataEx(char *data, int32_t len, SSrcColumnInfo *param) { case TSDB_DATA_TYPE_INT: default: - printf("%d\t", *(double *)data); + printf("%f\t", *(double *)data); break; } } } -void tColModelDisplay(tColModel *pModel, void *pData, int32_t numOfRows, int32_t totalCapacity) { +void tColModelDisplay(SColumnModel *pModel, void *pData, int32_t numOfRows, int32_t totalCapacity) { for (int32_t i = 0; i < numOfRows; ++i) { for (int32_t j = 0; j < pModel->numOfCols; ++j) { char *val = COLMODEL_GET_VAL((char *)pData, pModel, totalCapacity, i, j); - int type = pModel->pFields[j].type; + int type = pModel->pFields[j].field.type; printf("type:%d ", type); switch (type) { case TSDB_DATA_TYPE_BIGINT: - printf("%lld\t", *(int64_t *)val); + printf("%" PRId64 "\t", *(int64_t *)val); break; case TSDB_DATA_TYPE_INT: printf("%d\t", *(int32_t *)val); break; case TSDB_DATA_TYPE_NCHAR: { char buf[4096] = {0}; - taosUcs4ToMbs(val, pModel->pFields[j].bytes, buf); + taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf); printf("%s\t", buf); } case TSDB_DATA_TYPE_BINARY: { - printBinaryData(val, pModel->pFields[j].bytes); + printBinaryData(val, pModel->pFields[j].field.bytes); break; } case TSDB_DATA_TYPE_DOUBLE: printf("%lf\t", *(double *)val); break; case TSDB_DATA_TYPE_TIMESTAMP: - printf("%lld\t", *(int64_t *)val); + printf("%" PRId64 "\t", *(int64_t *)val); break; case TSDB_DATA_TYPE_TINYINT: printf("%d\t", *(int8_t *)val); @@ -1490,35 +946,35 @@ void tColModelDisplay(tColModel *pModel, void *pData, int32_t numOfRows, int32_t printf("\n"); } -void tColModelDisplayEx(tColModel 
*pModel, void *pData, int32_t numOfRows, int32_t totalCapacity, +void tColModelDisplayEx(SColumnModel *pModel, void *pData, int32_t numOfRows, int32_t totalCapacity, SSrcColumnInfo *param) { for (int32_t i = 0; i < numOfRows; ++i) { for (int32_t j = 0; j < pModel->numOfCols; ++j) { char *val = COLMODEL_GET_VAL((char *)pData, pModel, totalCapacity, i, j); - printf("type:%d\t", pModel->pFields[j].type); + printf("type:%d\t", pModel->pFields[j].field.type); - switch (pModel->pFields[j].type) { + switch (pModel->pFields[j].field.type) { case TSDB_DATA_TYPE_BIGINT: - printf("%lld\t", *(int64_t *)val); + printf("%" PRId64 "\t", *(int64_t *)val); break; case TSDB_DATA_TYPE_INT: printf("%d\t", *(int32_t *)val); break; case TSDB_DATA_TYPE_NCHAR: { char buf[128] = {0}; - taosUcs4ToMbs(val, pModel->pFields[j].bytes, buf); + taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf); printf("%s\t", buf); } case TSDB_DATA_TYPE_BINARY: { - printBinaryDataEx(val, pModel->pFields[j].bytes, ¶m[j]); + printBinaryDataEx(val, pModel->pFields[j].field.bytes, ¶m[j]); break; } case TSDB_DATA_TYPE_DOUBLE: printf("%lf\t", *(double *)val); break; case TSDB_DATA_TYPE_TIMESTAMP: - printf("%lld\t", *(int64_t *)val); + printf("%" PRId64 "\t", *(int64_t *)val); break; case TSDB_DATA_TYPE_TINYINT: printf("%d\t", *(int8_t *)val); @@ -1542,20 +998,31 @@ void tColModelDisplayEx(tColModel *pModel, void *pData, int32_t numOfRows, int32 } //////////////////////////////////////////////////////////////////////////////////////////// -void tColModelCompact(tColModel *pModel, tFilePage *inputBuffer, int32_t maxElemsCapacity) { +void tColModelCompact(SColumnModel *pModel, tFilePage *inputBuffer, int32_t maxElemsCapacity) { if (inputBuffer->numOfElems == 0 || maxElemsCapacity == inputBuffer->numOfElems) { return; } /* start from the second column */ for (int32_t i = 1; i < pModel->numOfCols; ++i) { - memmove(inputBuffer->data + pModel->colOffset[i] * inputBuffer->numOfElems, - inputBuffer->data + 
pModel->colOffset[i] * maxElemsCapacity, - pModel->pFields[i].bytes * inputBuffer->numOfElems); + SSchemaEx* pSchemaEx = &pModel->pFields[i]; + memmove(inputBuffer->data + pSchemaEx->offset * inputBuffer->numOfElems, + inputBuffer->data + pSchemaEx->offset * maxElemsCapacity, + pSchemaEx->field.bytes * inputBuffer->numOfElems); } } -void tColModelErase(tColModel *pModel, tFilePage *inputBuffer, int32_t maxCapacity, int32_t s, int32_t e) { +SSchema* getColumnModelSchema(SColumnModel *pColumnModel, int32_t index) { + assert(pColumnModel != NULL && index >= 0 && index < pColumnModel->numOfCols); + return &pColumnModel->pFields[index].field; +} + +int16_t getColumnModelOffset(SColumnModel *pColumnModel, int32_t index) { + assert(pColumnModel != NULL && index >= 0 && index < pColumnModel->numOfCols); + return pColumnModel->pFields[index].offset; +} + +void tColModelErase(SColumnModel *pModel, tFilePage *inputBuffer, int32_t blockCapacity, int32_t s, int32_t e) { if (inputBuffer->numOfElems == 0 || (e - s + 1) <= 0) { return; } @@ -1566,10 +1033,13 @@ void tColModelErase(tColModel *pModel, tFilePage *inputBuffer, int32_t maxCapaci /* start from the second column */ for (int32_t i = 0; i < pModel->numOfCols; ++i) { - char *startPos = inputBuffer->data + pModel->colOffset[i] * maxCapacity + s * pModel->pFields[i].bytes; - char *endPos = startPos + pModel->pFields[i].bytes * removed; + int16_t offset = getColumnModelOffset(pModel, i); + SSchema* pSchema = getColumnModelSchema(pModel, i); + + char *startPos = inputBuffer->data + offset * blockCapacity + s * pSchema->bytes; + char *endPos = startPos + pSchema->bytes * removed; - memmove(startPos, endPos, pModel->pFields[i].bytes * secPart); + memmove(startPos, endPos, pSchema->bytes * secPart); } inputBuffer->numOfElems = remain; @@ -1582,31 +1052,31 @@ void tColModelErase(tColModel *pModel, tFilePage *inputBuffer, int32_t maxCapaci * data in srcData must has the same schema as data in dstPage, that can be * described by 
dstModel */ -void tColModelAppend(tColModel *dstModel, tFilePage *dstPage, void *srcData, int32_t start, int32_t numOfRows, +void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t start, int32_t numOfRows, int32_t srcCapacity) { - assert(dstPage->numOfElems + numOfRows <= dstModel->maxCapacity); + assert(dstPage->numOfElems + numOfRows <= dstModel->capacity); for (int32_t col = 0; col < dstModel->numOfCols; ++col) { - char *dst = COLMODEL_GET_VAL(dstPage->data, dstModel, dstModel->maxCapacity, dstPage->numOfElems, col); + char *dst = COLMODEL_GET_VAL(dstPage->data, dstModel, dstModel->capacity, dstPage->numOfElems, col); char *src = COLMODEL_GET_VAL((char *)srcData, dstModel, srcCapacity, start, col); - memmove(dst, src, dstModel->pFields[col].bytes * numOfRows); + memmove(dst, src, dstModel->pFields[col].field.bytes * numOfRows); } dstPage->numOfElems += numOfRows; } -tOrderDescriptor *tOrderDesCreate(int32_t *orderColIdx, int32_t numOfOrderCols, tColModel *pModel, +tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrderCols, SColumnModel *pModel, int32_t tsOrderType) { - tOrderDescriptor *desc = (tOrderDescriptor *)malloc(sizeof(tOrderDescriptor) + sizeof(int32_t) * numOfOrderCols); + tOrderDescriptor *desc = (tOrderDescriptor *)calloc(1, sizeof(tOrderDescriptor) + sizeof(int32_t) * numOfOrderCols); if (desc == NULL) { return NULL; } - desc->pSchema = pModel; + desc->pColumnModel = pModel; desc->tsOrder = tsOrderType; - desc->orderIdx.numOfOrderedCols = numOfOrderCols; + desc->orderIdx.numOfCols = numOfOrderCols; for (int32_t i = 0; i < numOfOrderCols; ++i) { desc->orderIdx.pData[i] = orderColIdx[i]; } @@ -1619,389 +1089,6 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) { return; } - tColModelDestroy(pDesc->pSchema); + destroyColumnModel(pDesc->pColumnModel); tfree(pDesc); } - -//////////////////////////////////////////////////////////////////////////////////////////// -static void 
findMaxMinValue(tMemBucket *pMemBucket, double *maxVal, double *minVal) { - *minVal = DBL_MAX; - *maxVal = -DBL_MAX; - - for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { - tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; - if (pSeg->pBuffer == NULL) { - continue; - } - switch (pMemBucket->dataType) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_TINYINT: { - for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { - double minv = pSeg->pBoundingEntries[j].iMinVal; - double maxv = pSeg->pBoundingEntries[j].iMaxVal; - - if (*minVal > minv) { - *minVal = minv; - } - if (*maxVal < maxv) { - *maxVal = maxv; - } - } - break; - } - case TSDB_DATA_TYPE_DOUBLE: - case TSDB_DATA_TYPE_FLOAT: { - for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { - double minv = pSeg->pBoundingEntries[j].dMinVal; - double maxv = pSeg->pBoundingEntries[j].dMaxVal; - - if (*minVal > minv) { - *minVal = minv; - } - if (*maxVal < maxv) { - *maxVal = maxv; - } - } - break; - } - case TSDB_DATA_TYPE_BIGINT: { - for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { - double minv = (double)pSeg->pBoundingEntries[j].i64MinVal; - double maxv = (double)pSeg->pBoundingEntries[j].i64MaxVal; - - if (*minVal > minv) { - *minVal = minv; - } - if (*maxVal < maxv) { - *maxVal = maxv; - } - } - break; - } - } - } -} - -static MinMaxEntry getMinMaxEntryOfNearestSlotInNextSegment(tMemBucket *pMemBucket, int32_t segIdx) { - int32_t i = segIdx + 1; - while (i < pMemBucket->numOfSegs && pMemBucket->pSegs[i].numOfSlots == 0) ++i; - - tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; - assert(pMemBucket->numOfSegs > i && pMemBucket->pSegs[i].pBuffer != NULL); - - i = 0; - while (i < pMemBucket->nSlotsOfSeg && pSeg->pBuffer[i] == NULL) ++i; - - assert(i < pMemBucket->nSlotsOfSeg); - return pSeg->pBoundingEntries[i]; -} - -/* - * - * now, we need to find the minimum value of the next slot for - * interpolating the percentile value - * j is the last slot of current segment, we need to get the first 
- * slot of the next segment. - */ -static MinMaxEntry getMinMaxEntryOfNextSlotWithData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) { - tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx]; - - MinMaxEntry next; - if (slotIdx == pSeg->numOfSlots - 1) { // find next segment with data - return getMinMaxEntryOfNearestSlotInNextSegment(pMemBucket, segIdx); - } else { - int32_t j = slotIdx + 1; - for (; j < pMemBucket->nSlotsOfSeg && pMemBucket->pSegs[segIdx].pBuffer[j] == 0; ++j) { - }; - - if (j == pMemBucket->nSlotsOfSeg) { // current slot has no available - // slot,try next segment - return getMinMaxEntryOfNearestSlotInNextSegment(pMemBucket, segIdx); - } else { - next = pSeg->pBoundingEntries[slotIdx + 1]; - assert(pSeg->pBuffer[slotIdx + 1] != NULL); - } - } - - return next; -} - -bool isIdenticalData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx); -char *getFirstElemOfMemBuffer(tMemBucketSegment *pSeg, int32_t slotIdx, tFilePage *pPage); - -double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) { - int32_t num = 0; - - for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { - tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; - for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { - if (pSeg->pBuffer == NULL || pSeg->pBuffer[j] == NULL) { - continue; - } - // required value in current slot - if (num < (count + 1) && num + pSeg->pBuffer[j]->numOfAllElems >= (count + 1)) { - if (pSeg->pBuffer[j]->numOfAllElems + num == (count + 1)) { - /* - * now, we need to find the minimum value of the next slot for interpolating the percentile value - * j is the last slot of current segment, we need to get the first slot of the next segment. 
- * - */ - MinMaxEntry next = getMinMaxEntryOfNextSlotWithData(pMemBucket, i, j); - - double maxOfThisSlot = 0; - double minOfNextSlot = 0; - switch (pMemBucket->dataType) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_TINYINT: { - maxOfThisSlot = pSeg->pBoundingEntries[j].iMaxVal; - minOfNextSlot = next.iMinVal; - break; - }; - case TSDB_DATA_TYPE_FLOAT: - case TSDB_DATA_TYPE_DOUBLE: { - maxOfThisSlot = pSeg->pBoundingEntries[j].dMaxVal; - minOfNextSlot = next.dMinVal; - break; - }; - case TSDB_DATA_TYPE_BIGINT: { - maxOfThisSlot = (double)pSeg->pBoundingEntries[j].i64MaxVal; - minOfNextSlot = (double)next.i64MinVal; - break; - } - }; - - assert(minOfNextSlot > maxOfThisSlot); - - double val = (1 - fraction) * maxOfThisSlot + fraction * minOfNextSlot; - return val; - } - if (pSeg->pBuffer[j]->numOfAllElems <= pMemBucket->maxElemsCapacity) { - // data in buffer and file are merged together to be processed. - tFilePage *buffer = loadIntoBucketFromDisk(pMemBucket, i, j, pMemBucket->pOrderDesc); - int32_t currentIdx = count - num; - - char * thisVal = buffer->data + pMemBucket->nElemSize * currentIdx; - char * nextVal = thisVal + pMemBucket->nElemSize; - double td, nd; - switch (pMemBucket->dataType) { - case TSDB_DATA_TYPE_SMALLINT: { - td = *(int16_t *)thisVal; - nd = *(int16_t *)nextVal; - break; - } - case TSDB_DATA_TYPE_TINYINT: { - td = *(int8_t *)thisVal; - nd = *(int8_t *)nextVal; - break; - } - case TSDB_DATA_TYPE_INT: { - td = *(int32_t *)thisVal; - nd = *(int32_t *)nextVal; - break; - }; - case TSDB_DATA_TYPE_FLOAT: { - //td = *(float *)thisVal; - //nd = *(float *)nextVal; - td = GET_FLOAT_VAL(thisVal); - nd = GET_FLOAT_VAL(nextVal); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - //td = *(double *)thisVal; - td = GET_DOUBLE_VAL(thisVal); - //nd = *(double *)nextVal; - nd = GET_DOUBLE_VAL(nextVal); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - td = (double)*(int64_t *)thisVal; - nd = (double)*(int64_t *)nextVal; - break; 
- } - } - double val = (1 - fraction) * td + fraction * nd; - tfree(buffer); - - return val; - } else { // incur a second round bucket split - if (isIdenticalData(pMemBucket, i, j)) { - tExtMemBuffer *pMemBuffer = pSeg->pBuffer[j]; - - tFilePage *pPage = (tFilePage *)malloc(pMemBuffer->nPageSize); - - char *thisVal = getFirstElemOfMemBuffer(pSeg, j, pPage); - - double finalResult = 0.0; - - switch (pMemBucket->dataType) { - case TSDB_DATA_TYPE_SMALLINT: { - finalResult = *(int16_t *)thisVal; - break; - } - case TSDB_DATA_TYPE_TINYINT: { - finalResult = *(int8_t *)thisVal; - break; - } - case TSDB_DATA_TYPE_INT: { - finalResult = *(int32_t *)thisVal; - break; - }; - case TSDB_DATA_TYPE_FLOAT: { - //finalResult = *(float *)thisVal; - finalResult = GET_FLOAT_VAL(thisVal); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - //finalResult = *(double *)thisVal; - finalResult = GET_DOUBLE_VAL(thisVal); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - finalResult = (double)(*(int64_t *)thisVal); - break; - } - } - - free(pPage); - return finalResult; - } - - pTrace("MemBucket:%p,start second round bucketing", pMemBucket); - - if (pSeg->pBuffer[j]->numOfElemsInBuffer != 0) { - pTrace("MemBucket:%p,flush %d pages to disk, clear status", pMemBucket, pSeg->pBuffer[j]->numOfPagesInMem); - - pMemBucket->numOfAvailPages += pSeg->pBuffer[j]->numOfPagesInMem; - tExtMemBufferFlush(pSeg->pBuffer[j]); - } - - tExtMemBuffer *pMemBuffer = pSeg->pBuffer[j]; - pSeg->pBuffer[j] = NULL; - - // release all - for (int32_t tt = 0; tt < pMemBucket->numOfSegs; ++tt) { - tMemBucketSegment *pSeg = &pMemBucket->pSegs[tt]; - for (int32_t ttx = 0; ttx < pSeg->numOfSlots; ++ttx) { - if (pSeg->pBuffer && pSeg->pBuffer[ttx]) { - tExtMemBufferDestroy(&pSeg->pBuffer[ttx]); - } - } - } - - pMemBucket->nRange.i64MaxVal = pSeg->pBoundingEntries->i64MaxVal; - pMemBucket->nRange.i64MinVal = pSeg->pBoundingEntries->i64MinVal; - pMemBucket->numOfElems = 0; - - for (int32_t tt = 0; tt < pMemBucket->numOfSegs; ++tt) { - 
tMemBucketSegment *pSeg = &pMemBucket->pSegs[tt]; - for (int32_t ttx = 0; ttx < pSeg->numOfSlots; ++ttx) { - if (pSeg->pBoundingEntries) { - resetBoundingBox(pSeg, pMemBucket->dataType); - } - } - } - - tFilePage *pPage = (tFilePage *)malloc(pMemBuffer->nPageSize); - - tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[0]; - assert(pFlushInfo->numOfPages == pMemBuffer->fileMeta.nFileSize); - - int32_t ret = fseek(pMemBuffer->dataFile, pFlushInfo->startPageId * pMemBuffer->nPageSize, SEEK_SET); - UNUSED(ret); - - for (uint32_t jx = 0; jx < pFlushInfo->numOfPages; ++jx) { - ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); - tMemBucketPut(pMemBucket, pPage->data, pPage->numOfElems); - } - - fclose(pMemBuffer->dataFile); - if (unlink(pMemBuffer->dataFilePath) != 0) { - pError("MemBucket:%p,remove tmp file %s failed", pMemBucket, pMemBuffer->dataFilePath); - } - tfree(pMemBuffer); - tfree(pPage); - - return getPercentileImpl(pMemBucket, count - num, fraction); - } - } else { - num += pSeg->pBuffer[j]->numOfAllElems; - } - } - } - return 0; -} - -double getPercentile(tMemBucket *pMemBucket, double percent) { - if (pMemBucket->numOfElems == 0) { - return 0.0; - } - - if (pMemBucket->numOfElems == 1) { // return the only element - return findOnlyResult(pMemBucket); - } - - percent = fabs(percent); - - // validate the parameters - if (fabs(percent - 100.0) < DBL_EPSILON || (percent < DBL_EPSILON)) { - double minx = 0, maxx = 0; - /* - * find the min/max value, no need to scan all data in bucket - */ - findMaxMinValue(pMemBucket, &maxx, &minx); - - return fabs(percent - 100) < DBL_EPSILON ? 
maxx : minx; - } - - double percentVal = (percent * (pMemBucket->numOfElems - 1)) / ((double)100.0); - int32_t orderIdx = (int32_t)percentVal; - - // do put data by using buckets - return getPercentileImpl(pMemBucket, orderIdx, percentVal - orderIdx); -} - -/* - * check if data in one slot are all identical - * only need to compare with the bounding box - */ -bool isIdenticalData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) { - tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx]; - - if (pMemBucket->dataType == TSDB_DATA_TYPE_INT || pMemBucket->dataType == TSDB_DATA_TYPE_BIGINT || - pMemBucket->dataType == TSDB_DATA_TYPE_SMALLINT || pMemBucket->dataType == TSDB_DATA_TYPE_TINYINT) { - return pSeg->pBoundingEntries[slotIdx].i64MinVal == pSeg->pBoundingEntries[slotIdx].i64MaxVal; - } - - if (pMemBucket->dataType == TSDB_DATA_TYPE_FLOAT || pMemBucket->dataType == TSDB_DATA_TYPE_DOUBLE) { - return fabs(pSeg->pBoundingEntries[slotIdx].dMaxVal - pSeg->pBoundingEntries[slotIdx].dMinVal) < DBL_EPSILON; - } - - return false; -} - -/* - * get the first element of one slot into memory. 
- * if no data of current slot in memory, load it from disk - */ -char *getFirstElemOfMemBuffer(tMemBucketSegment *pSeg, int32_t slotIdx, tFilePage *pPage) { - tExtMemBuffer *pMemBuffer = pSeg->pBuffer[slotIdx]; - char * thisVal = NULL; - - if (pSeg->pBuffer[slotIdx]->numOfElemsInBuffer != 0) { - thisVal = pSeg->pBuffer[slotIdx]->pHead->item.data; - } else { - /* - * no data in memory, load one page into memory - */ - tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[0]; - assert(pFlushInfo->numOfPages == pMemBuffer->fileMeta.nFileSize); - - fseek(pMemBuffer->dataFile, pFlushInfo->startPageId * pMemBuffer->nPageSize, SEEK_SET); - size_t ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); - UNUSED(ret); - thisVal = pPage->data; - } - return thisVal; -} diff --git a/src/util/src/tglobalcfg.c b/src/util/src/tglobalcfg.c index f888537a0f4288ad70e05934f4dc78c427747f1c..45efcad56394b1507c60d383a60199d0b96afb02 100644 --- a/src/util/src/tglobalcfg.c +++ b/src/util/src/tglobalcfg.c @@ -75,16 +75,25 @@ int tsMetricMetaKeepTimer = 600; // second float tsNumOfThreadsPerCore = 1.0; float tsRatioOfQueryThreads = 0.5; char tsPublicIp[TSDB_IPv4ADDR_LEN] = {0}; -char tsInternalIp[TSDB_IPv4ADDR_LEN] = {0}; char tsPrivateIp[TSDB_IPv4ADDR_LEN] = {0}; -char tsServerIpStr[TSDB_IPv4ADDR_LEN] = "127.0.0.1"; short tsNumOfVnodesPerCore = 8; short tsNumOfTotalVnodes = 0; short tsCheckHeaderFile = 0; +#ifdef _TD_ARM_32_ +int tsSessionsPerVnode = 100; +#else int tsSessionsPerVnode = 1000; +#endif + int tsCacheBlockSize = 16384; // 256 columns int tsAverageCacheBlocks = TSDB_DEFAULT_AVG_BLOCKS; +/** + * Change the meaning of affected rows: + * 0: affected rows not include those duplicate records + * 1: affected rows include those duplicate records + */ +short tsAffectedRowsMod = 0; int tsRowsInFileBlock = 4096; float tsFileBlockMinPercent = 0.05; @@ -119,11 +128,16 @@ int tsBalanceMonitorInterval = 2; // seconds int tsBalanceStartInterval = 300; // 
seconds int tsBalancePolicy = 0; // 1-use sys.montor int tsOfflineThreshold = 864000; // seconds 10days -int tsMgmtEqualVnodeNum = 0; +int tsMgmtEqualVnodeNum = 4; int tsEnableHttpModule = 1; int tsEnableMonitorModule = 1; int tsRestRowLimit = 10240; +int tsMaxSQLStringLen = TSDB_MAX_SQL_LEN; + +// the maximum number of results for projection query on super table that are returned from +// one virtual node, to order according to timestamp +int tsMaxNumOfOrderedResults = 100000; /* * denote if the server needs to compress response message at the application layer to client, including query rsp, @@ -135,18 +149,29 @@ int tsRestRowLimit = 10240; */ int tsCompressMsgSize = -1; -char tsSocketType[4] = "udp"; // use UDP by default[option: udp, tcp] -int tsTimePrecision = TSDB_TIME_PRECISION_MILLI; // time precision, millisecond by default -int tsMinSlidingTime = 10; // 10 ms for sliding time, the value will changed in - // case of time precision changed -int tsMinIntervalTime = 10; // 10 ms for interval time range, changed accordingly -int tsMaxStreamComputDelay = 20000; // 20sec, the maximum value of stream - // computing delay, changed accordingly -int tsStreamCompStartDelay = 10000; // 10sec, the first stream computing delay - // time after system launched successfully, - // changed accordingly -int tsStreamCompRetryDelay = 10; // the stream computing delay time after - // executing failed, change accordingly +// use UDP by default[option: udp, tcp] +char tsSocketType[4] = "udp"; + +// time precision, millisecond by default +int tsTimePrecision = TSDB_TIME_PRECISION_MILLI; + +// 10 ms for sliding time, the value will changed in case of time precision changed +int tsMinSlidingTime = 10; + +// 10 ms for interval time range, changed accordingly +int tsMinIntervalTime = 10; + +// 20sec, the maximum value of stream computing delay, changed accordingly +int tsMaxStreamComputDelay = 20000; + +// 10sec, the first stream computing delay time after system launched successfully, 
changed accordingly +int tsStreamCompStartDelay = 10000; + +// the stream computing delay time after executing failed, change accordingly +int tsStreamCompRetryDelay = 10; + +// The delayed computing ration. 10% of the whole computing time window by default. +float tsStreamComputDelayRatio = 0.1; int tsProjectExecInterval = 10000; // every 10sec, the projection will be executed once int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance @@ -160,10 +185,17 @@ int tsHttpMaxThreads = 2; int tsHttpEnableCompress = 0; int tsHttpEnableRecordSql = 0; int tsTelegrafUseFieldNum = 0; -int tsAdminRowLimit = 10240; int tsTscEnableRecordSql = 0; int tsEnableCoreFile = 0; +int tsAnyIp = 1; +uint32_t tsPublicIpInt = 0; + +#ifdef CLUSTER +int tsIsCluster = 1; +#else +int tsIsCluster = 0; +#endif int tsRpcTimer = 300; int tsRpcMaxTime = 600; // seconds; @@ -434,23 +466,17 @@ static void doInitGlobalConfig() { // ip address tsInitConfigOption(cfg++, "masterIp", tsMasterIp, TSDB_CFG_VTYPE_IPSTR, - TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_CLUSTER, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "secondIp", tsSecondIp, TSDB_CFG_VTYPE_IPSTR, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_CLUSTER, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); - tsInitConfigOption(cfg++, "serverIp", tsServerIpStr, TSDB_CFG_VTYPE_IPSTR, - TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_LITE, - 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "publicIp", tsPublicIp, TSDB_CFG_VTYPE_IPSTR, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLUSTER, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "privateIp", tsPrivateIp, TSDB_CFG_VTYPE_IPSTR, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLUSTER, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); - tsInitConfigOption(cfg++, "internalIp", tsInternalIp, 
TSDB_CFG_VTYPE_IPSTR, - TSDB_CFG_CTYPE_B_CONFIG, - 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "localIp", tsLocalIp, TSDB_CFG_VTYPE_IPSTR, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); @@ -535,6 +561,9 @@ static void doInitGlobalConfig() { tsInitConfigOption(cfg++, "alternativeRole", &tsAlternativeRole, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLUSTER, 0, 2, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "affectedRowsMod", &tsAffectedRowsMod, TSDB_CFG_VTYPE_SHORT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, + 0, 1, 0, TSDB_CFG_UTYPE_NONE); // 0-any, 1-mgmt, 2-dnode // timer @@ -613,9 +642,12 @@ static void doInitGlobalConfig() { TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1000, 1000000000, 0, TSDB_CFG_UTYPE_MS); tsInitConfigOption(cfg++, "retryStreamCompDelay", &tsStreamCompRetryDelay, TSDB_CFG_VTYPE_INT, - TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, - 10, 1000000000, 0, TSDB_CFG_UTYPE_MS); - + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 1000000000, 0, TSDB_CFG_UTYPE_MS); + + + tsInitConfigOption(cfg++, "streamCompDelayRatio", &tsStreamComputDelayRatio, TSDB_CFG_VTYPE_FLOAT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0.1, 0.9, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "clog", &tsCommitLog, TSDB_CFG_VTYPE_SHORT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0, 1, 0, TSDB_CFG_UTYPE_NONE); @@ -653,7 +685,15 @@ static void doInitGlobalConfig() { tsInitConfigOption(cfg++, "compressMsgSize", &tsCompressMsgSize, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW, -1, 10000000, 0, TSDB_CFG_UTYPE_NONE); - + + tsInitConfigOption(cfg++, "maxSQLLength", &tsMaxSQLStringLen, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW, + TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN, 0, TSDB_CFG_UTYPE_BYTE); + + 
tsInitConfigOption(cfg++, "maxNumOfOrderedRes", &tsMaxNumOfOrderedResults, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW, + TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN, 0, TSDB_CFG_UTYPE_NONE); + // locale & charset tsInitConfigOption(cfg++, "timezone", tsTimezone, TSDB_CFG_VTYPE_STRING, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, @@ -709,7 +749,7 @@ static void doInitGlobalConfig() { 1, 100000, 0, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "httpEnableRecordSql", &tsHttpEnableRecordSql, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG, - 1, 100000, 0, TSDB_CFG_UTYPE_NONE); + 0, 1, 0, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "telegrafUseFieldNum", &tsTelegrafUseFieldNum, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0, 1, 1, TSDB_CFG_UTYPE_NONE); @@ -773,12 +813,16 @@ static void doInitGlobalConfig() { tsInitConfigOption(cfg++, "tscEnableRecordSql", &tsTscEnableRecordSql, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG, - 1, 100000, 0, TSDB_CFG_UTYPE_NONE); + 0, 1, 0, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "enableCoreFile", &tsEnableCoreFile, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG, - 1, 100000, 0, TSDB_CFG_UTYPE_NONE); - + 0, 1, 0, TSDB_CFG_UTYPE_NONE); + + tsInitConfigOption(cfg++, "anyIp", &tsAnyIp, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLUSTER, + 0, 1, 0, TSDB_CFG_UTYPE_NONE); + // version info tsInitConfigOption(cfg++, "gitinfo", gitinfo, TSDB_CFG_VTYPE_STRING, TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT, @@ -906,10 +950,7 @@ bool tsReadGlobalConfig() { if (tsPublicIp[0] == 0) { strcpy(tsPublicIp, tsPrivateIp); } - - if (tsInternalIp[0] == 0) { - strcpy(tsInternalIp, tsPrivateIp); - } + tsPublicIpInt = inet_addr(tsPublicIp); if (tsLocalIp[0] == 0) { strcpy(tsLocalIp, tsPrivateIp); diff --git a/src/util/src/thashutil.c b/src/util/src/thashutil.c index b6b3ea682ef945a838f67ca227c8033624234725..cf16efe2f8e539f9611952111bafc5d4ff214d3e 
100644 --- a/src/util/src/thashutil.c +++ b/src/util/src/thashutil.c @@ -8,6 +8,7 @@ * */ #include "tutil.h" +#include "hashutil.h" #define ROTL32(x, r) ((x) << (r) | (x) >> (32 - (r))) @@ -67,7 +68,7 @@ static void MurmurHash3_32_s(const void *key, int len, uint32_t seed, void *out) *(uint32_t *)out = h1; } -uint32_t MurmurHash3_32(const void *key, int len) { +uint32_t MurmurHash3_32(const char *key, uint32_t len) { const int32_t hashSeed = 0x12345678; uint32_t val = 0; @@ -75,3 +76,31 @@ uint32_t MurmurHash3_32(const void *key, int len) { return val; } + +uint32_t taosIntHash_32(const char *key, uint32_t UNUSED_PARAM(len)) { return *(uint32_t *)key; } +uint32_t taosIntHash_16(const char *key, uint32_t UNUSED_PARAM(len)) { return *(uint16_t *)key; } +uint32_t taosIntHash_8(const char *key, uint32_t UNUSED_PARAM(len)) { return *(uint8_t *)key; } + +uint32_t taosIntHash_64(const char *key, uint32_t UNUSED_PARAM(len)) { + uint64_t val = *(uint64_t *)key; + + uint64_t hash = val >> 16U; + hash += (val & 0xFFFFU); + + return hash; +} + +_hash_fn_t taosGetDefaultHashFunction(int32_t type) { + _hash_fn_t fn = NULL; + switch(type) { + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: fn = taosIntHash_64;break; + case TSDB_DATA_TYPE_BINARY: fn = MurmurHash3_32;break; + case TSDB_DATA_TYPE_INT: fn = taosIntHash_32; break; + case TSDB_DATA_TYPE_SMALLINT: fn = taosIntHash_16; break; + case TSDB_DATA_TYPE_TINYINT: fn = taosIntHash_8; break; + default: fn = taosIntHash_32;break; + } + + return fn; +} \ No newline at end of file diff --git a/src/util/src/thistogram.c b/src/util/src/thistogram.c index 5fef9077ea8157c347ae062db55bb257f38cd675..93046cf796220c1d0fb2a64d1b3501f254ea351d 100644 --- a/src/util/src/thistogram.c +++ b/src/util/src/thistogram.c @@ -12,7 +12,6 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ - #include "os.h" #include "taosmsg.h" @@ -447,14 +446,14 @@ void tHistogramPrint(SHistogramInfo* pHisto) { printf("total entries: %d, elements: %d\n", pHisto->numOfEntries, pHisto->numOfElems); #if defined(USE_ARRAYLIST) for (int32_t i = 0; i < pHisto->numOfEntries; ++i) { - printf("%d: (%f, %lld)\n", i + 1, pHisto->elems[i].val, pHisto->elems[i].num); + printf("%d: (%f, %" PRId64 ")\n", i + 1, pHisto->elems[i].val, pHisto->elems[i].num); } #else tSkipListNode* pNode = pHisto->pList->pHead.pForward[0]; for (int32_t i = 0; i < pHisto->numOfEntries; ++i) { SHistBin* pEntry = (SHistBin*)pNode->pData; - printf("%d: (%f, %lld)\n", i + 1, pEntry->val, pEntry->num); + printf("%d: (%f, %" PRId64 ")\n", i + 1, pEntry->val, pEntry->num); pNode = pNode->pForward[0]; } #endif diff --git a/src/util/src/tinterpolation.c b/src/util/src/tinterpolation.c index ee0c7aa0097d4ae3994b6264eedd6de94b38699f..82cc52cd42ef2a4c8c40d61d40aa6e956b96a1be 100644 --- a/src/util/src/tinterpolation.c +++ b/src/util/src/tinterpolation.c @@ -37,7 +37,7 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t timeRange, char * here we revised the start time of day according to the local time zone, * but in case of DST, the start time of one day need to be dynamically decided. 
* - * TODO dynmaically decide the start time of a day + * TODO dynamically decide the start time of a day */ #if defined(WINDOWS) && _MSC_VER >= 1900 @@ -77,12 +77,24 @@ void taosInitInterpoInfo(SInterpolationInfo* pInterpoInfo, int32_t order, int64_ tfree(pInterpoInfo->prevValues); } +// the SInterpolationInfo itself will not be released +void taosDestoryInterpoInfo(SInterpolationInfo *pInterpoInfo) { + if (pInterpoInfo == NULL) { + return; + } + + tfree(pInterpoInfo->prevValues); + tfree(pInterpoInfo->nextValues); + + tfree(pInterpoInfo->pTags); +} + void taosInterpoSetStartInfo(SInterpolationInfo* pInterpoInfo, int32_t numOfRawDataInRows, int32_t type) { if (type == TSDB_INTERPO_NONE) { return; } - pInterpoInfo->rowIdx = INTERPOL_IS_ASC_INTERPOL(pInterpoInfo) ? 0 : numOfRawDataInRows - 1; + pInterpoInfo->rowIdx = 0;//INTERPOL_IS_ASC_INTERPOL(pInterpoInfo) ? 0 : numOfRawDataInRows - 1; pInterpoInfo->numOfRawDataInRows = numOfRawDataInRows; } @@ -106,14 +118,14 @@ int32_t taosGetNumOfResWithoutLimit(SInterpolationInfo* pInterpoInfo, int64_t* p if (numOfAvailRawData > 0) { int32_t finalNumOfResult = 0; - if (pInterpoInfo->order == TSQL_SO_ASC) { +// if (pInterpoInfo->order == TSQL_SO_ASC) { // get last timestamp, calculate the result size int64_t lastKey = pPrimaryKeyArray[pInterpoInfo->numOfRawDataInRows - 1]; - finalNumOfResult = (int32_t)((lastKey - pInterpoInfo->startTimestamp) / nInterval) + 1; - } else { // todo error less than one!!! - TSKEY lastKey = pPrimaryKeyArray[0]; - finalNumOfResult = (int32_t)((pInterpoInfo->startTimestamp - lastKey) / nInterval) + 1; - } + finalNumOfResult = (int32_t)(labs(lastKey - pInterpoInfo->startTimestamp) / nInterval) + 1; +// } else { // todo error less than one!!! 
+// TSKEY lastKey = pPrimaryKeyArray[0]; +// finalNumOfResult = (int32_t)((pInterpoInfo->startTimestamp - lastKey) / nInterval) + 1; +// } assert(finalNumOfResult >= numOfAvailRawData); return finalNumOfResult; @@ -186,23 +198,25 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi } static char* getPos(char* data, int32_t bytes, int32_t order, int32_t capacity, int32_t index) { - if (order == TSQL_SO_ASC) { +// if (order == TSQL_SO_ASC) { return data + index * bytes; - } else { - return data + (capacity - index - 1) * bytes; - } +// } else { +// return data + (capacity - index - 1) * bytes; +// } } -static void setTagsValueInInterpolation(tFilePage** data, char** pTags, tColModel* pModel, int32_t order, int32_t start, +static void setTagsValueInInterpolation(tFilePage** data, char** pTags, SColumnModel* pModel, int32_t order, int32_t start, int32_t capacity, int32_t num) { for (int32_t j = 0, i = start; i < pModel->numOfCols; ++i, ++j) { - char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, order, capacity, num); - assignVal(val1, pTags[j], pModel->pFields[i].bytes, pModel->pFields[i].type); + SSchema* pSchema = getColumnModelSchema(pModel, i); + + char* val1 = getPos(data[i]->data, pSchema->bytes, order, capacity, num); + assignVal(val1, pTags[j], pSchema->bytes, pSchema->type); } } static void doInterpoResultImpl(SInterpolationInfo* pInterpoInfo, int16_t interpoType, tFilePage** data, - tColModel* pModel, int32_t* num, char** srcData, int64_t nInterval, int64_t* defaultVal, + SColumnModel* pModel, int32_t* num, char** srcData, int64_t nInterval, int64_t* defaultVal, int64_t currentTimestamp, int32_t capacity, int32_t numOfTags, char** pTags, bool outOfBound) { char** prevValues = &pInterpoInfo->prevValues; @@ -222,18 +236,23 @@ static void doInterpoResultImpl(SInterpolationInfo* pInterpoInfo, int16_t interp char* pInterpolationData = INTERPOL_IS_ASC_INTERPOL(pInterpoInfo) ? 
*prevValues : *nextValues; if (pInterpolationData != NULL) { for (int32_t i = 1; i < numOfValCols; ++i) { - char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, capacity, *num); + SSchema* pSchema = getColumnModelSchema(pModel, i); + int16_t offset = getColumnModelOffset(pModel, i); + + char* val1 = getPos(data[i]->data, pSchema->bytes, pInterpoInfo->order, capacity, *num); - if (isNull(pInterpolationData + pModel->colOffset[i], pModel->pFields[i].type)) { - setNull(val1, pModel->pFields[i].type, pModel->pFields[i].bytes); + if (isNull(pInterpolationData + offset, pSchema->type)) { + setNull(val1, pSchema->type, pSchema->bytes); } else { - assignVal(val1, pInterpolationData + pModel->colOffset[i], pModel->pFields[i].bytes, pModel->pFields[i].type); + assignVal(val1, pInterpolationData + offset, pSchema->bytes, pSchema->type); } } } else { /* no prev value yet, set the value for null */ for (int32_t i = 1; i < numOfValCols; ++i) { - char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, capacity, *num); - setNull(val1, pModel->pFields[i].type, pModel->pFields[i].bytes); + SSchema* pSchema = getColumnModelSchema(pModel, i); + + char* val1 = getPos(data[i]->data, pSchema->bytes, pInterpoInfo->order, capacity, *num); + setNull(val1, pSchema->type, pSchema->bytes); } } @@ -242,34 +261,41 @@ static void doInterpoResultImpl(SInterpolationInfo* pInterpoInfo, int16_t interp // TODO : linear interpolation supports NULL value if (*prevValues != NULL && !outOfBound) { for (int32_t i = 1; i < numOfValCols; ++i) { - int32_t type = pModel->pFields[i].type; - char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, capacity, *num); + SSchema* pSchema = getColumnModelSchema(pModel, i); + int16_t offset = getColumnModelOffset(pModel, i); + + int16_t type = pSchema->type; + char* val1 = getPos(data[i]->data, pSchema->bytes, pInterpoInfo->order, capacity, *num); if (type == TSDB_DATA_TYPE_BINARY || type 
== TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) { - setNull(val1, pModel->pFields[i].type, pModel->pFields[i].bytes); + setNull(val1, type, pSchema->bytes); continue; } - point1 = (SPoint){.key = *(TSKEY*)(*prevValues), .val = *prevValues + pModel->colOffset[i]}; - point2 = (SPoint){.key = currentTimestamp, .val = srcData[i] + pInterpoInfo->rowIdx * pModel->pFields[i].bytes}; + point1 = (SPoint){.key = *(TSKEY*)(*prevValues), .val = *prevValues + offset}; + point2 = (SPoint){.key = currentTimestamp, .val = srcData[i] + pInterpoInfo->rowIdx * pSchema->bytes}; point = (SPoint){.key = pInterpoInfo->startTimestamp, .val = val1}; - taosDoLinearInterpolation(pModel->pFields[i].type, &point1, &point2, &point); + taosDoLinearInterpolation(type, &point1, &point2, &point); } setTagsValueInInterpolation(data, pTags, pModel, pInterpoInfo->order, numOfValCols, capacity, *num); } else { for (int32_t i = 1; i < numOfValCols; ++i) { - char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, capacity, *num); - setNull(val1, pModel->pFields[i].type, pModel->pFields[i].bytes); + SSchema* pSchema = getColumnModelSchema(pModel, i); + + char* val1 = getPos(data[i]->data, pSchema->bytes, pInterpoInfo->order, capacity, *num); + setNull(val1, pSchema->type, pSchema->bytes); } setTagsValueInInterpolation(data, pTags, pModel, pInterpoInfo->order, numOfValCols, capacity, *num); } } else { /* default value interpolation */ for (int32_t i = 1; i < numOfValCols; ++i) { - char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, capacity, *num); - assignVal(val1, (char*)&defaultVal[i], pModel->pFields[i].bytes, pModel->pFields[i].type); + SSchema* pSchema = getColumnModelSchema(pModel, i); + + char* val1 = getPos(data[i]->data, pSchema->bytes, pInterpoInfo->order, capacity, *num); + assignVal(val1, (char*)&defaultVal[i], pSchema->bytes, pSchema->type); } setTagsValueInInterpolation(data, pTags, pModel, pInterpoInfo->order, numOfValCols, 
capacity, *num); @@ -283,8 +309,8 @@ static void doInterpoResultImpl(SInterpolationInfo* pInterpoInfo, int16_t interp int32_t taosDoInterpoResult(SInterpolationInfo* pInterpoInfo, int16_t interpoType, tFilePage** data, int32_t numOfRawDataInRows, int32_t outputRows, int64_t nInterval, - int64_t* pPrimaryKeyArray, tColModel* pModel, char** srcData, int64_t* defaultVal, - int32_t* functionIDs, int32_t bufSize) { + const int64_t* pPrimaryKeyArray, SColumnModel* pModel, char** srcData, int64_t* defaultVal, + const int32_t* functionIDs, int32_t bufSize) { int32_t num = 0; pInterpoInfo->numOfCurrentInterpo = 0; @@ -316,17 +342,21 @@ int32_t taosDoInterpoResult(SInterpolationInfo* pInterpoInfo, int16_t interpoTyp (pInterpoInfo->startTimestamp > currentTimestamp && !INTERPOL_IS_ASC_INTERPOL(pInterpoInfo))) { /* set the next value for interpolation */ if (*nextValues == NULL) { - *nextValues = - calloc(1, pModel->colOffset[pModel->numOfCols - 1] + pModel->pFields[pModel->numOfCols - 1].bytes); + *nextValues = calloc(1, pModel->rowSize); for (int i = 1; i < pModel->numOfCols; i++) { - setNull(*nextValues + pModel->colOffset[i], pModel->pFields[i].type, pModel->pFields[i].bytes); + int16_t offset = getColumnModelOffset(pModel, i); + SSchema* pSchema = getColumnModelSchema(pModel, i); + + setNull(*nextValues + offset, pSchema->type, pSchema->bytes); } } int32_t offset = pInterpoInfo->rowIdx; for (int32_t tlen = 0, i = 0; i < pModel->numOfCols - numOfTags; ++i) { - memcpy(*nextValues + tlen, srcData[i] + offset * pModel->pFields[i].bytes, pModel->pFields[i].bytes); - tlen += pModel->pFields[i].bytes; + SSchema* pSchema = getColumnModelSchema(pModel, i); + + memcpy(*nextValues + tlen, srcData[i] + offset * pSchema->bytes, pSchema->bytes); + tlen += pSchema->bytes; } } @@ -346,37 +376,41 @@ int32_t taosDoInterpoResult(SInterpolationInfo* pInterpoInfo, int16_t interpoTyp if (pInterpoInfo->startTimestamp == currentTimestamp) { if (*prevValues == NULL) { - *prevValues = - calloc(1, 
pModel->colOffset[pModel->numOfCols - 1] + pModel->pFields[pModel->numOfCols - 1].bytes); + *prevValues = calloc(1, pModel->rowSize); for (int i = 1; i < pModel->numOfCols; i++) { - setNull(*prevValues + pModel->colOffset[i], pModel->pFields[i].type, pModel->pFields[i].bytes); + int16_t offset = getColumnModelOffset(pModel, i); + SSchema* pSchema = getColumnModelSchema(pModel, i); + + setNull(*prevValues + offset, pSchema->type, pSchema->bytes); } } // assign rows to dst buffer int32_t i = 0; for (int32_t tlen = 0; i < pModel->numOfCols - numOfTags; ++i) { - char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, bufSize, num); + int16_t offset = getColumnModelOffset(pModel, i); + SSchema* pSchema = getColumnModelSchema(pModel, i); + + char* val1 = getPos(data[i]->data, pSchema->bytes, pInterpoInfo->order, bufSize, num); if (i == 0 || (functionIDs[i] != TSDB_FUNC_COUNT && - !isNull(srcData[i] + pInterpoInfo->rowIdx * pModel->pFields[i].bytes, pModel->pFields[i].type)) || + !isNull(srcData[i] + pInterpoInfo->rowIdx * pSchema->bytes, pSchema->type)) || (functionIDs[i] == TSDB_FUNC_COUNT && - *(int64_t*)(srcData[i] + pInterpoInfo->rowIdx * pModel->pFields[i].bytes) != 0)) { - assignVal(val1, srcData[i] + pInterpoInfo->rowIdx * pModel->pFields[i].bytes, pModel->pFields[i].bytes, - pModel->pFields[i].type); - memcpy(*prevValues + tlen, srcData[i] + pInterpoInfo->rowIdx * pModel->pFields[i].bytes, - pModel->pFields[i].bytes); + *(int64_t*)(srcData[i] + pInterpoInfo->rowIdx * pSchema->bytes) != 0)) { + + assignVal(val1, srcData[i] + pInterpoInfo->rowIdx * pSchema->bytes, pSchema->bytes, pSchema->type); + memcpy(*prevValues + tlen, srcData[i] + pInterpoInfo->rowIdx * pSchema->bytes, pSchema->bytes); } else { // i > 0 and isNULL, do interpolation if (interpoType == TSDB_INTERPO_PREV) { - assignVal(val1, *prevValues + pModel->colOffset[i], pModel->pFields[i].bytes, pModel->pFields[i].type); + assignVal(val1, *prevValues + offset, pSchema->bytes, 
pSchema->type); } else if (interpoType == TSDB_INTERPO_LINEAR) { // TODO: } else { - assignVal(val1, (char*)&defaultVal[i], pModel->pFields[i].bytes, pModel->pFields[i].type); + assignVal(val1, (char*)&defaultVal[i], pSchema->bytes, pSchema->type); } } - tlen += pModel->pFields[i].bytes; + tlen += pSchema->bytes; } /* set the tag value for final result */ @@ -385,7 +419,7 @@ int32_t taosDoInterpoResult(SInterpolationInfo* pInterpoInfo, int16_t interpoTyp } pInterpoInfo->startTimestamp += (nInterval * step); - pInterpoInfo->rowIdx += step; + pInterpoInfo->rowIdx += 1; num += 1; if ((pInterpoInfo->rowIdx >= pInterpoInfo->numOfRawDataInRows && INTERPOL_IS_ASC_INTERPOL(pInterpoInfo)) || diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 1a7f672e00321c0891aa54ae9f3cc3efedb89d54..21818e572f3fc49a2841c2f494362e2a7103f9f0 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -45,6 +45,7 @@ typedef struct { uint32_t uDebugFlag = 131; // all the messages short tsAsyncLog = 1; +static pid_t logPid = 0; static SLogBuff *logHandle = NULL; static int taosLogFileNum = 1; static int taosLogMaxLines = 0; @@ -82,6 +83,11 @@ int taosStartLog() { } int taosInitLog(char *logName, int numOfLogLines, int maxFiles) { + +#ifdef LINUX + logPid = (pid_t)syscall(SYS_gettid); +#endif + logHandle = taosLogBuffNew(TSDB_DEFAULT_LOG_BUF_SIZE); if (logHandle == NULL) return -1; @@ -306,8 +312,8 @@ char *tprefix(char *prefix) { sprintf(prefix, "%02d/%02d %02d:%02d:%02d.%06d 0x%lld ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); #else - sprintf(prefix, "%02d/%02d %02d:%02d:%02d.%06d 0x%lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, - ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); + sprintf(prefix, "%02d/%02d %02d:%02d:%02d.%06d %d 0x%lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, logPid, pthread_self()); #endif return prefix; } @@ -333,8 
+339,8 @@ void tprintf(const char *const flags, int dflag, const char *const format, ...) len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%lld ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); #else - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, - ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, logPid, pthread_self()); #endif len += sprintf(buffer + len, "%s", flags); @@ -424,8 +430,8 @@ void taosPrintLongString(const char *const flags, int dflag, const char *const f len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%lld ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); #else - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, - ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, logPid, pthread_self()); #endif len += sprintf(buffer + len, "%s", flags); diff --git a/src/util/src/tlosertree.c b/src/util/src/tlosertree.c index 7da03347b10cc7f6ff02df712f892a4289ffc1f1..4fe68970b9504d81cdbc779425c9439d32ff597c 100644 --- a/src/util/src/tlosertree.c +++ b/src/util/src/tlosertree.c @@ -13,10 +13,7 @@ * along with this program. If not, see . 
*/ -#include -#include -#include - +#include "os.h" #include "taosmsg.h" #include "tlog.h" #include "tlosertree.h" @@ -48,7 +45,7 @@ uint8_t tLoserTreeCreate(SLoserTreeInfo** pTree, int32_t numOfEntries, void* par *pTree = (SLoserTreeInfo*)calloc(1, sizeof(SLoserTreeInfo) + sizeof(SLoserTreeNode) * totalEntries); if ((*pTree) == NULL) { - pError("allocate memory for losertree failed. out of memory"); + pError("allocate memory for loser-tree failed. reason:%s", strerror(errno)); return TSDB_CODE_CLI_OUT_OF_MEMORY; } diff --git a/src/util/src/tnote.c b/src/util/src/tnote.c index d12cc6e613cc9ce80574ab456e4fddb0ea4d75ad..7a133590d2a450d8e8b688bc63515c0ad9e81912 100644 --- a/src/util/src/tnote.c +++ b/src/util/src/tnote.c @@ -231,8 +231,13 @@ void taosNotePrint(taosNoteInfo * pNote, const char * const format, ...) gettimeofday(&timeSecs, NULL); curTime = timeSecs.tv_sec; ptm = localtime_r(&curTime, &Tm); - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); - +#ifndef LINUX + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%lld ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, + ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); +#else + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); +#endif va_start(argpointer, format); len += vsnprintf(buffer + len, MAX_NOTE_LINE_SIZE - len, format, argpointer); va_end(argpointer); diff --git a/src/util/src/tpercentile.c b/src/util/src/tpercentile.c new file mode 100644 index 0000000000000000000000000000000000000000..b3c09033b4d48f5d00d2ad3deafea6d29e1be3ec --- /dev/null +++ b/src/util/src/tpercentile.c @@ -0,0 +1,976 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "os.h" + +#include "taosmsg.h" +#include "tsdb.h" +#include "tlog.h" +#include "ttypes.h" +#include "tpercentile.h" + +tExtMemBuffer *releaseBucketsExceptFor(tMemBucket *pMemBucket, int16_t segIdx, int16_t slotIdx) { + tExtMemBuffer *pBuffer = NULL; + + for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; + + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + if (i == segIdx && j == slotIdx) { + pBuffer = pSeg->pBuffer[j]; + } else { + if (pSeg->pBuffer && pSeg->pBuffer[j]) { + pSeg->pBuffer[j] = destoryExtMemBuffer(pSeg->pBuffer[j]); + } + } + } + } + + return pBuffer; +} + +static tFilePage *loadIntoBucketFromDisk(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx, + tOrderDescriptor *pDesc) { + // release all data in other slots + tExtMemBuffer *pMemBuffer = pMemBucket->pSegs[segIdx].pBuffer[slotIdx]; + tFilePage * buffer = (tFilePage *)calloc(1, pMemBuffer->nElemSize * pMemBuffer->numOfTotalElems + sizeof(tFilePage)); + int32_t oldCapacity = pDesc->pColumnModel->capacity; + pDesc->pColumnModel->capacity = pMemBuffer->numOfTotalElems; + + if (!tExtMemBufferIsAllDataInMem(pMemBuffer)) { + pMemBuffer = releaseBucketsExceptFor(pMemBucket, segIdx, slotIdx); + assert(pMemBuffer->numOfTotalElems > 0); + + // load data in disk to memory + tFilePage *pPage = (tFilePage *)calloc(1, pMemBuffer->pageSize); + + for (int32_t i = 0; i < 
pMemBuffer->fileMeta.flushoutData.nLength; ++i) { + tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[i]; + + int32_t ret = fseek(pMemBuffer->file, pFlushInfo->startPageId * pMemBuffer->pageSize, SEEK_SET); + UNUSED(ret); + + for (uint32_t j = 0; j < pFlushInfo->numOfPages; ++j) { + ret = fread(pPage, pMemBuffer->pageSize, 1, pMemBuffer->file); + UNUSED(ret); + assert(pPage->numOfElems > 0); + + tColModelAppend(pDesc->pColumnModel, buffer, pPage->data, 0, pPage->numOfElems, pPage->numOfElems); + printf("id: %d count: %" PRIu64 "\n", j, buffer->numOfElems); + } + } + tfree(pPage); + + assert(buffer->numOfElems == pMemBuffer->fileMeta.numOfElemsInFile); + } + + // load data in pMemBuffer to buffer + tFilePagesItem *pListItem = pMemBuffer->pHead; + while (pListItem != NULL) { + tColModelAppend(pDesc->pColumnModel, buffer, pListItem->item.data, 0, pListItem->item.numOfElems, + pListItem->item.numOfElems); + pListItem = pListItem->pNext; + } + + tColDataQSort(pDesc, buffer->numOfElems, 0, buffer->numOfElems - 1, buffer->data, TSQL_SO_ASC); + + pDesc->pColumnModel->capacity = oldCapacity; // restore value + return buffer; +} + +double findOnlyResult(tMemBucket *pMemBucket) { + assert(pMemBucket->numOfElems == 1); + + for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; + if (pSeg->pBuffer) { + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + tExtMemBuffer *pBuffer = pSeg->pBuffer[j]; + if (pBuffer) { + assert(pBuffer->numOfTotalElems == 1); + tFilePage *pPage = &pBuffer->pHead->item; + if (pBuffer->numOfElemsInBuffer == 1) { + switch (pMemBucket->dataType) { + case TSDB_DATA_TYPE_INT: + return *(int32_t *)pPage->data; + case TSDB_DATA_TYPE_SMALLINT: + return *(int16_t *)pPage->data; + case TSDB_DATA_TYPE_TINYINT: + return *(int8_t *)pPage->data; + case TSDB_DATA_TYPE_BIGINT: + return (double)(*(int64_t *)pPage->data); + case TSDB_DATA_TYPE_DOUBLE: { + double dv = GET_DOUBLE_VAL(pPage->data); 
+ //return *(double *)pPage->data; + return dv; + } + case TSDB_DATA_TYPE_FLOAT: { + float fv = GET_FLOAT_VAL(pPage->data); + //return *(float *)pPage->data; + return fv; + } + default: + return 0; + } + } + } + } + } + } + return 0; +} + +void tBucketBigIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) { + int64_t v = *(int64_t *)value; + + if (pBucket->nRange.i64MaxVal == INT64_MIN) { + if (v >= 0) { + *segIdx = ((v >> (64 - 9)) >> 6) + 8; + *slotIdx = (v >> (64 - 9)) & 0x3F; + } else { // v<0 + *segIdx = ((-v) >> (64 - 9)) >> 6; + *slotIdx = ((-v) >> (64 - 9)) & 0x3F; + *segIdx = 7 - (*segIdx); + } + } else { + // todo hash for bigint and float and double + int64_t span = pBucket->nRange.i64MaxVal - pBucket->nRange.i64MinVal; + if (span < pBucket->nTotalSlots) { + int32_t delta = (int32_t)(v - pBucket->nRange.i64MinVal); + *segIdx = delta / pBucket->nSlotsOfSeg; + *slotIdx = delta % pBucket->nSlotsOfSeg; + } else { + double x = (double)span / pBucket->nTotalSlots; + double posx = (v - pBucket->nRange.i64MinVal) / x; + if (v == pBucket->nRange.i64MaxVal) { + posx -= 1; + } + + *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; + *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; + } + } +} + +// todo refactor to more generic +void tBucketIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) { + int32_t v = *(int32_t *)value; + + if (pBucket->nRange.iMaxVal == INT32_MIN) { + /* + * taking negative integer into consideration, + * there is only half of pBucket->segs available for non-negative integer + */ + // int32_t numOfSlots = pBucket->nTotalSlots>>1; + // int32_t bits = bitsOfNumber(numOfSlots)-1; + + if (v >= 0) { + *segIdx = ((v >> (32 - 9)) >> 6) + 8; + *slotIdx = (v >> (32 - 9)) & 0x3F; + } else { // v<0 + *segIdx = ((-v) >> (32 - 9)) >> 6; + *slotIdx = ((-v) >> (32 - 9)) & 0x3F; + *segIdx = 7 - (*segIdx); + } + } else { + // divide a range of [iMinVal, iMaxVal] into 1024 buckets + int32_t span = 
pBucket->nRange.iMaxVal - pBucket->nRange.iMinVal; + if (span < pBucket->nTotalSlots) { + int32_t delta = v - pBucket->nRange.iMinVal; + *segIdx = delta / pBucket->nSlotsOfSeg; + *slotIdx = delta % pBucket->nSlotsOfSeg; + } else { + double x = (double)span / pBucket->nTotalSlots; + double posx = (v - pBucket->nRange.iMinVal) / x; + if (v == pBucket->nRange.iMaxVal) { + posx -= 1; + } + *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; + *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; + } + } +} + +void tBucketDoubleHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) { + // double v = *(double *)value; + double v = GET_DOUBLE_VAL(value); + + if (pBucket->nRange.dMinVal == DBL_MAX) { + /* + * taking negative integer into consideration, + * there is only half of pBucket->segs available for non-negative integer + */ + double x = DBL_MAX / (pBucket->nTotalSlots >> 1); + double posx = (v + DBL_MAX) / x; + *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; + *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; + } else { + // divide a range of [dMinVal, dMaxVal] into 1024 buckets + double span = pBucket->nRange.dMaxVal - pBucket->nRange.dMinVal; + if (span < pBucket->nTotalSlots) { + int32_t delta = (int32_t)(v - pBucket->nRange.dMinVal); + *segIdx = delta / pBucket->nSlotsOfSeg; + *slotIdx = delta % pBucket->nSlotsOfSeg; + } else { + double x = span / pBucket->nTotalSlots; + double posx = (v - pBucket->nRange.dMinVal) / x; + if (v == pBucket->nRange.dMaxVal) { + posx -= 1; + } + *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; + *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; + } + + if (*segIdx < 0 || *segIdx > 16 || *slotIdx < 0 || *slotIdx > 64) { + pError("error in hash process. 
segment is: %d, slot id is: %d\n", *segIdx, *slotIdx); + } + } +} + +tMemBucket *tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nElemSize, int16_t dataType, + tOrderDescriptor *pDesc) { + tMemBucket *pBucket = (tMemBucket *)malloc(sizeof(tMemBucket)); + + pBucket->nTotalSlots = totalSlots; + pBucket->nSlotsOfSeg = 1 << 6; // 64 Segments, 16 slots each seg. + pBucket->dataType = dataType; + pBucket->nElemSize = nElemSize; + pBucket->pageSize = DEFAULT_PAGE_SIZE; + + pBucket->numOfElems = 0; + pBucket->numOfSegs = pBucket->nTotalSlots / pBucket->nSlotsOfSeg; + + pBucket->nTotalBufferSize = nBufferSize; + + pBucket->maxElemsCapacity = pBucket->nTotalBufferSize / pBucket->nElemSize; + + pBucket->numOfTotalPages = pBucket->nTotalBufferSize / pBucket->pageSize; + pBucket->numOfAvailPages = pBucket->numOfTotalPages; + + pBucket->pOrderDesc = pDesc; + + switch (pBucket->dataType) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TINYINT: { + pBucket->nRange.iMinVal = INT32_MAX; + pBucket->nRange.iMaxVal = INT32_MIN; + pBucket->HashFunc = tBucketIntHash; + break; + }; + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_FLOAT: { + pBucket->nRange.dMinVal = DBL_MAX; + pBucket->nRange.dMaxVal = -DBL_MAX; + pBucket->HashFunc = tBucketDoubleHash; + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + pBucket->nRange.i64MinVal = INT64_MAX; + pBucket->nRange.i64MaxVal = INT64_MIN; + pBucket->HashFunc = tBucketBigIntHash; + break; + }; + default: { + pError("MemBucket:%p,not support data type %d,failed", *pBucket, pBucket->dataType); + tfree(pBucket); + return NULL; + } + } + + int32_t numOfCols = pDesc->pColumnModel->numOfCols; + if (numOfCols != 1) { + pError("MemBucket:%p,only consecutive data is allowed,invalid numOfCols:%d", pBucket, numOfCols); + tfree(pBucket); + return NULL; + } + + SSchema* pSchema = getColumnModelSchema(pDesc->pColumnModel, 0); + if (pSchema->type != dataType) { + pError("MemBucket:%p,data type is not 
consistent,%d in schema, %d in param", pBucket, pSchema->type, dataType); + tfree(pBucket); + return NULL; + } + + if (pBucket->numOfTotalPages < pBucket->nTotalSlots) { + pWarn("MemBucket:%p,total buffer pages %d are not enough for all slots", pBucket, pBucket->numOfTotalPages); + } + + pBucket->pSegs = (tMemBucketSegment *)malloc(pBucket->numOfSegs * sizeof(tMemBucketSegment)); + + for (int32_t i = 0; i < pBucket->numOfSegs; ++i) { + pBucket->pSegs[i].numOfSlots = pBucket->nSlotsOfSeg; + pBucket->pSegs[i].pBuffer = NULL; + pBucket->pSegs[i].pBoundingEntries = NULL; + } + + pTrace("MemBucket:%p,created,buffer size:%d,elem size:%d", pBucket, pBucket->numOfTotalPages * DEFAULT_PAGE_SIZE, + pBucket->nElemSize); + + return pBucket; +} + +void tMemBucketDestroy(tMemBucket *pBucket) { + if (pBucket == NULL) { + return; + } + + if (pBucket->pSegs) { + for (int32_t i = 0; i < pBucket->numOfSegs; ++i) { + tMemBucketSegment *pSeg = &(pBucket->pSegs[i]); + tfree(pSeg->pBoundingEntries); + + if (pSeg->pBuffer == NULL || pSeg->numOfSlots == 0) { + continue; + } + + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + if (pSeg->pBuffer[j] != NULL) { + pSeg->pBuffer[j] = destoryExtMemBuffer(pSeg->pBuffer[j]); + } + } + tfree(pSeg->pBuffer); + } + } + + tfree(pBucket->pSegs); + tfree(pBucket); +} + +/* + * find the slots which accounts for largest proportion of total in-memory buffer + */ +static void tBucketGetMaxMemSlot(tMemBucket *pBucket, int16_t *segIdx, int16_t *slotIdx) { + *segIdx = -1; + *slotIdx = -1; + + int32_t val = 0; + for (int32_t k = 0; k < pBucket->numOfSegs; ++k) { + tMemBucketSegment *pSeg = &pBucket->pSegs[k]; + for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { + if (pSeg->pBuffer == NULL || pSeg->pBuffer[i] == NULL) { + continue; + } + + if (val < pSeg->pBuffer[i]->numOfInMemPages) { + val = pSeg->pBuffer[i]->numOfInMemPages; + *segIdx = k; + *slotIdx = i; + } + } + } +} + +static void resetBoundingBox(tMemBucketSegment *pSeg, int32_t type) { + switch (type) { 
+ case TSDB_DATA_TYPE_BIGINT: { + for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { + pSeg->pBoundingEntries[i].i64MaxVal = INT64_MIN; + pSeg->pBoundingEntries[i].i64MinVal = INT64_MAX; + } + break; + }; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TINYINT: { + for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { + pSeg->pBoundingEntries[i].iMaxVal = INT32_MIN; + pSeg->pBoundingEntries[i].iMinVal = INT32_MAX; + } + break; + }; + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_FLOAT: { + for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { + pSeg->pBoundingEntries[i].dMaxVal = -DBL_MAX; + pSeg->pBoundingEntries[i].dMinVal = DBL_MAX; + } + break; + } + } +} + +void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) { + switch (dataType) { + case TSDB_DATA_TYPE_INT: { + int32_t val = *(int32_t *)data; + if (r->iMinVal > val) { + r->iMinVal = val; + } + + if (r->iMaxVal < val) { + r->iMaxVal = val; + } + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + int64_t val = *(int64_t *)data; + if (r->i64MinVal > val) { + r->i64MinVal = val; + } + + if (r->i64MaxVal < val) { + r->i64MaxVal = val; + } + break; + }; + case TSDB_DATA_TYPE_SMALLINT: { + int32_t val = *(int16_t *)data; + if (r->iMinVal > val) { + r->iMinVal = val; + } + + if (r->iMaxVal < val) { + r->iMaxVal = val; + } + break; + }; + case TSDB_DATA_TYPE_TINYINT: { + int32_t val = *(int8_t *)data; + if (r->iMinVal > val) { + r->iMinVal = val; + } + + if (r->iMaxVal < val) { + r->iMaxVal = val; + } + + break; + }; + case TSDB_DATA_TYPE_DOUBLE: { + // double val = *(double *)data; + double val = GET_DOUBLE_VAL(data); + if (r->dMinVal > val) { + r->dMinVal = val; + } + + if (r->dMaxVal < val) { + r->dMaxVal = val; + } + break; + }; + case TSDB_DATA_TYPE_FLOAT: { + // double val = *(float *)data; + double val = GET_FLOAT_VAL(data); + + if (r->dMinVal > val) { + r->dMinVal = val; + } + + if (r->dMaxVal < val) { + r->dMaxVal = val; + } + break; + }; + default: { 
assert(false); } + } +} + +/* + * in memory bucket, we only accept the simple data consecutive put in a row/column + * no column-model in this case. + */ +void tMemBucketPut(tMemBucket *pBucket, void *data, int32_t numOfRows) { + pBucket->numOfElems += numOfRows; + int16_t segIdx = 0, slotIdx = 0; + + for (int32_t i = 0; i < numOfRows; ++i) { + char *d = (char *)data + i * tDataTypeDesc[pBucket->dataType].nSize; + + switch (pBucket->dataType) { + case TSDB_DATA_TYPE_SMALLINT: { + int32_t val = *(int16_t *)d; + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + int32_t val = *(int8_t *)d; + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + case TSDB_DATA_TYPE_INT: { + int32_t val = *(int32_t *)d; + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + case TSDB_DATA_TYPE_BIGINT: { + int64_t val = *(int64_t *)d; + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + // double val = *(double *)d; + double val = GET_DOUBLE_VAL(d); + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + case TSDB_DATA_TYPE_FLOAT: { + // double val = *(float *)d; + double val = GET_FLOAT_VAL(d); + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + } + + tMemBucketSegment *pSeg = &pBucket->pSegs[segIdx]; + if (pSeg->pBoundingEntries == NULL) { + pSeg->pBoundingEntries = (MinMaxEntry *)malloc(sizeof(MinMaxEntry) * pBucket->nSlotsOfSeg); + resetBoundingBox(pSeg, pBucket->dataType); + } + + if (pSeg->pBuffer == NULL) { + pSeg->pBuffer = (tExtMemBuffer **)calloc(pBucket->nSlotsOfSeg, sizeof(void *)); + } + + if (pSeg->pBuffer[slotIdx] == NULL) { + pSeg->pBuffer[slotIdx] = createExtMemBuffer(pBucket->numOfTotalPages * pBucket->pageSize, pBucket->nElemSize, + pBucket->pOrderDesc->pColumnModel); + pSeg->pBuffer[slotIdx]->flushModel = SINGLE_APPEND_MODEL; + pBucket->pOrderDesc->pColumnModel->capacity = 
pSeg->pBuffer[slotIdx]->numOfElemsPerPage; + } + + tMemBucketUpdateBoundingBox(&pSeg->pBoundingEntries[slotIdx], d, pBucket->dataType); + + // ensure available memory pages to allocate + int16_t cseg = 0, cslot = 0; + if (pBucket->numOfAvailPages == 0) { + pTrace("MemBucket:%p,max avail size:%d, no avail memory pages,", pBucket, pBucket->numOfTotalPages); + + tBucketGetMaxMemSlot(pBucket, &cseg, &cslot); + if (cseg == -1 || cslot == -1) { + pError("MemBucket:%p,failed to find appropriated avail buffer", pBucket); + return; + } + + if (cseg != segIdx || cslot != slotIdx) { + pBucket->numOfAvailPages += pBucket->pSegs[cseg].pBuffer[cslot]->numOfInMemPages; + + int32_t avail = pBucket->pSegs[cseg].pBuffer[cslot]->numOfInMemPages; + UNUSED(avail); + tExtMemBufferFlush(pBucket->pSegs[cseg].pBuffer[cslot]); + + pTrace("MemBucket:%p,seg:%d,slot:%d flushed to disk,new avail pages:%d", pBucket, cseg, cslot, + pBucket->numOfAvailPages); + } else { + pTrace("MemBucket:%p,failed to choose slot to flush to disk seg:%d,slot:%d", pBucket, cseg, cslot); + } + } + int16_t consumedPgs = pSeg->pBuffer[slotIdx]->numOfInMemPages; + + int16_t newPgs = tExtMemBufferPut(pSeg->pBuffer[slotIdx], d, 1); + /* + * trigger 1. page re-allocation, to reduce the available pages + * 2. 
page flushout, to increase the available pages + */ + pBucket->numOfAvailPages += (consumedPgs - newPgs); + } +} + +void releaseBucket(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) { + if (segIdx < 0 || segIdx > pMemBucket->numOfSegs || slotIdx < 0) { + return; + } + + tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx]; + if (slotIdx < 0 || slotIdx >= pSeg->numOfSlots || pSeg->pBuffer[slotIdx] == NULL) { + return; + } + + pSeg->pBuffer[slotIdx] = destoryExtMemBuffer(pSeg->pBuffer[slotIdx]); +} + +//////////////////////////////////////////////////////////////////////////////////////////// +static void findMaxMinValue(tMemBucket *pMemBucket, double *maxVal, double *minVal) { + *minVal = DBL_MAX; + *maxVal = -DBL_MAX; + + for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; + if (pSeg->pBuffer == NULL) { + continue; + } + switch (pMemBucket->dataType) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TINYINT: { + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + double minv = pSeg->pBoundingEntries[j].iMinVal; + double maxv = pSeg->pBoundingEntries[j].iMaxVal; + + if (*minVal > minv) { + *minVal = minv; + } + if (*maxVal < maxv) { + *maxVal = maxv; + } + } + break; + } + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_FLOAT: { + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + double minv = pSeg->pBoundingEntries[j].dMinVal; + double maxv = pSeg->pBoundingEntries[j].dMaxVal; + + if (*minVal > minv) { + *minVal = minv; + } + if (*maxVal < maxv) { + *maxVal = maxv; + } + } + break; + } + case TSDB_DATA_TYPE_BIGINT: { + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + double minv = (double)pSeg->pBoundingEntries[j].i64MinVal; + double maxv = (double)pSeg->pBoundingEntries[j].i64MaxVal; + + if (*minVal > minv) { + *minVal = minv; + } + if (*maxVal < maxv) { + *maxVal = maxv; + } + } + break; + } + } + } +} + +static MinMaxEntry 
getMinMaxEntryOfNearestSlotInNextSegment(tMemBucket *pMemBucket, int32_t segIdx) { + int32_t i = segIdx + 1; + while (i < pMemBucket->numOfSegs && pMemBucket->pSegs[i].numOfSlots == 0) ++i; + + tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; + assert(pMemBucket->numOfSegs > i && pMemBucket->pSegs[i].pBuffer != NULL); + + i = 0; + while (i < pMemBucket->nSlotsOfSeg && pSeg->pBuffer[i] == NULL) ++i; + + assert(i < pMemBucket->nSlotsOfSeg); + return pSeg->pBoundingEntries[i]; +} + +/* + * + * now, we need to find the minimum value of the next slot for + * interpolating the percentile value + * j is the last slot of current segment, we need to get the first + * slot of the next segment. + */ +static MinMaxEntry getMinMaxEntryOfNextSlotWithData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx]; + + MinMaxEntry next; + if (slotIdx == pSeg->numOfSlots - 1) { // find next segment with data + return getMinMaxEntryOfNearestSlotInNextSegment(pMemBucket, segIdx); + } else { + int32_t j = slotIdx + 1; + for (; j < pMemBucket->nSlotsOfSeg && pMemBucket->pSegs[segIdx].pBuffer[j] == 0; ++j) { + }; + + if (j == pMemBucket->nSlotsOfSeg) { // current slot has no available + // slot,try next segment + return getMinMaxEntryOfNearestSlotInNextSegment(pMemBucket, segIdx); + } else { + next = pSeg->pBoundingEntries[slotIdx + 1]; + assert(pSeg->pBuffer[slotIdx + 1] != NULL); + } + } + + return next; +} + +bool isIdenticalData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx); +char *getFirstElemOfMemBuffer(tMemBucketSegment *pSeg, int32_t slotIdx, tFilePage *pPage); + +double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) { + int32_t num = 0; + + for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + if (pSeg->pBuffer == NULL || pSeg->pBuffer[j] == NULL) { + continue; + } + // required value 
in current slot + if (num < (count + 1) && num + pSeg->pBuffer[j]->numOfTotalElems >= (count + 1)) { + if (pSeg->pBuffer[j]->numOfTotalElems + num == (count + 1)) { + /* + * now, we need to find the minimum value of the next slot for interpolating the percentile value + * j is the last slot of current segment, we need to get the first slot of the next segment. + * + */ + MinMaxEntry next = getMinMaxEntryOfNextSlotWithData(pMemBucket, i, j); + + double maxOfThisSlot = 0; + double minOfNextSlot = 0; + switch (pMemBucket->dataType) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TINYINT: { + maxOfThisSlot = pSeg->pBoundingEntries[j].iMaxVal; + minOfNextSlot = next.iMinVal; + break; + }; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: { + maxOfThisSlot = pSeg->pBoundingEntries[j].dMaxVal; + minOfNextSlot = next.dMinVal; + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + maxOfThisSlot = (double)pSeg->pBoundingEntries[j].i64MaxVal; + minOfNextSlot = (double)next.i64MinVal; + break; + } + }; + + assert(minOfNextSlot > maxOfThisSlot); + + double val = (1 - fraction) * maxOfThisSlot + fraction * minOfNextSlot; + return val; + } + if (pSeg->pBuffer[j]->numOfTotalElems <= pMemBucket->maxElemsCapacity) { + // data in buffer and file are merged together to be processed. 
+ tFilePage *buffer = loadIntoBucketFromDisk(pMemBucket, i, j, pMemBucket->pOrderDesc); + int32_t currentIdx = count - num; + + char * thisVal = buffer->data + pMemBucket->nElemSize * currentIdx; + char * nextVal = thisVal + pMemBucket->nElemSize; + double td, nd; + switch (pMemBucket->dataType) { + case TSDB_DATA_TYPE_SMALLINT: { + td = *(int16_t *)thisVal; + nd = *(int16_t *)nextVal; + break; + } + case TSDB_DATA_TYPE_TINYINT: { + td = *(int8_t *)thisVal; + nd = *(int8_t *)nextVal; + break; + } + case TSDB_DATA_TYPE_INT: { + td = *(int32_t *)thisVal; + nd = *(int32_t *)nextVal; + break; + }; + case TSDB_DATA_TYPE_FLOAT: { + // td = *(float *)thisVal; + // nd = *(float *)nextVal; + td = GET_FLOAT_VAL(thisVal); + nd = GET_FLOAT_VAL(nextVal); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + // td = *(double *)thisVal; + td = GET_DOUBLE_VAL(thisVal); + // nd = *(double *)nextVal; + nd = GET_DOUBLE_VAL(nextVal); + break; + } + case TSDB_DATA_TYPE_BIGINT: { + td = (double)*(int64_t *)thisVal; + nd = (double)*(int64_t *)nextVal; + break; + } + } + double val = (1 - fraction) * td + fraction * nd; + tfree(buffer); + + return val; + } else { // incur a second round bucket split + if (isIdenticalData(pMemBucket, i, j)) { + tExtMemBuffer *pMemBuffer = pSeg->pBuffer[j]; + + tFilePage *pPage = (tFilePage *)malloc(pMemBuffer->pageSize); + + char *thisVal = getFirstElemOfMemBuffer(pSeg, j, pPage); + + double finalResult = 0.0; + + switch (pMemBucket->dataType) { + case TSDB_DATA_TYPE_SMALLINT: { + finalResult = *(int16_t *)thisVal; + break; + } + case TSDB_DATA_TYPE_TINYINT: { + finalResult = *(int8_t *)thisVal; + break; + } + case TSDB_DATA_TYPE_INT: { + finalResult = *(int32_t *)thisVal; + break; + }; + case TSDB_DATA_TYPE_FLOAT: { + // finalResult = *(float *)thisVal; + finalResult = GET_FLOAT_VAL(thisVal); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + // finalResult = *(double *)thisVal; + finalResult = GET_DOUBLE_VAL(thisVal); + break; + } + case TSDB_DATA_TYPE_BIGINT: { + 
finalResult = (double)(*(int64_t *)thisVal); + break; + } + } + + free(pPage); + return finalResult; + } + + pTrace("MemBucket:%p,start second round bucketing", pMemBucket); + + if (pSeg->pBuffer[j]->numOfElemsInBuffer != 0) { + pTrace("MemBucket:%p,flush %d pages to disk, clear status", pMemBucket, pSeg->pBuffer[j]->numOfInMemPages); + + pMemBucket->numOfAvailPages += pSeg->pBuffer[j]->numOfInMemPages; + tExtMemBufferFlush(pSeg->pBuffer[j]); + } + + tExtMemBuffer *pMemBuffer = pSeg->pBuffer[j]; + pSeg->pBuffer[j] = NULL; + + // release all + for (int32_t tt = 0; tt < pMemBucket->numOfSegs; ++tt) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[tt]; + for (int32_t ttx = 0; ttx < pSeg->numOfSlots; ++ttx) { + if (pSeg->pBuffer && pSeg->pBuffer[ttx]) { + pSeg->pBuffer[ttx] = destoryExtMemBuffer(pSeg->pBuffer[ttx]); + } + } + } + + pMemBucket->nRange.i64MaxVal = pSeg->pBoundingEntries->i64MaxVal; + pMemBucket->nRange.i64MinVal = pSeg->pBoundingEntries->i64MinVal; + pMemBucket->numOfElems = 0; + + for (int32_t tt = 0; tt < pMemBucket->numOfSegs; ++tt) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[tt]; + for (int32_t ttx = 0; ttx < pSeg->numOfSlots; ++ttx) { + if (pSeg->pBoundingEntries) { + resetBoundingBox(pSeg, pMemBucket->dataType); + } + } + } + + tFilePage *pPage = (tFilePage *)malloc(pMemBuffer->pageSize); + + tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[0]; + assert(pFlushInfo->numOfPages == pMemBuffer->fileMeta.nFileSize); + + int32_t ret = fseek(pMemBuffer->file, pFlushInfo->startPageId * pMemBuffer->pageSize, SEEK_SET); + UNUSED(ret); + + for (uint32_t jx = 0; jx < pFlushInfo->numOfPages; ++jx) { + ret = fread(pPage, pMemBuffer->pageSize, 1, pMemBuffer->file); + UNUSED(ret); + tMemBucketPut(pMemBucket, pPage->data, pPage->numOfElems); + } + + fclose(pMemBuffer->file); + if (unlink(pMemBuffer->path) != 0) { + pError("MemBucket:%p, remove tmp file %s failed", pMemBucket, pMemBuffer->path); + } + tfree(pMemBuffer); + 
tfree(pPage); + + return getPercentileImpl(pMemBucket, count - num, fraction); + } + } else { + num += pSeg->pBuffer[j]->numOfTotalElems; + } + } + } + return 0; +} + +double getPercentile(tMemBucket *pMemBucket, double percent) { + if (pMemBucket->numOfElems == 0) { + return 0.0; + } + + if (pMemBucket->numOfElems == 1) { // return the only element + return findOnlyResult(pMemBucket); + } + + percent = fabs(percent); + + // validate the parameters + if (fabs(percent - 100.0) < DBL_EPSILON || (percent < DBL_EPSILON)) { + double minx = 0, maxx = 0; + /* + * find the min/max value, no need to scan all data in bucket + */ + findMaxMinValue(pMemBucket, &maxx, &minx); + + return fabs(percent - 100) < DBL_EPSILON ? maxx : minx; + } + + double percentVal = (percent * (pMemBucket->numOfElems - 1)) / ((double)100.0); + int32_t orderIdx = (int32_t)percentVal; + + // do put data by using buckets + return getPercentileImpl(pMemBucket, orderIdx, percentVal - orderIdx); +} + +/* + * check if data in one slot are all identical + * only need to compare with the bounding box + */ +bool isIdenticalData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx]; + + if (pMemBucket->dataType == TSDB_DATA_TYPE_INT || pMemBucket->dataType == TSDB_DATA_TYPE_BIGINT || + pMemBucket->dataType == TSDB_DATA_TYPE_SMALLINT || pMemBucket->dataType == TSDB_DATA_TYPE_TINYINT) { + return pSeg->pBoundingEntries[slotIdx].i64MinVal == pSeg->pBoundingEntries[slotIdx].i64MaxVal; + } + + if (pMemBucket->dataType == TSDB_DATA_TYPE_FLOAT || pMemBucket->dataType == TSDB_DATA_TYPE_DOUBLE) { + return fabs(pSeg->pBoundingEntries[slotIdx].dMaxVal - pSeg->pBoundingEntries[slotIdx].dMinVal) < DBL_EPSILON; + } + + return false; +} + +/* + * get the first element of one slot into memory. 
+ * if no data of current slot in memory, load it from disk + */ +char *getFirstElemOfMemBuffer(tMemBucketSegment *pSeg, int32_t slotIdx, tFilePage *pPage) { + tExtMemBuffer *pMemBuffer = pSeg->pBuffer[slotIdx]; + char * thisVal = NULL; + + if (pSeg->pBuffer[slotIdx]->numOfElemsInBuffer != 0) { + thisVal = pSeg->pBuffer[slotIdx]->pHead->item.data; + } else { + /* + * no data in memory, load one page into memory + */ + tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[0]; + assert(pFlushInfo->numOfPages == pMemBuffer->fileMeta.nFileSize); + + fseek(pMemBuffer->file, pFlushInfo->startPageId * pMemBuffer->pageSize, SEEK_SET); + size_t ret = fread(pPage, pMemBuffer->pageSize, 1, pMemBuffer->file); + UNUSED(ret); + thisVal = pPage->data; + } + return thisVal; +} diff --git a/src/util/src/tresultBuf.c b/src/util/src/tresultBuf.c new file mode 100644 index 0000000000000000000000000000000000000000..31218670acc0a95c865de27ea945d1ed5ee19e29 --- /dev/null +++ b/src/util/src/tresultBuf.c @@ -0,0 +1,225 @@ +#include "hash.h" +#include "taoserror.h" +#include "textbuffer.h" +#include "tlog.h" +#include "tsqlfunction.h" +#include "tresultBuf.h" + +#define DEFAULT_INTERN_BUF_SIZE 16384L + +int32_t createResultBuf(SQueryResultBuf** pResultBuf, int32_t size, int32_t rowSize) { + SQueryResultBuf* pResBuf = calloc(1, sizeof(SQueryResultBuf)); + pResBuf->numOfRowsPerPage = (DEFAULT_INTERN_BUF_SIZE - sizeof(tFilePage)) / rowSize; + pResBuf->numOfPages = size; + + pResBuf->totalBufSize = pResBuf->numOfPages * DEFAULT_INTERN_BUF_SIZE; + pResBuf->incStep = 4; + + // init id hash table + pResBuf->idsTable = taosInitHashTable(size, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + pResBuf->list = calloc(size, sizeof(SIDList)); + pResBuf->numOfAllocGroupIds = size; + + char path[4096] = {0}; + getTmpfilePath("tsdb_q_buf", path); + pResBuf->path = strdup(path); + + pResBuf->fd = open(pResBuf->path, O_CREAT | O_RDWR, 0666); + + memset(path, 0, tListLen(path)); 
+ + if (!FD_VALID(pResBuf->fd)) { + pError("failed to create tmp file: %s on disk. %s", pResBuf->path, strerror(errno)); + return TSDB_CODE_CLI_NO_DISKSPACE; + } + + int32_t ret = ftruncate(pResBuf->fd, pResBuf->numOfPages * DEFAULT_INTERN_BUF_SIZE); + if (ret != TSDB_CODE_SUCCESS) { + pError("failed to create tmp file: %s on disk. %s", pResBuf->path, strerror(errno)); + return TSDB_CODE_CLI_NO_DISKSPACE; + } + + pResBuf->pBuf = mmap(NULL, pResBuf->totalBufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pResBuf->fd, 0); + if (pResBuf->pBuf == MAP_FAILED) { + pError("QInfo:%p failed to map temp file: %s. %s", pResBuf->path, strerror(errno)); + return TSDB_CODE_CLI_OUT_OF_MEMORY; // todo change error code + } + + pTrace("create tmp file for output result, %s, " PRId64 "bytes", pResBuf->path, pResBuf->totalBufSize); + *pResultBuf = pResBuf; + return TSDB_CODE_SUCCESS; +} + +tFilePage* getResultBufferPageById(SQueryResultBuf* pResultBuf, int32_t id) { + assert(id < pResultBuf->numOfPages && id >= 0); + + return (tFilePage*)(pResultBuf->pBuf + DEFAULT_INTERN_BUF_SIZE * id); +} + +int32_t getNumOfResultBufGroupId(SQueryResultBuf* pResultBuf) { return taosNumElemsInHashTable(pResultBuf->idsTable); } + +int32_t getResBufSize(SQueryResultBuf* pResultBuf) { return pResultBuf->totalBufSize; } + +static int32_t extendDiskFileSize(SQueryResultBuf* pResultBuf, int32_t numOfPages) { + assert(pResultBuf->numOfPages * DEFAULT_INTERN_BUF_SIZE == pResultBuf->totalBufSize); + + int32_t ret = munmap(pResultBuf->pBuf, pResultBuf->totalBufSize); + pResultBuf->numOfPages += numOfPages; + + /* + * disk-based output buffer is exhausted, try to extend the disk-based buffer, the available disk space may + * be insufficient + */ + ret = ftruncate(pResultBuf->fd, pResultBuf->numOfPages * DEFAULT_INTERN_BUF_SIZE); + if (ret != 0) { + // dError("QInfo:%p failed to create intermediate result output file:%s. 
%s", pQInfo, pSupporter->extBufFile, + // strerror(errno)); + return -TSDB_CODE_SERV_NO_DISKSPACE; + } + + pResultBuf->totalBufSize = pResultBuf->numOfPages * DEFAULT_INTERN_BUF_SIZE; + pResultBuf->pBuf = mmap(NULL, pResultBuf->totalBufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pResultBuf->fd, 0); + + if (pResultBuf->pBuf == MAP_FAILED) { + // dError("QInfo:%p failed to map temp file: %s. %s", pQInfo, pSupporter->extBufFile, strerror(errno)); + return -TSDB_CODE_SERV_OUT_OF_MEMORY; + } + + return TSDB_CODE_SUCCESS; +} + +static bool noMoreAvailablePages(SQueryResultBuf* pResultBuf) { + return (pResultBuf->allocateId == pResultBuf->numOfPages - 1); +} + +static int32_t getGroupIndex(SQueryResultBuf* pResultBuf, int32_t groupId) { + assert(pResultBuf != NULL); + + char* p = taosGetDataFromHashTable(pResultBuf->idsTable, (const char*)&groupId, sizeof(int32_t)); + if (p == NULL) { // it is a new group id + return -1; + } + + int32_t slot = GET_INT32_VAL(p); + assert(slot >= 0 && slot < pResultBuf->numOfAllocGroupIds); + + return slot; +} + +static int32_t addNewGroupId(SQueryResultBuf* pResultBuf, int32_t groupId) { + int32_t num = getNumOfResultBufGroupId(pResultBuf); // the num is the newest allocated group id slot + + if (pResultBuf->numOfAllocGroupIds <= num) { + size_t n = pResultBuf->numOfAllocGroupIds << 1u; + + SIDList* p = (SIDList*)realloc(pResultBuf->list, sizeof(SIDList) * n); + assert(p != NULL); + + memset(&p[pResultBuf->numOfAllocGroupIds], 0, sizeof(SIDList) * pResultBuf->numOfAllocGroupIds); + + pResultBuf->list = p; + pResultBuf->numOfAllocGroupIds = n; + } + + taosAddToHashTable(pResultBuf->idsTable, (const char*)&groupId, sizeof(int32_t), &num, sizeof(int32_t)); + return num; +} + +static int32_t doRegisterId(SIDList* pList, int32_t id) { + if (pList->size >= pList->alloc) { + int32_t s = 0; + if (pList->alloc == 0) { + s = 4; + assert(pList->pData == NULL); + } else { + s = pList->alloc << 1u; + } + + int32_t* c = realloc(pList->pData, s * 
sizeof(int32_t)); + assert(c); + + memset(&c[pList->alloc], 0, sizeof(int32_t) * pList->alloc); + + pList->pData = c; + pList->alloc = s; + } + + pList->pData[pList->size++] = id; + return 0; +} + +static void registerPageId(SQueryResultBuf* pResultBuf, int32_t groupId, int32_t pageId) { + int32_t slot = getGroupIndex(pResultBuf, groupId); + if (slot < 0) { + slot = addNewGroupId(pResultBuf, groupId); + } + + SIDList* pList = &pResultBuf->list[slot]; + doRegisterId(pList, pageId); +} + +tFilePage* getNewDataBuf(SQueryResultBuf* pResultBuf, int32_t groupId, int32_t* pageId) { + if (noMoreAvailablePages(pResultBuf)) { + if (extendDiskFileSize(pResultBuf, pResultBuf->incStep) != TSDB_CODE_SUCCESS) { + return NULL; + } + } + + // register new id in this group + *pageId = (pResultBuf->allocateId++); + registerPageId(pResultBuf, groupId, *pageId); + + tFilePage* page = getResultBufferPageById(pResultBuf, *pageId); + + // clear memory for the new page + memset(page, 0, DEFAULT_INTERN_BUF_SIZE); + + return page; +} + +int32_t getNumOfRowsPerPage(SQueryResultBuf* pResultBuf) { return pResultBuf->numOfRowsPerPage; } + +SIDList getDataBufPagesIdList(SQueryResultBuf* pResultBuf, int32_t groupId) { + SIDList list = {0}; + int32_t slot = getGroupIndex(pResultBuf, groupId); + if (slot < 0) { + return list; + } else { + return pResultBuf->list[slot]; + } +} + +void destroyResultBuf(SQueryResultBuf* pResultBuf) { + if (pResultBuf == NULL) { + return; + } + + if (FD_VALID(pResultBuf->fd)) { + close(pResultBuf->fd); + } + + pTrace("disk-based output buffer closed, %" PRId64 " bytes, file:%s", pResultBuf->totalBufSize, pResultBuf->path); + munmap(pResultBuf->pBuf, pResultBuf->totalBufSize); + unlink(pResultBuf->path); + + tfree(pResultBuf->path); + + for (int32_t i = 0; i < pResultBuf->numOfAllocGroupIds; ++i) { + SIDList* pList = &pResultBuf->list[i]; + tfree(pList->pData); + } + + tfree(pResultBuf->list); + taosCleanUpHashTable(pResultBuf->idsTable); + + tfree(pResultBuf); +} + 
+int32_t getLastPageId(SIDList *pList) { + if (pList == NULL && pList->size <= 0) { + return -1; + } + + return pList->pData[pList->size - 1]; +} + diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c index bd49c670f6515f3e29dd5e37df8f333213f4aef3..56d16eeb7196bebaf871615ed93f06b424079180 100644 --- a/src/util/src/tsched.c +++ b/src/util/src/tsched.c @@ -16,6 +16,9 @@ #include "os.h" #include "tlog.h" #include "tsched.h" +#include "ttimer.h" + +#define DUMP_SCHEDULER_TIME_WINDOW 30000 //every 30sec, take a snap shot of task queue. typedef struct { char label[16]; @@ -28,10 +31,13 @@ typedef struct { int numOfThreads; pthread_t * qthread; SSchedMsg * queue; + + void* pTmrCtrl; + void* pTimer; } SSchedQueue; -void *taosProcessSchedQueue(void *param); -void taosCleanUpScheduler(void *param); +static void *taosProcessSchedQueue(void *param); +static void taosDumpSchedulerStatus(void *qhandle, void *tmrId); void *taosInitScheduler(int queueSize, int numOfThreads, const char *label) { pthread_attr_t attr; @@ -96,6 +102,17 @@ _error: return NULL; } +void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl) { + SSchedQueue* pSched = taosInitScheduler(queueSize, numOfThreads, label); + + if (tmrCtrl != NULL && pSched != NULL) { + pSched->pTmrCtrl = tmrCtrl; + taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer); + } + + return pSched; +} + void *taosProcessSchedQueue(void *param) { SSchedMsg msg; SSchedQueue *pSched = (SSchedQueue *)param; @@ -128,6 +145,8 @@ void *taosProcessSchedQueue(void *param) { else if (msg.tfp) (*(msg.tfp))(msg.ahandle, msg.thandle); } + + return NULL; } int taosScheduleTask(void *qhandle, SSchedMsg *pMsg) { @@ -173,8 +192,27 @@ void taosCleanUpScheduler(void *param) { tsem_destroy(&pSched->emptySem); tsem_destroy(&pSched->fullSem); pthread_mutex_destroy(&pSched->queueMutex); + + if (pSched->pTimer) { + taosTmrStopA(&pSched->pTimer); + } 
free(pSched->queue); free(pSched->qthread); free(pSched); // fix memory leak } + +// for debug purpose, dump the scheduler status every 1min. +void taosDumpSchedulerStatus(void *qhandle, void *tmrId) { + SSchedQueue *pSched = (SSchedQueue *)qhandle; + if (pSched == NULL || pSched->pTimer == NULL || pSched->pTimer != tmrId) { + return; + } + + int32_t size = ((pSched->emptySlot - pSched->fullSlot) + pSched->queueSize) % pSched->queueSize; + if (size > 0) { + pTrace("scheduler:%s, current tasks in queue:%d, task thread:%d", pSched->label, size, pSched->numOfThreads); + } + + taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer); +} diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index 72469fa75ba52154a9ef21f0a92670fb3ca06b8c..01c91b6c6565e1dc2b0b1cd20eb8c7324eb70ea4 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -12,11 +12,7 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ - -#include -#include -#include -#include +#include "os.h" #include "tlog.h" #include "tsdb.h" @@ -570,7 +566,7 @@ int32_t tSkipListIterateList(tSkipList *pSkipList, tSkipListNode ***pRes, bool ( char* tmp = realloc((*pRes), num * POINTER_BYTES); assert(tmp != NULL); - *pRes = tmp; + *pRes = (tSkipListNode**)tmp; } return num; @@ -688,7 +684,7 @@ void tSkipListPrint(tSkipList *pSkipList, int16_t nlevel) { case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BIGINT: - fprintf(stdout, "%d: %lld \n", id++, p->key.i64Key); + fprintf(stdout, "%d: %" PRId64 " \n", id++, p->key.i64Key); break; case TSDB_DATA_TYPE_BINARY: fprintf(stdout, "%d: %s \n", id++, p->key.pz); diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index 6e8379c0a486aa35433742dea1af7b10ea3be682..7ab004646e3094d1a231e30eafaf6a966f0bba8b 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -19,8 +19,6 @@ #include "tsocket.h" #include "tutil.h" -unsigned int ip2uint(const char *const ip_addr); - /* * Function to get the public ip address of current machine. If get IP * successfully, return 0, else, return -1. The return values is ip. @@ -105,7 +103,7 @@ int taosGetPublicIp(char *const ip) { } // Function converting an IP address string to an unsigned int. 
-unsigned int ip2uint(const char *const ip_addr) { +uint32_t ip2uint(const char *const ip_addr) { char ip_addr_cpy[20]; char ip[5]; @@ -518,7 +516,7 @@ int taosCopyFds(int sfd, int dfd, int64_t len) { int retLen = taosReadMsg(sfd, temp, (int)readLen); if (readLen != retLen) { - pError("read error, readLen:%d retLen:%d len:%ld leftLen:%ld, reason:%s", readLen, retLen, len, leftLen, + pError("read error, readLen:%d retLen:%d len:%" PRId64 " leftLen:%" PRId64 ", reason:%s", readLen, retLen, len, leftLen, strerror(errno)); return -1; } @@ -526,7 +524,7 @@ int taosCopyFds(int sfd, int dfd, int64_t len) { writeLen = taosWriteMsg(dfd, temp, readLen); if (readLen != writeLen) { - pError("copy error, readLen:%d writeLen:%d len:%ld leftLen:%ld, reason:%s", readLen, writeLen, len, leftLen, + pError("copy error, readLen:%d writeLen:%d len:%" PRId64 " leftLen:%" PRId64 ", reason:%s", readLen, writeLen, len, leftLen, strerror(errno)); return -1; } diff --git a/src/util/src/tstrbuild.c b/src/util/src/tstrbuild.c index 6fb970bd6e3de13d6119089759fcf030d6c618fc..61a6d67952a73b8efd0c20a45214a1546f1f0258 100644 --- a/src/util/src/tstrbuild.c +++ b/src/util/src/tstrbuild.c @@ -12,11 +12,8 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ - +#include "os.h" #include "tstrbuild.h" -#include -#include -#include void taosStringBuilderEnsureCapacity(SStringBuilder* sb, size_t size) { size += sb->pos; @@ -72,7 +69,7 @@ void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendSt void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) { char buf[64]; - size_t len = sprintf(buf, "%lld", v); + size_t len = sprintf(buf, "%" PRId64, v); taosStringBuilderAppendStringLen(sb, buf, len); } diff --git a/src/util/src/ttime.c b/src/util/src/ttime.c index 05ba01979e7e4281f760bd1573de0675a8d03e66..65c5d0ea4c86f2cd05628bbc2f9e35da89d0d7c6 100644 --- a/src/util/src/ttime.c +++ b/src/util/src/ttime.c @@ -24,6 +24,97 @@ #include "ttime.h" #include "tutil.h" +/* + * mktime64 - Converts date to seconds. + * Converts Gregorian date to seconds since 1970-01-01 00:00:00. + * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 + * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. + * + * [For the Julian calendar (which was used in Russia before 1917, + * Britain & colonies before 1752, anywhere else before 1582, + * and is still in use by some communities) leave out the + * -year/100+year/400 terms, and add 10.] + * + * This algorithm was first published by Gauss (I think). + * + * A leap second can be indicated by calling this function with sec as + * 60 (allowable under ISO 8601). The leap second is treated the same + * as the following second since they don't exist in UNIX time. + * + * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight + * tomorrow - (allowable under ISO 8601) is supported. 
+ */ +int64_t user_mktime64(const unsigned int year0, const unsigned int mon0, + const unsigned int day, const unsigned int hour, + const unsigned int min, const unsigned int sec) +{ + unsigned int mon = mon0, year = year0; + + /* 1..12 -> 11,12,1..10 */ + if (0 >= (int) (mon -= 2)) { + mon += 12; /* Puts Feb last since it has leap day */ + year -= 1; + } + + int64_t res = (((((int64_t) (year/4 - year/100 + year/400 + 367*mon/12 + day) + + year*365 - 719499)*24 + hour)*60 + min)*60 + sec); + + return (res + timezone); +} +// ==== mktime() kernel code =================// +static int64_t m_deltaUtc = 0; +void deltaToUtcInitOnce() { + struct tm tm = {0}; + + (void)strptime("1970-01-01 00:00:00", (const char *)("%Y-%m-%d %H:%M:%S"), &tm); + m_deltaUtc = (int64_t)mktime(&tm); + //printf("====delta:%lld\n\n", seconds); + return; +} + +int64_t user_mktime(struct tm * tm) +{ +#define TAOS_MINUTE 60 +#define TAOS_HOUR (60*TAOS_MINUTE) +#define TAOS_DAY (24*TAOS_HOUR) +#define TAOS_YEAR (365*TAOS_DAY) + +static int month[12] = { + 0, + TAOS_DAY*(31), + TAOS_DAY*(31+29), + TAOS_DAY*(31+29+31), + TAOS_DAY*(31+29+31+30), + TAOS_DAY*(31+29+31+30+31), + TAOS_DAY*(31+29+31+30+31+30), + TAOS_DAY*(31+29+31+30+31+30+31), + TAOS_DAY*(31+29+31+30+31+30+31+31), + TAOS_DAY*(31+29+31+30+31+30+31+31+30), + TAOS_DAY*(31+29+31+30+31+30+31+31+30+31), + TAOS_DAY*(31+29+31+30+31+30+31+31+30+31+30) +}; + + int64_t res; + int year; + + year= tm->tm_year - 70; + res= TAOS_YEAR*year + TAOS_DAY*((year+1)/4); + res+= month[tm->tm_mon]; + + if(tm->tm_mon > 1 && ((year+2)%4)) { + res-= TAOS_DAY; + } + + res+= TAOS_DAY*(tm->tm_mday-1); + res+= TAOS_HOUR*tm->tm_hour; + res+= TAOS_MINUTE*tm->tm_min; + res+= tm->tm_sec; + + return res + m_deltaUtc; + +} + + static int64_t parseFraction(char* str, char** end, int32_t timePrec); static int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec); static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec); @@ -237,7 +328,10 @@ 
int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) { } /* mktime will be affected by TZ, set by using taos_options */ - int64_t seconds = mktime(&tm); + //int64_t seconds = mktime(&tm); + //int64_t seconds = (int64_t)user_mktime(&tm); + int64_t seconds = user_mktime64(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); + int64_t fraction = 0; if (*str == '.') { diff --git a/src/util/src/ttimer.c b/src/util/src/ttimer.c index e278f5b9f8f47d7a8450a6db26187a52b27cd846..ccac1de518af34c30cca8df05c8fca9f781bdd4e 100644 --- a/src/util/src/ttimer.c +++ b/src/util/src/ttimer.c @@ -14,7 +14,6 @@ */ #include "os.h" -#include #include "tlog.h" #include "tsched.h" #include "ttime.h" @@ -254,13 +253,13 @@ static void processExpiredTimer(void* handle, void* arg) { timer->executedBy = taosGetPthreadId(); uint8_t state = atomic_val_compare_exchange_8(&timer->state, TIMER_STATE_WAITING, TIMER_STATE_EXPIRED); if (state == TIMER_STATE_WAITING) { - const char* fmt = "%s timer[id=" PRIuPTR ", fp=%p, param=%p] execution start."; + const char* fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] execution start."; tmrTrace(fmt, timer->ctrl->label, timer->id, timer->fp, timer->param); (*timer->fp)(timer->param, (tmr_h)timer->id); atomic_store_8(&timer->state, TIMER_STATE_STOPPED); - fmt = "%s timer[id=" PRIuPTR ", fp=%p, param=%p] execution end."; + fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] execution end."; tmrTrace(fmt, timer->ctrl->label, timer->id, timer->fp, timer->param); } removeTimer(timer->id); @@ -268,7 +267,7 @@ static void processExpiredTimer(void* handle, void* arg) { } static void addToExpired(tmr_obj_t* head) { - const char* fmt = "%s adding expired timer[id=" PRIuPTR ", fp=%p, param=%p] to queue."; + const char* fmt = "%s adding expired timer[id=%" PRIuPTR ", fp=%p, param=%p] to queue."; while (head != NULL) { uintptr_t id = head->id; @@ -282,7 +281,7 @@ static void addToExpired(tmr_obj_t* head) { schedMsg.thandle = NULL; 
taosScheduleTask(tmrQhandle, &schedMsg); - tmrTrace("timer[id=" PRIuPTR "] has been added to queue.", id); + tmrTrace("timer[id=%" PRIuPTR "] has been added to queue.", id); head = next; } } @@ -296,7 +295,7 @@ static uintptr_t doStartTimer(tmr_obj_t* timer, TAOS_TMR_CALLBACK fp, int msecon timer->ctrl = ctrl; addTimer(timer); - const char* fmt = "%s timer[id=" PRIuPTR ", fp=%p, param=%p] started"; + const char* fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] started"; tmrTrace(fmt, ctrl->label, timer->id, timer->fp, timer->param); if (mseconds == 0) { @@ -389,7 +388,7 @@ static bool doStopTimer(tmr_obj_t* timer, uint8_t state) { // we cannot guarantee the thread safety of the timr in all other cases. reusable = true; } - const char* fmt = "%s timer[id=" PRIuPTR ", fp=%p, param=%p] is cancelled."; + const char* fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] is cancelled."; tmrTrace(fmt, timer->ctrl->label, timer->id, timer->fp, timer->param); return reusable; } @@ -409,7 +408,7 @@ static bool doStopTimer(tmr_obj_t* timer, uint8_t state) { // timer callback is executing in another thread, we SHOULD wait it to stop, // BUT this may result in dead lock if current thread are holding a lock which // the timer callback need to acquire. so, we HAVE TO return directly. 
- const char* fmt = "%s timer[id=" PRIuPTR ", fp=%p, param=%p] is executing and cannot be stopped."; + const char* fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] is executing and cannot be stopped."; tmrTrace(fmt, timer->ctrl->label, timer->id, timer->fp, timer->param); return false; } @@ -419,7 +418,7 @@ bool taosTmrStop(tmr_h timerId) { tmr_obj_t* timer = findTimer(id); if (timer == NULL) { - tmrTrace("timer[id=" PRIuPTR "] does not exist", id); + tmrTrace("timer[id=%" PRIuPTR "] does not exist", id); return false; } @@ -446,7 +445,7 @@ bool taosTmrReset(TAOS_TMR_CALLBACK fp, int mseconds, void* param, void* handle, bool stopped = false; tmr_obj_t* timer = findTimer(id); if (timer == NULL) { - tmrTrace("%s timer[id=" PRIuPTR "] does not exist", ctrl->label, id); + tmrTrace("%s timer[id=%" PRIuPTR "] does not exist", ctrl->label, id); } else { uint8_t state = atomic_val_compare_exchange_8(&timer->state, TIMER_STATE_WAITING, TIMER_STATE_CANCELED); if (!doStopTimer(timer, state)) { @@ -461,7 +460,7 @@ bool taosTmrReset(TAOS_TMR_CALLBACK fp, int mseconds, void* param, void* handle, return stopped; } - tmrTrace("%s timer[id=" PRIuPTR "] is reused", ctrl->label, timer->id); + tmrTrace("%s timer[id=%" PRIuPTR "] is reused", ctrl->label, timer->id); // wait until there's no other reference to this timer, // so that we can reuse this timer safely. 
diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index af8174456c6b7180bc2d4c81087228e54820e365..7cbb4552b410536176f5e69d9ee9336af197d94f 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -14,11 +14,13 @@ */ #include "os.h" +#include "hashutil.h" #include "shash.h" #include "tutil.h" #include "tsqldef.h" #include "tstoken.h" #include "ttypes.h" +#include "hash.h" // All the keywords of the SQL language are stored in a hash table typedef struct SKeyword { @@ -225,11 +227,15 @@ static SKeyword keywordTable[] = { {"STABLE", TK_STABLE}, {"FILE", TK_FILE}, {"VNODES", TK_VNODES}, + {"UNION", TK_UNION}, + {"RATE", TK_RATE}, + {"IRATE", TK_IRATE}, + {"SUM_RATE", TK_SUM_RATE}, + {"SUM_IRATE", TK_SUM_IRATE}, + {"AVG_RATE", TK_AVG_RATE}, + {"AVG_IRATE", TK_AVG_IRATE}, }; -/* This is the hash table */ -static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; - static const char isIdChar[] = { /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */ @@ -243,27 +249,22 @@ static const char isIdChar[] = { }; static void* KeywordHashTable = NULL; -int tSQLKeywordCode(const char* z, int n) { - int i; - static char needInit = 1; - if (needInit) { - // Initialize the keyword hash table - pthread_mutex_lock(&mutex); - - // double check - if (needInit) { - int nk = tListLen(keywordTable); - - KeywordHashTable = taosInitStrHash(nk, POINTER_BYTES, taosHashStringStep1); - for (i = 0; i < nk; i++) { - keywordTable[i].len = strlen(keywordTable[i].name); - void* ptr = &keywordTable[i]; - taosAddStrHash(KeywordHashTable, (char*)keywordTable[i].name, (void*)&ptr); - } - needInit = 0; - } - pthread_mutex_unlock(&mutex); + +static void doInitKeywordsTable() { + int numOfEntries = tListLen(keywordTable); + + KeywordHashTable = taosInitHashTable(numOfEntries, MurmurHash3_32, false); + for (int32_t i = 0; i < numOfEntries; i++) { + keywordTable[i].len = strlen(keywordTable[i].name); + void* ptr = 
&keywordTable[i]; + taosAddToHashTable(KeywordHashTable, keywordTable[i].name, keywordTable[i].len, (void*)&ptr, POINTER_BYTES); } +} + +static pthread_once_t keywordsHashTableInit = PTHREAD_ONCE_INIT; + +int tSQLKeywordCode(const char* z, int n) { + pthread_once(&keywordsHashTableInit, doInitKeywordsTable); char key[128] = {0}; for (int32_t j = 0; j < n; ++j) { @@ -274,7 +275,7 @@ int tSQLKeywordCode(const char* z, int n) { } } - SKeyword** pKey = (SKeyword**)taosGetStrHashData(KeywordHashTable, key); + SKeyword** pKey = (SKeyword**)taosGetDataFromHashTable(KeywordHashTable, key, n); if (pKey != NULL) { return (*pKey)->type; } else { @@ -418,7 +419,12 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { int delim = z[0]; bool strEnd = false; for (i = 1; z[i]; i++) { - if (z[i] == delim) { + if (z[i] == '\\') { + i++; + continue; + } + + if (z[i] == delim ) { if (z[i + 1] == delim) { i++; } else { @@ -427,6 +433,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { } } } + if (z[i]) i++; if (strEnd) { @@ -504,7 +511,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { if ((z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' || z[i] == 'y' || z[i] == 'w' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' || z[i] == 'Y' || z[i] == 'W') && - (isIdChar[z[i + 1]] == 0)) { + (isIdChar[(uint8_t)z[i + 1]] == 0)) { *tokenType = TK_VARIABLE; i += 1; return i; @@ -545,7 +552,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { case 't': case 'F': case 'f': { - for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[z[i]]; i++) { + for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[(uint8_t) z[i]]; i++) { } if ((i == 4 && strncasecmp(z, "true", 4) == 0) || (i == 5 && strncasecmp(z, "false", 5) == 0)) { @@ -554,10 +561,10 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { } } default: { - if (((*z & 0x80) != 0) || !isIdChar[*z]) { + if (((*z & 0x80) != 0) || !isIdChar[(uint8_t) *z]) { break; } - 
for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[z[i]]; i++) { + for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[(uint8_t) z[i]]; i++) { } *tokenType = tSQLKeywordCode(z, i); return i; diff --git a/src/util/src/ttypes.c b/src/util/src/ttypes.c index 98f0741905c912684fa44d0ed22cbe24970da069..ae994cb77b7cdb27f3e857115d6d1db7df9bd9b0 100644 --- a/src/util/src/ttypes.c +++ b/src/util/src/ttypes.c @@ -12,7 +12,6 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ - #include "os.h" #include "taos.h" #include "tsdb.h" @@ -140,7 +139,7 @@ void tVariantCreateFromBinary(tVariant *pVar, char *pz, uint32_t len, uint32_t t } case TSDB_DATA_TYPE_NCHAR: { // here we get the nchar length from raw binary bits length pVar->nLen = len / TSDB_NCHAR_SIZE; - pVar->wpz = malloc((pVar->nLen + 1) * TSDB_NCHAR_SIZE); + pVar->wpz = calloc(1, (pVar->nLen + 1) * TSDB_NCHAR_SIZE); wcsncpy(pVar->wpz, (wchar_t *)pz, pVar->nLen); pVar->wpz[pVar->nLen] = 0; @@ -164,14 +163,13 @@ void tVariantCreateFromBinary(tVariant *pVar, char *pz, uint32_t len, uint32_t t void tVariantDestroy(tVariant *pVar) { if (pVar == NULL) return; - if ((pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) && pVar->nLen > 0) { - free(pVar->pz); - pVar->pz = NULL; + if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) { + tfree(pVar->pz); pVar->nLen = 0; } } -void tVariantAssign(tVariant *pDst, tVariant *pSrc) { +void tVariantAssign(tVariant *pDst, const tVariant *pSrc) { if (pSrc == NULL || pDst == NULL) return; *pDst = *pSrc; @@ -213,7 +211,7 @@ int32_t tVariantToString(tVariant *pVar, char *dst) { return sprintf(dst, "%d", (int32_t)pVar->i64Key); case TSDB_DATA_TYPE_BIGINT: - return sprintf(dst, "%lld", pVar->i64Key); + return sprintf(dst, "%" PRId64, pVar->i64Key); case TSDB_DATA_TYPE_FLOAT: case TSDB_DATA_TYPE_DOUBLE: @@ -224,6 +222,7 @@ int32_t tVariantToString(tVariant *pVar, char *dst) { } } +#if 0 
static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type, bool releaseVariantPtr) { if (pVariant->nType == TSDB_DATA_TYPE_NULL) { setNull(pDest, type, tDataTypeDesc[type].nSize); @@ -337,7 +336,7 @@ static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type, return 0; } - +#endif static FORCE_INLINE int32_t convertToBoolImpl(char *pStr, int32_t len) { if ((strncasecmp(pStr, "true", len) == 0) && (len == 4)) { return TSDB_TRUE; @@ -386,7 +385,7 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) { } else { if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { - sprintf(pBuf == NULL ? *pDest : pBuf, "%lld", pVariant->i64Key); + sprintf(pBuf == NULL ? *pDest : pBuf, "%" PRId64, pVariant->i64Key); } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { sprintf(pBuf == NULL ? *pDest : pBuf, "%lf", pVariant->dKey); } else if (pVariant->nType == TSDB_DATA_TYPE_BOOL) { @@ -411,7 +410,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) { int32_t nLen = 0; if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { - nLen = sprintf(pDst, "%lld", pVariant->i64Key); + nLen = sprintf(pDst, "%" PRId64, pVariant->i64Key); } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { nLen = sprintf(pDst, "%lf", pVariant->dKey); } else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) { @@ -437,7 +436,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) { char* tmp = realloc(pVariant->wpz, (*pDestSize + 1)*TSDB_NCHAR_SIZE); assert(tmp != NULL); - pVariant->wpz = tmp; + pVariant->wpz = (wchar_t *)tmp; } else { taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE); } @@ -726,7 +725,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, char type) { *((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL; 
return 0; } else { - double value; + double value = 0; int32_t ret; ret = convertToDouble(pVariant->pz, pVariant->nLen, &value); if ((errno == ERANGE && value == -1) || (ret != 0)) { @@ -970,18 +969,28 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { } } -void assignVal(char *val, char *src, int32_t len, int32_t type) { +void assignVal(char *val, const char *src, int32_t len, int32_t type) { switch (type) { case TSDB_DATA_TYPE_INT: { *((int32_t *)val) = GET_INT32_VAL(src); break; } case TSDB_DATA_TYPE_FLOAT: { + #ifdef _TD_ARM_32_ + float fv = GET_FLOAT_VAL(src); + SET_FLOAT_VAL_ALIGN(val, &fv); + #else *((float *)val) = GET_FLOAT_VAL(src); + #endif break; }; case TSDB_DATA_TYPE_DOUBLE: { + #ifdef _TD_ARM_32_ + double dv = GET_DOUBLE_VAL(src); + SET_DOUBLE_VAL_ALIGN(val, &dv); + #else *((double *)val) = GET_DOUBLE_VAL(src); + #endif break; }; case TSDB_DATA_TYPE_TIMESTAMP: @@ -998,6 +1007,14 @@ void assignVal(char *val, char *src, int32_t len, int32_t type) { *((int8_t *)val) = GET_INT8_VAL(src); break; }; + case TSDB_DATA_TYPE_BINARY: { + strncpy(val, src, len); + break; + }; + case TSDB_DATA_TYPE_NCHAR: { + wcsncpy((wchar_t*)val, (wchar_t*)src, len / TSDB_NCHAR_SIZE); + break; + }; default: { memcpy(val, src, len); break; diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c index 86e133df17957b57935659f9def12160020f947b..21d147d9473fb643df3bdc719aa19699619ea2d7 100644 --- a/src/util/src/tutil.c +++ b/src/util/src/tutil.c @@ -24,6 +24,8 @@ #include "ttime.h" #include "ttypes.h" #include "tutil.h" +#include "tlog.h" +#include "taoserror.h" int32_t strdequote(char *z) { if (z == NULL) { @@ -106,6 +108,7 @@ char **strsplit(char *z, const char *delim, int32_t *num) { if ((*num) >= size) { size = (size << 1); split = realloc(split, POINTER_BYTES * size); + assert(NULL != split); } } @@ -483,17 +486,15 @@ bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len) #endif } -bool taosValidateEncodec(char *encodec) { 
+bool taosValidateEncodec(const char *encodec) { #ifdef USE_LIBICONV iconv_t cd = iconv_open(encodec, DEFAULT_UNICODE_ENCODEC); if (cd == (iconv_t)(-1)) { return false; } iconv_close(cd); - return true; -#else - return true; #endif + return true; } bool taosGetVersionNumber(char *versionStr, int *versionNubmer) { @@ -525,6 +526,35 @@ bool taosGetVersionNumber(char *versionStr, int *versionNubmer) { return true; } +int taosCheckVersion(char *input_client_version, char *input_server_version, int comparedSegments) { + char client_version[64] = {0}; + char server_version[64] = {0}; + int clientVersionNumber[4] = {0}; + int serverVersionNumber[4] = {0}; + + strcpy(client_version, input_client_version); + strcpy(server_version, input_server_version); + + if (!taosGetVersionNumber(client_version, clientVersionNumber)) { + pError("invalid client version:%s", client_version); + return TSDB_CODE_INVALID_CLIENT_VERSION; + } + + if (!taosGetVersionNumber(server_version, serverVersionNumber)) { + pError("invalid server version:%s", server_version); + return TSDB_CODE_INVALID_CLIENT_VERSION; + } + + for(int32_t i = 0; i < comparedSegments; ++i) { + if (clientVersionNumber[i] != serverVersionNumber[i]) { + tscError("the %d-th number of server version:%s not matched with client version:%s", i, server_version, version); + return TSDB_CODE_INVALID_CLIENT_VERSION; + } + } + + return 0; +} + char *taosIpStr(uint32_t ipInt) { static char ipStrArray[3][30]; static int ipStrIndex = 0; @@ -538,13 +568,13 @@ char *taosIpStr(uint32_t ipInt) { void taosCleanupTier() {} #endif -FORCE_INLINE float taos_align_get_float(char* pBuf) { +FORCE_INLINE float taos_align_get_float(const char* pBuf) { float fv = 0; *(int32_t*)(&fv) = *(int32_t*)pBuf; return fv; } -FORCE_INLINE double taos_align_get_double(char* pBuf) { +FORCE_INLINE double taos_align_get_double(const char* pBuf) { double dv = 0; *(int64_t*)(&dv) = *(int64_t*)pBuf; return dv; diff --git a/src/util/src/version.c b/src/util/src/version.c 
index c85289fb8ad2f661ec4786c20b3e8ec6207b561d..d1294801ee7e29cdc91e8096889e7b06baf7bb67 100644 --- a/src/util/src/version.c +++ b/src/util/src/version.c @@ -1,5 +1,7 @@ -char version[64] = "1.6.4.4"; +char version[64] = "1.6.5.4"; char compatible_version[64] = "1.6.1.0"; -char gitinfo[128] = "d62c5c30231d04a736d437cf428af6e12599bd9f"; -char gitinfoOfInternal[128] = "8094a32d78dc519bd883d01ac2ba6ec49ac57a80"; -char buildinfo[512] = "Built by ubuntu at 2019-12-16 21:40"; +char gitinfo[128] = "3264067e97300c84caa61ac909d548c9ca56de6b"; +char gitinfoOfInternal[128] = "da88f4a2474737d1f9c76adcf0ff7fd0975e7342"; +char buildinfo[512] = "Built by root at 2020-02-05 14:38"; + +void libtaos_1_6_5_4_Linux_x64() {}; diff --git a/tests/examples/JDBC/readme.md b/tests/examples/JDBC/JDBCDemo/readme.md similarity index 100% rename from tests/examples/JDBC/readme.md rename to tests/examples/JDBC/JDBCDemo/readme.md diff --git a/tests/examples/JDBC/SpringJdbcTemplate/.gitignore b/tests/examples/JDBC/SpringJdbcTemplate/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..175de5c653d2f49b2ad1227764e60f741110592d --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/.gitignore @@ -0,0 +1,31 @@ +HELP.md +target/ +.mvn/ +!**/src/main/** +!**/src/test/** + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ + +### VS Code ### +.vscode/ diff --git a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml new file mode 100644 index 0000000000000000000000000000000000000000..45abc5354abf623082f6f317c19c9adedf17a097 --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml @@ -0,0 +1,85 @@ + + + + 4.0.0 + + com.taosdata.jdbc + SpringJdbcTemplate + 1.0-SNAPSHOT + + SpringJdbcTemplate + http://www.taosdata.com + + + UTF-8 + 
1.8 + 1.8 + + + + + + org.springframework + spring-context + 4.3.2.RELEASE + + + + org.springframework + spring-jdbc + 4.3.2.RELEASE + + + + junit + junit + 4.11 + test + + + + com.taosdata.jdbc + taos-jdbcdriver + 1.0.3 + + + + + + + + maven-compiler-plugin + 3.8.0 + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.1.0 + + + + com.taosdata.jdbc.App + + + + jar-with-dependencies + + + + + make-assembly + package + + single + + + + + + + diff --git a/tests/examples/JDBC/SpringJdbcTemplate/readme.md b/tests/examples/JDBC/SpringJdbcTemplate/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..1fe8809b506c248f226edd0f3200c6e352c0a73b --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/readme.md @@ -0,0 +1,34 @@ + +## TDengine Spring JDBC Template Demo + +`Spring JDBC Template` 简化了原生 JDBC Connection 获取释放等操作,使得操作数据库更加方便。 + +### 配置 + +修改 `src/main/resources/applicationContext.xml` 文件中 TDengine 的配置信息: + +```xml + + + + + + + + + + + + +``` + +### 打包运行 + +进入 `TDengine/tests/examples/JDBC/SpringJdbcTemplate` 目录下,执行以下命令可以生成可执行 jar 包。 +```shell +mvn clean package +``` +打包成功之后,进入 `target/` 目录下,执行以下命令就可运行测试: +```shell +java -jar SpringJdbcTemplate-1.0-SNAPSHOT-jar-with-dependencies.jar +``` \ No newline at end of file diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/App.java b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/App.java new file mode 100644 index 0000000000000000000000000000000000000000..3230af46a8016fee3d58c89ea3b2c1ddcf39cea5 --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/jdbc/App.java @@ -0,0 +1,44 @@ +package com.taosdata.jdbc; + + +import org.springframework.context.ApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.util.CollectionUtils; + +import java.util.List; +import java.util.Map; + 
+public class App { + + public static void main( String[] args ) { + + ApplicationContext ctx = new ClassPathXmlApplicationContext("applicationContext.xml"); + + JdbcTemplate jdbcTemplate = (JdbcTemplate) ctx.getBean("jdbcTemplate"); + + // create database + jdbcTemplate.execute("create database if not exists db "); + + // create table + jdbcTemplate.execute("create table if not exists db.tb (ts timestamp, temperature int, humidity float)"); + + String insertSql = "insert into db.tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"; + + // insert rows + int affectedRows = jdbcTemplate.update(insertSql); + + System.out.println("insert success " + affectedRows + " rows."); + + // query for list + List> resultList = jdbcTemplate.queryForList("select * from db.tb"); + + if(!CollectionUtils.isEmpty(resultList)){ + for (Map row : resultList){ + System.out.printf("%s, %d, %s\n", row.get("ts"), row.get("temperature"), row.get("humidity")); + } + } + + } + +} diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/main/resources/applicationContext.xml b/tests/examples/JDBC/SpringJdbcTemplate/src/main/resources/applicationContext.xml new file mode 100644 index 0000000000000000000000000000000000000000..41128148ec3fb69f342c634cc8e9dd9fbd3c0037 --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/main/resources/applicationContext.xml @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + diff --git a/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/AppTest.java b/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/AppTest.java new file mode 100644 index 0000000000000000000000000000000000000000..d6a699598e73470663af4eb04e03a9a6b083bc4c --- /dev/null +++ b/tests/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/jdbc/AppTest.java @@ -0,0 +1,20 @@ +package com.taosdata.jdbc; + +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +/** + * Unit test for simple App. 
+ */ +public class AppTest +{ + /** + * Rigorous Test :-) + */ + @Test + public void shouldAnswerWithTrue() + { + assertTrue( true ); + } +} diff --git a/tests/examples/JDBC/springbootdemo/.gitignore b/tests/examples/JDBC/springbootdemo/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b8a47adccb623c653c547481ff9d3221210f31ef --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/.gitignore @@ -0,0 +1,30 @@ +.mvn/ +target/ +!**/src/main/** +!**/src/test/** + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ + +### VS Code ### +.vscode/ diff --git a/tests/examples/JDBC/springbootdemo/.mvn/wrapper/MavenWrapperDownloader.java b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100644 index 0000000000000000000000000000000000000000..74f4de40122aca522184d5b1aac4f0ac29888b1a --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,118 @@ +/* + * Copyright 2012-2019 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + private static final String WRAPPER_VERSION = "0.5.5"; + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" + + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. + */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. 
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if (mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if (mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... + } + } + } + System.out.println("- Downloading from: " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if (!outputFile.getParentFile().exists()) { + if (!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { + String username = System.getenv("MVNW_USERNAME"); + char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); + Authenticator.setDefault(new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(username, password); + } + }); + } + URL website = new 
URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.jar b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..0d5e649888a4843c1520054d9672f80c62ebbb48 Binary files /dev/null and b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.jar differ diff --git a/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.properties b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000000000000000000000000000000000..7d59a01f2594defa27705a493da0e4d57465aa2d --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,2 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.2/apache-maven-3.6.2-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar diff --git a/tests/examples/JDBC/springbootdemo/mvnw b/tests/examples/JDBC/springbootdemo/mvnw new file mode 100755 index 0000000000000000000000000000000000000000..21d3ee84568ff68c4712677da7c3b06f61ab5543 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/mvnw @@ -0,0 +1,310 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. 
+cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! 
`expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit 
checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + fi + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` + fi + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=`cygpath --path --windows "$javaClass"` + fi + if [ -e "$javaClass" ]; then + if [ ! 
-e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. 
+MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/tests/examples/JDBC/springbootdemo/mvnw.cmd b/tests/examples/JDBC/springbootdemo/mvnw.cmd new file mode 100644 index 0000000000000000000000000000000000000000..84d60abc339b13f80f3300b00387f2d4cc4eb328 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM https://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. 
+@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. 
+goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. 
+if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml new file mode 100644 index 0000000000000000000000000000000000000000..74522979c068120ac175f324dced6e8cd66ca1d8 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/pom.xml @@ -0,0 +1,87 @@ + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 2.2.1.RELEASE + + + com.taosdata.jdbc + springbootdemo + 0.0.1-SNAPSHOT + springbootdemo + Demo project for using tdengine with Spring Boot + + + 1.8 + + + + + org.springframework.boot + spring-boot-starter-data-jdbc + + + org.springframework.boot + spring-boot-starter-thymeleaf + + + org.springframework.boot + spring-boot-starter-web + + + org.mybatis.spring.boot + mybatis-spring-boot-starter + 2.1.1 + + + + org.springframework.boot + spring-boot-devtools + runtime + true + + + org.springframework.boot + spring-boot-configuration-processor + true + + + org.springframework.boot + spring-boot-starter-test + test + + + org.junit.vintage + junit-vintage-engine + + + + + + com.taosdata.jdbc + taos-jdbcdriver + 1.0.3 + + + + com.alibaba + druid-spring-boot-starter + 1.1.17 + + + + + + + + + org.springframework.boot + 
spring-boot-maven-plugin + + + + + diff --git a/tests/examples/JDBC/springbootdemo/readme.md b/tests/examples/JDBC/springbootdemo/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..64aabedcdce5d16a610f4f2b084b8d62d54ff133 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/readme.md @@ -0,0 +1,96 @@ +## TDengine SpringBoot + Mybatis Demo + +### 配置 application.properties +```properties +# datasource config +spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver +spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/log +spring.datasource.username=root +spring.datasource.password=taosdata + +spring.datasource.druid.initial-size=5 +spring.datasource.druid.min-idle=5 +spring.datasource.druid.max-active=5 +# max wait time for get connection, ms +spring.datasource.druid.max-wait=60000 + +spring.datasource.druid.validation-query=describe log.dn +spring.datasource.druid.validation-query-timeout=5000 +spring.datasource.druid.test-on-borrow=false +spring.datasource.druid.test-on-return=false +spring.datasource.druid.test-while-idle=true +spring.datasource.druid.time-between-eviction-runs-millis=60000 +spring.datasource.druid.min-evictable-idle-time-millis=600000 +spring.datasource.druid.max-evictable-idle-time-millis=900000 + +# mybatis +mybatis.mapper-locations=classpath:mapper/*.xml + +# log +logging.level.com.taosdata.jdbc.springbootdemo.dao=debug +``` + +### 主要功能 + +* 创建数据库和表 +```xml + + + create database if not exists test; + + + + create table if not exists test.weather(ts timestamp, temperature int, humidity float); + +``` + +* 插入单条记录 +```xml + + + insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT}) + +``` +* 插入多条记录 +```xml + + + insert into test.weather (ts, temperature, humidity) values + + (now + #{index}a, #{weather.temperature}, #{weather.humidity}) + + +``` +* 分页查询 +```xml + + + + + + + + + + + + + + ts, temperature, humidity + + + + +``` + diff --git 
a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplication.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplication.java new file mode 100644 index 0000000000000000000000000000000000000000..69cd3e0ced2888575d890ffea36407455c4bea7a --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplication.java @@ -0,0 +1,15 @@ +package com.taosdata.jdbc.springbootdemo; + +import org.mybatis.spring.annotation.MapperScan; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@MapperScan(basePackages = {"com.taosdata.jdbc.springbootdemo.dao"}) +@SpringBootApplication +public class SpringbootdemoApplication { + + public static void main(String[] args) { + SpringApplication.run(SpringbootdemoApplication.class, args); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java new file mode 100644 index 0000000000000000000000000000000000000000..9123abd97b82fe1d4267c7341f3ea87bd5127caa --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/controller/WeatherController.java @@ -0,0 +1,60 @@ +package com.taosdata.jdbc.springbootdemo.controller; + +import com.taosdata.jdbc.springbootdemo.domain.Weather; +import com.taosdata.jdbc.springbootdemo.service.WeatherService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.*; + +import java.util.List; + +@RequestMapping("/weather") +@RestController +public class WeatherController { + + @Autowired + private WeatherService weatherService; + + /** + * create database and table + * @return + */ + 
@GetMapping("/init") + public boolean init(){ + return weatherService.init(); + } + + /** + * Pagination Query + * @param limit + * @param offset + * @return + */ + @GetMapping("/{limit}/{offset}") + public List queryWeather(@PathVariable Long limit, @PathVariable Long offset){ + return weatherService.query(limit, offset); + } + + /** + * upload single weather info + * @param temperature + * @param humidity + * @return + */ + @PostMapping("/{temperature}/{humidity}") + public int saveWeather(@PathVariable int temperature, @PathVariable float humidity){ + + return weatherService.save(temperature, humidity); + } + + /** + * upload multi weather info + * @param weatherList + * @return + */ + @PostMapping("/batch") + public int batchSaveWeather(@RequestBody List weatherList){ + + return weatherService.save(weatherList); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/WeatherMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/WeatherMapper.java new file mode 100644 index 0000000000000000000000000000000000000000..1e3db1f49106606c412851c0a74ad382adea68fb --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/dao/WeatherMapper.java @@ -0,0 +1,19 @@ +package com.taosdata.jdbc.springbootdemo.dao; + +import com.taosdata.jdbc.springbootdemo.domain.Weather; +import org.apache.ibatis.annotations.Param; + +import java.util.List; + +public interface WeatherMapper { + + int insert(Weather weather); + + int batchInsert(List weatherList); + + List select(@Param("limit") Long limit, @Param("offset")Long offset); + + void createDB(); + + void createTable(); +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java new file mode 100644 index 
0000000000000000000000000000000000000000..9547a8a89bf4aaff73696091e54f5bb460dcb796 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/domain/Weather.java @@ -0,0 +1,36 @@ +package com.taosdata.jdbc.springbootdemo.domain; + +import java.sql.Timestamp; + +public class Weather { + + private Timestamp ts; + + private int temperature; + + private float humidity; + + public Timestamp getTs() { + return ts; + } + + public void setTs(Timestamp ts) { + this.ts = ts; + } + + public int getTemperature() { + return temperature; + } + + public void setTemperature(int temperature) { + this.temperature = temperature; + } + + public float getHumidity() { + return humidity; + } + + public void setHumidity(float humidity) { + this.humidity = humidity; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java new file mode 100644 index 0000000000000000000000000000000000000000..396d70bf9246bfd7e293cccec5b00f2d4aac4963 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/service/WeatherService.java @@ -0,0 +1,40 @@ +package com.taosdata.jdbc.springbootdemo.service; + +import com.taosdata.jdbc.springbootdemo.dao.WeatherMapper; +import com.taosdata.jdbc.springbootdemo.domain.Weather; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.List; + +@Service +public class WeatherService { + + @Autowired + private WeatherMapper weatherMapper; + + public boolean init() { + + weatherMapper.createDB(); + weatherMapper.createTable(); + + return true; + } + + public List query(Long limit, Long offset) { + return weatherMapper.select(limit, offset); + } + + public int save(int temperature, float humidity) { + Weather weather = new Weather(); + 
weather.setTemperature(temperature); + weather.setHumidity(humidity); + + return weatherMapper.insert(weather); + } + + public int save(List weatherList) { + return weatherMapper.batchInsert(weatherList); + } + +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties new file mode 100644 index 0000000000000000000000000000000000000000..dc77e144f0ffbe131f7eda69b7bb66fd7870c05e --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties @@ -0,0 +1,26 @@ +# datasource config +spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver +spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/log +spring.datasource.username=root +spring.datasource.password=taosdata + +spring.datasource.druid.initial-size=5 +spring.datasource.druid.min-idle=5 +spring.datasource.druid.max-active=5 +# max wait time for get connection, ms +spring.datasource.druid.max-wait=60000 + +spring.datasource.druid.validation-query=describe log.dn +spring.datasource.druid.validation-query-timeout=5000 +spring.datasource.druid.test-on-borrow=false +spring.datasource.druid.test-on-return=false +spring.datasource.druid.test-while-idle=true +spring.datasource.druid.time-between-eviction-runs-millis=60000 +spring.datasource.druid.min-evictable-idle-time-millis=600000 +spring.datasource.druid.max-evictable-idle-time-millis=900000 + + +#mybatis +mybatis.mapper-locations=classpath:mapper/*.xml + +logging.level.com.taosdata.jdbc.springbootdemo.dao=debug \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/mapper/WeatherMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/resources/mapper/WeatherMapper.xml new file mode 100644 index 0000000000000000000000000000000000000000..e894f9a6583d271d8ce526e9afe79528f0fd5490 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/resources/mapper/WeatherMapper.xml @@ -0,0 
+1,49 @@ + + + + + + + + + + + + + create database if not exists test; + + + + create table if not exists test.weather(ts timestamp, temperature int, humidity float); + + + + ts, temperature, humidity + + + + + + insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT}) + + + + insert into test.weather (ts, temperature, humidity) values + + (now + #{index}a, #{weather.temperature}, #{weather.humidity}) + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/test/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplicationTests.java b/tests/examples/JDBC/springbootdemo/src/test/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplicationTests.java new file mode 100644 index 0000000000000000000000000000000000000000..23a7420dab24a15d9d24341839ba58caa9acb4b9 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/test/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplicationTests.java @@ -0,0 +1,13 @@ +package com.taosdata.jdbc.springbootdemo; + +import org.junit.jupiter.api.Test; +import org.springframework.boot.test.context.SpringBootTest; + +@SpringBootTest +class SpringbootdemoApplicationTests { + + @Test + void contextLoads() { + } + +} diff --git a/tests/examples/c/CMakeLists.txt b/tests/examples/c/CMakeLists.txt index af0b8cd18d391ab328c669514a1596f9cd8f986f..287fca7d410b88d240642a57ec194b3d0c686975 100644 --- a/tests/examples/c/CMakeLists.txt +++ b/tests/examples/c/CMakeLists.txt @@ -1,6 +1,6 @@ PROJECT(TDengine) -IF (TD_WINDOWS) +IF (TD_WINDOWS_64) INCLUDE_DIRECTORIES(${TD_ROOT_DIR}/deps/pthread) ENDIF () diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile index ac8ff21aaffc47bbf2a36386f06996b6ad13a086..0a4b8ee9d2bd00ab3daaac0c3a93497de4fd03f8 100644 --- a/tests/examples/c/makefile +++ b/tests/examples/c/makefile @@ -3,21 +3,23 @@ ROOT=./ TARGET=exe -LFLAGS = '-Wl,-rpath,/usr/local/taos/driver' -ltaos -lpthread -lm -lrt -CFLAGS = 
-O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 -std=gnu99 +LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt +#LFLAGS = '-Wl,-rpath,/home/zbm/project/td/debug/build/lib/' -L/home/zbm/project/td/debug/build/lib -ltaos -lpthread -lm -lrt +CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 \ + -I/usr/local/taos/include -std=gnu99 all: $(TARGET) exe: gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)/asyncdemo $(LFLAGS) gcc $(CFLAGS) ./demo.c -o $(ROOT)/demo $(LFLAGS) + gcc $(CFLAGS) ./prepare.c -o $(ROOT)/prepare $(LFLAGS) gcc $(CFLAGS) ./stream.c -o $(ROOT)/stream $(LFLAGS) - gcc $(CFLAGS) ./subscribe.c -o $(ROOT)/subscribe $(LFLAGS) + gcc $(CFLAGS) ./subscribe.c -o $(ROOT)subscribe $(LFLAGS) clean: - rm $(ROOT)asyncdemo - rm $(ROOT)demo - rm $(ROOT)stream - rm $(ROOT)subscribe - - \ No newline at end of file + rm $(ROOT)/asyncdemo + rm $(ROOT)/demo + rm $(ROOT)/prepare + rm $(ROOT)/stream + rm $(ROOT)/subscribe diff --git a/tests/examples/c/subscribe.c b/tests/examples/c/subscribe.c index 219fa133e02f24cbafb5d446ff3a4aacac2f9c67..0bf93f6f2ddd81e715e7d9cf0b5abfd054635060 100644 --- a/tests/examples/c/subscribe.c +++ b/tests/examples/c/subscribe.c @@ -1,18 +1,3 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
- * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - // sample code for TDengine subscribe/consume API // to compile: gcc -o subscribe subscribe.c -ltaos @@ -20,43 +5,239 @@ #include #include #include // include TDengine header file +#include + +void print_result(TAOS_RES* res, int blockFetch) { + TAOS_ROW row = NULL; + int num_fields = taos_num_fields(res); + TAOS_FIELD* fields = taos_fetch_fields(res); + int nRows = 0; + + if (blockFetch) { + nRows = taos_fetch_block(res, &row); + for (int i = 0; i < nRows; i++) { + char temp[256]; + taos_print_row(temp, row + i, fields, num_fields); + puts(temp); + } + } else { + while ((row = taos_fetch_row(res))) { + char temp[256]; + taos_print_row(temp, row, fields, num_fields); + puts(temp); + nRows++; + } + } + + printf("%d rows consumed.\n", nRows); +} -int main(int argc, char *argv[]) -{ - TAOS_SUB *tsub; + +void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { + print_result(res, *(int*)param); +} + + +void check_row_count(int line, TAOS_RES* res, int expected) { + int actual = 0; TAOS_ROW row; - char dbname[64], table[64]; - char temp[256]; + while ((row = taos_fetch_row(res))) { + actual++; + } + if (actual != expected) { + printf("line %d: row count mismatch, expected: %d, actual: %d\n", line, expected, actual); + } else { + printf("line %d: %d rows consumed as expected\n", line, actual); + } +} - if ( argc == 1 ) { - printf("usage: %s server-ip db-name table-name \n", argv[0]); - exit(0); - } - if ( argc >= 2 ) strcpy(dbname, argv[2]); - if ( argc >= 3 ) strcpy(table, argv[3]); +void run_test(TAOS* taos) { + taos_query(taos, "drop database if exists test;"); + + usleep(100000); + taos_query(taos, "create database test tables 5;"); + usleep(100000); + taos_query(taos, "use test;"); + usleep(100000); + taos_query(taos, "create table meters(ts timestamp, a int, b binary(20)) tags(loc binary(20), area int);"); + + 
taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:00:00.000', 0, 'china');"); + taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:01:00.000', 0, 'china');"); + taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:02:00.000', 0, 'china');"); + taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:00:00.000', 0, 'china');"); + taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:01:00.000', 0, 'china');"); + taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:02:00.000', 0, 'china');"); + taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:03:00.000', 0, 'china');"); + taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:00:00.000', 0, 'UK');"); + taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:00.000', 0, 'UK');"); + taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:01.000', 0, 'UK');"); + taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:02.000', 0, 'UK');"); + taos_query(taos, "insert into t3 using meters tags('tianjin', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t4 using meters tags('wuhan', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t5 using meters tags('jinan', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t6 using meters tags('haikou', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t7 using meters tags('nanjing', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert into t8 using meters tags('lanzhou', 0) values('2020-01-01 00:01:02.000', 0, 'china');"); + taos_query(taos, "insert 
into t9 using meters tags('tokyo', 0) values('2020-01-01 00:01:02.000', 0, 'japan');"); + + // super tables subscription + + TAOS_SUB* tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0); + TAOS_RES* res = taos_consume(tsub); + check_row_count(__LINE__, res, 18); + + res = taos_consume(tsub); + check_row_count(__LINE__, res, 0); + + taos_query(taos, "insert into t0 values('2020-01-01 00:03:00.000', 0, 'china');"); + taos_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0, 'china');"); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 2); + + taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0, 'UK');"); + taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0, 'UK');"); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 2); + + taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0, 'china');"); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 1); - tsub = taos_subscribe(argv[1], "root", "taosdata", dbname, table, 0, 1000); - if ( tsub == NULL ) { - printf("failed to connet to db:%s\n", dbname); + // keep progress information and restart subscription + taos_unsubscribe(tsub, 1); + taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0, 'china');"); + tsub = taos_subscribe(taos, 1, "test", "select * from meters;", NULL, NULL, 0); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 24); + + // keep progress information and continue previous subscription + taos_unsubscribe(tsub, 1); + tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 0); + + // don't keep progress information and continue previous subscription + taos_unsubscribe(tsub, 0); + tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 24); + + // single meter subscription + + 
taos_unsubscribe(tsub, 0); + tsub = taos_subscribe(taos, 0, "test", "select * from t0;", NULL, NULL, 0); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 5); + + res = taos_consume(tsub); + check_row_count(__LINE__, res, 0); + + taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0, 'china');"); + res = taos_consume(tsub); + check_row_count(__LINE__, res, 1); + + taos_unsubscribe(tsub, 0); +} + + +int main(int argc, char *argv[]) { + const char* host = "127.0.0.1"; + const char* user = "root"; + const char* passwd = "taosdata"; + const char* sql = "select * from meters;"; + const char* topic = "test-multiple"; + int async = 1, restart = 0, keep = 1, test = 0, blockFetch = 0; + + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-h=", 3) == 0) { + host = argv[i] + 3; + continue; + } + if (strncmp(argv[i], "-u=", 3) == 0) { + user = argv[i] + 3; + continue; + } + if (strncmp(argv[i], "-p=", 3) == 0) { + passwd = argv[i] + 3; + continue; + } + if (strcmp(argv[i], "-sync") == 0) { + async = 0; + continue; + } + if (strcmp(argv[i], "-restart") == 0) { + restart = 1; + continue; + } + if (strcmp(argv[i], "-single") == 0) { + sql = "select * from t0;"; + topic = "test-single"; + continue; + } + if (strcmp(argv[i], "-nokeep") == 0) { + keep = 0; + continue; + } + if (strncmp(argv[i], "-sql=", 5) == 0) { + sql = argv[i] + 5; + topic = "test-custom"; + continue; + } + if (strcmp(argv[i], "-test") == 0) { + test = 1; + continue; + } + if (strcmp(argv[i], "-block-fetch") == 0) { + blockFetch = 1; + continue; + } + } + + // init TAOS + taos_init(); + + TAOS* taos = taos_connect(host, user, passwd, "test", 0); + if (taos == NULL) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); exit(1); } - TAOS_FIELD *fields = taos_fetch_subfields(tsub); - int fcount = taos_subfields_count(tsub); + if (test) { + run_test(taos); + taos_close(taos); + exit(0); + } + + TAOS_SUB* tsub = NULL; + if (async) { + // create an asynchronized 
subscription, the callback function will be called every 1s + tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); + } else { + // create an synchronized subscription, need to call 'taos_consume' manually + tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); + } - printf("start to retrieve data\n"); - printf("please use other taos client, insert rows into %s.%s\n", dbname, table); - while ( 1 ) { - row = taos_consume(tsub); - if ( row == NULL ) break; + if (tsub == NULL) { + printf("failed to create subscription.\n"); + exit(0); + } - taos_print_row(temp, row, fields, fcount); - printf("%s\n", temp); + if (async) { + getchar(); + } else while(1) { + TAOS_RES* res = taos_consume(tsub); + if (res == NULL) { + printf("failed to consume data."); + break; + } else { + print_result(res, blockFetch); + getchar(); + } } - taos_unsubscribe(tsub); + taos_unsubscribe(tsub, keep); + taos_close(taos); return 0; } -